1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
36 #include "opcode/arm.h"
40 #include "dw2gencfi.h"
43 #include "dwarf2dbg.h"
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
49 /* This structure holds the unwinding state. */
54 symbolS
* table_entry
;
55 symbolS
* personality_routine
;
56 int personality_index
;
57 /* The segment containing the function. */
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes
;
64 /* The number of bytes pushed to the stack. */
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset
;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
74 /* Nonzero if an unwind_setfp directive has been seen. */
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored
:1;
82 /* Results from operand parsing worker functions. */
86 PARSE_OPERAND_SUCCESS
,
88 PARSE_OPERAND_FAIL_NO_BACKTRACK
89 } parse_operand_result
;
98 /* Types of processor to assemble for. */
100 /* The code that was here used to select a default CPU depending on compiler
101 pre-defines which were only present when doing native builds, thus
102 changing gas' default behaviour depending upon the build host.
104 If you have a target that requires a default CPU option then the you
105 should define CPU_DEFAULT here. */
110 # define FPU_DEFAULT FPU_ARCH_FPA
111 # elif defined (TE_NetBSD)
113 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 /* Legacy a.out format. */
116 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # elif defined (TE_VXWORKS)
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 /* For backwards compatibility, default to FPA. */
122 # define FPU_DEFAULT FPU_ARCH_FPA
124 #endif /* ifndef FPU_DEFAULT */
126 #define streq(a, b) (strcmp (a, b) == 0)
128 static arm_feature_set cpu_variant
;
129 static arm_feature_set arm_arch_used
;
130 static arm_feature_set thumb_arch_used
;
132 /* Flags stored in private area of BFD structure. */
133 static int uses_apcs_26
= FALSE
;
134 static int atpcs
= FALSE
;
135 static int support_interwork
= FALSE
;
136 static int uses_apcs_float
= FALSE
;
137 static int pic_code
= FALSE
;
138 static int fix_v4bx
= FALSE
;
139 /* Warn on using deprecated features. */
140 static int warn_on_deprecated
= TRUE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
164 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
165 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
168 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
171 static const arm_feature_set arm_ext_v1
= ARM_FEATURE (ARM_EXT_V1
, 0);
172 static const arm_feature_set arm_ext_v2
= ARM_FEATURE (ARM_EXT_V1
, 0);
173 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE (ARM_EXT_V2S
, 0);
174 static const arm_feature_set arm_ext_v3
= ARM_FEATURE (ARM_EXT_V3
, 0);
175 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE (ARM_EXT_V3M
, 0);
176 static const arm_feature_set arm_ext_v4
= ARM_FEATURE (ARM_EXT_V4
, 0);
177 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE (ARM_EXT_V4T
, 0);
178 static const arm_feature_set arm_ext_v5
= ARM_FEATURE (ARM_EXT_V5
, 0);
179 static const arm_feature_set arm_ext_v4t_5
=
180 ARM_FEATURE (ARM_EXT_V4T
| ARM_EXT_V5
, 0);
181 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE (ARM_EXT_V5T
, 0);
182 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE (ARM_EXT_V5E
, 0);
183 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE (ARM_EXT_V5ExP
, 0);
184 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE (ARM_EXT_V5J
, 0);
185 static const arm_feature_set arm_ext_v6
= ARM_FEATURE (ARM_EXT_V6
, 0);
186 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE (ARM_EXT_V6K
, 0);
187 static const arm_feature_set arm_ext_v6z
= ARM_FEATURE (ARM_EXT_V6Z
, 0);
188 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE (ARM_EXT_V6T2
, 0);
189 static const arm_feature_set arm_ext_v6_notm
= ARM_FEATURE (ARM_EXT_V6_NOTM
, 0);
190 static const arm_feature_set arm_ext_v6_dsp
= ARM_FEATURE (ARM_EXT_V6_DSP
, 0);
191 static const arm_feature_set arm_ext_barrier
= ARM_FEATURE (ARM_EXT_BARRIER
, 0);
192 static const arm_feature_set arm_ext_msr
= ARM_FEATURE (ARM_EXT_THUMB_MSR
, 0);
193 static const arm_feature_set arm_ext_div
= ARM_FEATURE (ARM_EXT_DIV
, 0);
194 static const arm_feature_set arm_ext_v7
= ARM_FEATURE (ARM_EXT_V7
, 0);
195 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE (ARM_EXT_V7A
, 0);
196 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE (ARM_EXT_V7R
, 0);
197 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE (ARM_EXT_V7M
, 0);
198 static const arm_feature_set arm_ext_m
=
199 ARM_FEATURE (ARM_EXT_V6M
| ARM_EXT_V7M
, 0);
201 static const arm_feature_set arm_arch_any
= ARM_ANY
;
202 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1);
203 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
204 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
206 static const arm_feature_set arm_cext_iwmmxt2
=
207 ARM_FEATURE (0, ARM_CEXT_IWMMXT2
);
208 static const arm_feature_set arm_cext_iwmmxt
=
209 ARM_FEATURE (0, ARM_CEXT_IWMMXT
);
210 static const arm_feature_set arm_cext_xscale
=
211 ARM_FEATURE (0, ARM_CEXT_XSCALE
);
212 static const arm_feature_set arm_cext_maverick
=
213 ARM_FEATURE (0, ARM_CEXT_MAVERICK
);
214 static const arm_feature_set fpu_fpa_ext_v1
= ARM_FEATURE (0, FPU_FPA_EXT_V1
);
215 static const arm_feature_set fpu_fpa_ext_v2
= ARM_FEATURE (0, FPU_FPA_EXT_V2
);
216 static const arm_feature_set fpu_vfp_ext_v1xd
=
217 ARM_FEATURE (0, FPU_VFP_EXT_V1xD
);
218 static const arm_feature_set fpu_vfp_ext_v1
= ARM_FEATURE (0, FPU_VFP_EXT_V1
);
219 static const arm_feature_set fpu_vfp_ext_v2
= ARM_FEATURE (0, FPU_VFP_EXT_V2
);
220 static const arm_feature_set fpu_vfp_ext_v3xd
= ARM_FEATURE (0, FPU_VFP_EXT_V3xD
);
221 static const arm_feature_set fpu_vfp_ext_v3
= ARM_FEATURE (0, FPU_VFP_EXT_V3
);
222 static const arm_feature_set fpu_vfp_ext_d32
=
223 ARM_FEATURE (0, FPU_VFP_EXT_D32
);
224 static const arm_feature_set fpu_neon_ext_v1
= ARM_FEATURE (0, FPU_NEON_EXT_V1
);
225 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
226 ARM_FEATURE (0, FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
227 static const arm_feature_set fpu_vfp_fp16
= ARM_FEATURE (0, FPU_VFP_EXT_FP16
);
228 static const arm_feature_set fpu_neon_ext_fma
= ARM_FEATURE (0, FPU_NEON_EXT_FMA
);
229 static const arm_feature_set fpu_vfp_ext_fma
= ARM_FEATURE (0, FPU_VFP_EXT_FMA
);
231 static int mfloat_abi_opt
= -1;
232 /* Record user cpu selection for object attributes. */
233 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
234 /* Must be long enough to hold any of the names in arm_cpus. */
235 static char selected_cpu_name
[16];
238 static int meabi_flags
= EABI_DEFAULT
;
240 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
243 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
248 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
253 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
254 symbolS
* GOT_symbol
;
257 /* 0: assemble for ARM,
258 1: assemble for Thumb,
259 2: assemble for Thumb even though target CPU does not support thumb
261 static int thumb_mode
= 0;
262 /* A value distinct from the possible values for thumb_mode that we
263 can use to record whether thumb_mode has been copied into the
264 tc_frag_data field of a frag. */
265 #define MODE_RECORDED (1 << 4)
267 /* Specifies the intrinsic IT insn behavior mode. */
268 enum implicit_it_mode
270 IMPLICIT_IT_MODE_NEVER
= 0x00,
271 IMPLICIT_IT_MODE_ARM
= 0x01,
272 IMPLICIT_IT_MODE_THUMB
= 0x02,
273 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
275 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
277 /* If unified_syntax is true, we are processing the new unified
278 ARM/Thumb syntax. Important differences from the old ARM mode:
280 - Immediate operands do not require a # prefix.
281 - Conditional affixes always appear at the end of the
282 instruction. (For backward compatibility, those instructions
283 that formerly had them in the middle, continue to accept them
285 - The IT instruction may appear, and if it does is validated
286 against subsequent conditional affixes. It does not generate
289 Important differences from the old Thumb mode:
291 - Immediate operands do not require a # prefix.
292 - Most of the V6T2 instructions are only available in unified mode.
293 - The .N and .W suffixes are recognized and honored (it is an error
294 if they cannot be honored).
295 - All instructions set the flags if and only if they have an 's' affix.
296 - Conditional affixes may be used. They are validated against
297 preceding IT instructions. Unlike ARM mode, you cannot use a
298 conditional affix except in the scope of an IT instruction. */
300 static bfd_boolean unified_syntax
= FALSE
;
315 enum neon_el_type type
;
319 #define NEON_MAX_TYPE_ELS 4
323 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
327 enum it_instruction_type
332 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
333 if inside, should be the last one. */
334 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
335 i.e. BKPT and NOP. */
336 IT_INSN
/* The IT insn has been parsed. */
342 unsigned long instruction
;
346 /* "uncond_value" is set to the value in place of the conditional field in
347 unconditional versions of the instruction, or -1 if nothing is
350 struct neon_type vectype
;
351 /* This does not indicate an actual NEON instruction, only that
352 the mnemonic accepts neon-style type suffixes. */
354 /* Set to the opcode if the instruction needs relaxation.
355 Zero if the instruction is not relaxed. */
359 bfd_reloc_code_real_type type
;
364 enum it_instruction_type it_insn_type
;
370 struct neon_type_el vectype
;
371 unsigned present
: 1; /* Operand present. */
372 unsigned isreg
: 1; /* Operand was a register. */
373 unsigned immisreg
: 1; /* .imm field is a second register. */
374 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
375 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
376 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
377 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
378 instructions. This allows us to disambiguate ARM <-> vector insns. */
379 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
380 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
381 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
382 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
383 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
384 unsigned writeback
: 1; /* Operand has trailing ! */
385 unsigned preind
: 1; /* Preindexed address. */
386 unsigned postind
: 1; /* Postindexed address. */
387 unsigned negative
: 1; /* Index register was negated. */
388 unsigned shifted
: 1; /* Shift applied to operation. */
389 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
393 static struct arm_it inst
;
395 #define NUM_FLOAT_VALS 8
397 const char * fp_const
[] =
399 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
402 /* Number of littlenums required to hold an extended precision number. */
403 #define MAX_LITTLENUMS 6
405 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
415 #define CP_T_X 0x00008000
416 #define CP_T_Y 0x00400000
418 #define CONDS_BIT 0x00100000
419 #define LOAD_BIT 0x00100000
421 #define DOUBLE_LOAD_FLAG 0x00000001
425 const char * template_name
;
429 #define COND_ALWAYS 0xE
433 const char * template_name
;
437 struct asm_barrier_opt
439 const char * template_name
;
443 /* The bit that distinguishes CPSR and SPSR. */
444 #define SPSR_BIT (1 << 22)
446 /* The individual PSR flag bits. */
447 #define PSR_c (1 << 16)
448 #define PSR_x (1 << 17)
449 #define PSR_s (1 << 18)
450 #define PSR_f (1 << 19)
455 bfd_reloc_code_real_type reloc
;
460 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
461 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
466 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
469 /* Bits for DEFINED field in neon_typed_alias. */
470 #define NTA_HASTYPE 1
471 #define NTA_HASINDEX 2
473 struct neon_typed_alias
475 unsigned char defined
;
477 struct neon_type_el eltype
;
480 /* ARM register categories. This includes coprocessor numbers and various
481 architecture extensions' registers. */
507 /* Structure for a hash table entry for a register.
508 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
509 information which states whether a vector type or index is specified (for a
510 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
514 unsigned char number
;
516 unsigned char builtin
;
517 struct neon_typed_alias
* neon
;
520 /* Diagnostics used when we don't get a register of the expected type. */
521 const char * const reg_expected_msgs
[] =
523 N_("ARM register expected"),
524 N_("bad or missing co-processor number"),
525 N_("co-processor register expected"),
526 N_("FPA register expected"),
527 N_("VFP single precision register expected"),
528 N_("VFP/Neon double precision register expected"),
529 N_("Neon quad precision register expected"),
530 N_("VFP single or double precision register expected"),
531 N_("Neon double or quad precision register expected"),
532 N_("VFP single, double or Neon quad precision register expected"),
533 N_("VFP system register expected"),
534 N_("Maverick MVF register expected"),
535 N_("Maverick MVD register expected"),
536 N_("Maverick MVFX register expected"),
537 N_("Maverick MVDX register expected"),
538 N_("Maverick MVAX register expected"),
539 N_("Maverick DSPSC register expected"),
540 N_("iWMMXt data register expected"),
541 N_("iWMMXt control register expected"),
542 N_("iWMMXt scalar register expected"),
543 N_("XScale accumulator register expected"),
546 /* Some well known registers that we refer to directly elsewhere. */
551 /* ARM instructions take 4bytes in the object file, Thumb instructions
557 /* Basic string to match. */
558 const char * template_name
;
560 /* Parameters to instruction. */
561 unsigned int operands
[8];
563 /* Conditional tag - see opcode_lookup. */
564 unsigned int tag
: 4;
566 /* Basic instruction code. */
567 unsigned int avalue
: 28;
569 /* Thumb-format instruction code. */
572 /* Which architecture variant provides this instruction. */
573 const arm_feature_set
* avariant
;
574 const arm_feature_set
* tvariant
;
576 /* Function to call to encode instruction in ARM format. */
577 void (* aencode
) (void);
579 /* Function to call to encode instruction in Thumb format. */
580 void (* tencode
) (void);
583 /* Defines for various bits that we will want to toggle. */
584 #define INST_IMMEDIATE 0x02000000
585 #define OFFSET_REG 0x02000000
586 #define HWOFFSET_IMM 0x00400000
587 #define SHIFT_BY_REG 0x00000010
588 #define PRE_INDEX 0x01000000
589 #define INDEX_UP 0x00800000
590 #define WRITE_BACK 0x00200000
591 #define LDM_TYPE_2_OR_3 0x00400000
592 #define CPSI_MMOD 0x00020000
594 #define LITERAL_MASK 0xf000f000
595 #define OPCODE_MASK 0xfe1fffff
596 #define V4_STR_BIT 0x00000020
598 #define T2_SUBS_PC_LR 0xf3de8f00
600 #define DATA_OP_SHIFT 21
602 #define T2_OPCODE_MASK 0xfe1fffff
603 #define T2_DATA_OP_SHIFT 21
605 /* Codes to distinguish the arithmetic instructions. */
616 #define OPCODE_CMP 10
617 #define OPCODE_CMN 11
618 #define OPCODE_ORR 12
619 #define OPCODE_MOV 13
620 #define OPCODE_BIC 14
621 #define OPCODE_MVN 15
623 #define T2_OPCODE_AND 0
624 #define T2_OPCODE_BIC 1
625 #define T2_OPCODE_ORR 2
626 #define T2_OPCODE_ORN 3
627 #define T2_OPCODE_EOR 4
628 #define T2_OPCODE_ADD 8
629 #define T2_OPCODE_ADC 10
630 #define T2_OPCODE_SBC 11
631 #define T2_OPCODE_SUB 13
632 #define T2_OPCODE_RSB 14
634 #define T_OPCODE_MUL 0x4340
635 #define T_OPCODE_TST 0x4200
636 #define T_OPCODE_CMN 0x42c0
637 #define T_OPCODE_NEG 0x4240
638 #define T_OPCODE_MVN 0x43c0
640 #define T_OPCODE_ADD_R3 0x1800
641 #define T_OPCODE_SUB_R3 0x1a00
642 #define T_OPCODE_ADD_HI 0x4400
643 #define T_OPCODE_ADD_ST 0xb000
644 #define T_OPCODE_SUB_ST 0xb080
645 #define T_OPCODE_ADD_SP 0xa800
646 #define T_OPCODE_ADD_PC 0xa000
647 #define T_OPCODE_ADD_I8 0x3000
648 #define T_OPCODE_SUB_I8 0x3800
649 #define T_OPCODE_ADD_I3 0x1c00
650 #define T_OPCODE_SUB_I3 0x1e00
652 #define T_OPCODE_ASR_R 0x4100
653 #define T_OPCODE_LSL_R 0x4080
654 #define T_OPCODE_LSR_R 0x40c0
655 #define T_OPCODE_ROR_R 0x41c0
656 #define T_OPCODE_ASR_I 0x1000
657 #define T_OPCODE_LSL_I 0x0000
658 #define T_OPCODE_LSR_I 0x0800
660 #define T_OPCODE_MOV_I8 0x2000
661 #define T_OPCODE_CMP_I8 0x2800
662 #define T_OPCODE_CMP_LR 0x4280
663 #define T_OPCODE_MOV_HR 0x4600
664 #define T_OPCODE_CMP_HR 0x4500
666 #define T_OPCODE_LDR_PC 0x4800
667 #define T_OPCODE_LDR_SP 0x9800
668 #define T_OPCODE_STR_SP 0x9000
669 #define T_OPCODE_LDR_IW 0x6800
670 #define T_OPCODE_STR_IW 0x6000
671 #define T_OPCODE_LDR_IH 0x8800
672 #define T_OPCODE_STR_IH 0x8000
673 #define T_OPCODE_LDR_IB 0x7800
674 #define T_OPCODE_STR_IB 0x7000
675 #define T_OPCODE_LDR_RW 0x5800
676 #define T_OPCODE_STR_RW 0x5000
677 #define T_OPCODE_LDR_RH 0x5a00
678 #define T_OPCODE_STR_RH 0x5200
679 #define T_OPCODE_LDR_RB 0x5c00
680 #define T_OPCODE_STR_RB 0x5400
682 #define T_OPCODE_PUSH 0xb400
683 #define T_OPCODE_POP 0xbc00
685 #define T_OPCODE_BRANCH 0xe000
687 #define THUMB_SIZE 2 /* Size of thumb instruction. */
688 #define THUMB_PP_PC_LR 0x0100
689 #define THUMB_LOAD_BIT 0x0800
690 #define THUMB2_LOAD_BIT 0x00100000
692 #define BAD_ARGS _("bad arguments to instruction")
693 #define BAD_SP _("r13 not allowed here")
694 #define BAD_PC _("r15 not allowed here")
695 #define BAD_COND _("instruction cannot be conditional")
696 #define BAD_OVERLAP _("registers may not be the same")
697 #define BAD_HIREG _("lo register required")
698 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* No trailing semicolon: this macro expands to a string expression and is
   used on the right-hand side of assignments and in expression contexts,
   like every other BAD_* diagnostic macro above.  The stray ';' silently
   terminated statements early (or broke compilation in expression use).  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
700 #define BAD_BRANCH _("branch must be last instruction in IT block")
701 #define BAD_NOT_IT _("instruction not allowed in IT block")
702 #define BAD_FPU _("selected FPU does not support instruction")
703 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
704 #define BAD_IT_COND _("incorrect condition in IT block")
705 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
706 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
707 #define BAD_PC_ADDRESSING \
708 _("cannot use register index with PC-relative addressing")
709 #define BAD_PC_WRITEBACK \
710 _("cannot use writeback with PC-relative addressing")
712 static struct hash_control
* arm_ops_hsh
;
713 static struct hash_control
* arm_cond_hsh
;
714 static struct hash_control
* arm_shift_hsh
;
715 static struct hash_control
* arm_psr_hsh
;
716 static struct hash_control
* arm_v7m_psr_hsh
;
717 static struct hash_control
* arm_reg_hsh
;
718 static struct hash_control
* arm_reloc_hsh
;
719 static struct hash_control
* arm_barrier_opt_hsh
;
721 /* Stuff needed to resolve the label ambiguity
730 symbolS
* last_label_seen
;
731 static int label_is_thumb_function_name
= FALSE
;
733 /* Literal pool structure. Held on a per-section
734 and per-sub-section basis. */
736 #define MAX_LITERAL_POOL_SIZE 1024
737 typedef struct literal_pool
739 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
740 unsigned int next_free_entry
;
745 struct literal_pool
* next
;
748 /* Pointer to a linked list of literal pools. */
749 literal_pool
* list_of_pools
= NULL
;
752 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
754 static struct current_it now_it
;
758 now_it_compatible (int cond
)
760 return (cond
& ~1) == (now_it
.cc
& ~1);
764 conditional_insn (void)
766 return inst
.cond
!= COND_ALWAYS
;
769 static int in_it_block (void);
771 static int handle_it_state (void);
773 static void force_automatic_it_block_close (void);
775 static void it_fsm_post_encode (void);
777 #define set_it_insn_type(type) \
780 inst.it_insn_type = type; \
781 if (handle_it_state () == FAIL) \
786 #define set_it_insn_type_nonvoid(type, failret) \
789 inst.it_insn_type = type; \
790 if (handle_it_state () == FAIL) \
795 #define set_it_insn_type_last() \
798 if (inst.cond == COND_ALWAYS) \
799 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
801 set_it_insn_type (INSIDE_IT_LAST_INSN); \
807 /* This array holds the chars that always start a comment. If the
808 pre-processor is disabled, these aren't very useful. */
809 const char comment_chars
[] = "@";
811 /* This array holds the chars that only start a comment at the beginning of
812 a line. If the line seems to have the form '# 123 filename'
813 .line and .file directives will appear in the pre-processed output. */
814 /* Note that input_file.c hand checks for '#' at the beginning of the
815 first line of the input file. This is because the compiler outputs
816 #NO_APP at the beginning of its output. */
817 /* Also note that comments like this one will always work. */
818 const char line_comment_chars
[] = "#";
820 const char line_separator_chars
[] = ";";
822 /* Chars that can be used to separate mant
823 from exp in floating point numbers. */
824 const char EXP_CHARS
[] = "eE";
826 /* Chars that mean this number is a floating point constant. */
830 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
832 /* Prefix characters that indicate the start of an immediate
834 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
836 /* Separator character handling. */
838 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
841 skip_past_char (char ** str
, char c
)
852 #define skip_past_comma(str) skip_past_char (str, ',')
854 /* Arithmetic expressions (possibly involving symbols). */
856 /* Return TRUE if anything in the expression is a bignum. */
859 walk_no_bignums (symbolS
* sp
)
861 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
864 if (symbol_get_value_expression (sp
)->X_add_symbol
)
866 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
867 || (symbol_get_value_expression (sp
)->X_op_symbol
868 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
874 static int in_my_get_expression
= 0;
876 /* Third argument to my_get_expression. */
877 #define GE_NO_PREFIX 0
878 #define GE_IMM_PREFIX 1
879 #define GE_OPT_PREFIX 2
880 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
881 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
882 #define GE_OPT_PREFIX_BIG 3
885 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
890 /* In unified syntax, all prefixes are optional. */
892 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
897 case GE_NO_PREFIX
: break;
899 if (!is_immediate_prefix (**str
))
901 inst
.error
= _("immediate expression requires a # prefix");
907 case GE_OPT_PREFIX_BIG
:
908 if (is_immediate_prefix (**str
))
914 memset (ep
, 0, sizeof (expressionS
));
916 save_in
= input_line_pointer
;
917 input_line_pointer
= *str
;
918 in_my_get_expression
= 1;
919 seg
= expression (ep
);
920 in_my_get_expression
= 0;
922 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
924 /* We found a bad or missing expression in md_operand(). */
925 *str
= input_line_pointer
;
926 input_line_pointer
= save_in
;
927 if (inst
.error
== NULL
)
928 inst
.error
= (ep
->X_op
== O_absent
929 ? _("missing expression") :_("bad expression"));
934 if (seg
!= absolute_section
935 && seg
!= text_section
936 && seg
!= data_section
937 && seg
!= bss_section
938 && seg
!= undefined_section
)
940 inst
.error
= _("bad segment");
941 *str
= input_line_pointer
;
942 input_line_pointer
= save_in
;
949 /* Get rid of any bignums now, so that we don't generate an error for which
950 we can't establish a line number later on. Big numbers are never valid
951 in instructions, which is where this routine is always called. */
952 if (prefix_mode
!= GE_OPT_PREFIX_BIG
953 && (ep
->X_op
== O_big
955 && (walk_no_bignums (ep
->X_add_symbol
)
957 && walk_no_bignums (ep
->X_op_symbol
))))))
959 inst
.error
= _("invalid constant");
960 *str
= input_line_pointer
;
961 input_line_pointer
= save_in
;
965 *str
= input_line_pointer
;
966 input_line_pointer
= save_in
;
970 /* Turn a string in input_line_pointer into a floating point constant
971 of type TYPE, and store the appropriate bytes in *LITP. The number
972 of LITTLENUMS emitted is stored in *SIZEP. An error message is
973 returned, or NULL on OK.
975 Note that fp constants aren't represent in the normal way on the ARM.
976 In big endian mode, things are as expected. However, in little endian
977 mode fp constants are big-endian word-wise, and little-endian byte-wise
978 within the words. For example, (double) 1.1 in big endian mode is
979 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
980 the byte sequence 99 99 f1 3f 9a 99 99 99.
982 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
985 md_atof (int type
, char * litP
, int * sizeP
)
988 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1020 return _("Unrecognized or unsupported floating point constant");
1023 t
= atof_ieee (input_line_pointer
, type
, words
);
1025 input_line_pointer
= t
;
1026 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1028 if (target_big_endian
)
1030 for (i
= 0; i
< prec
; i
++)
1032 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1033 litP
+= sizeof (LITTLENUM_TYPE
);
1038 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1039 for (i
= prec
- 1; i
>= 0; i
--)
1041 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1042 litP
+= sizeof (LITTLENUM_TYPE
);
1045 /* For a 4 byte float the order of elements in `words' is 1 0.
1046 For an 8 byte float the order is 1 0 3 2. */
1047 for (i
= 0; i
< prec
; i
+= 2)
1049 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1050 sizeof (LITTLENUM_TYPE
));
1051 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1052 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1053 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1060 /* We handle all bad expressions here, so that we can report the faulty
1061 instruction in the error message. */
1063 md_operand (expressionS
* exp
)
1065 if (in_my_get_expression
)
1066 exp
->X_op
= O_illegal
;
1069 /* Immediate values. */
1071 /* Generic immediate-value read function for use in directives.
1072 Accepts anything that 'expression' can fold to a constant.
1073 *val receives the number. */
1076 immediate_for_directive (int *val
)
1079 exp
.X_op
= O_illegal
;
1081 if (is_immediate_prefix (*input_line_pointer
))
1083 input_line_pointer
++;
1087 if (exp
.X_op
!= O_constant
)
1089 as_bad (_("expected #constant"));
1090 ignore_rest_of_line ();
1093 *val
= exp
.X_add_number
;
1098 /* Register parsing. */
1100 /* Generic register parser. CCP points to what should be the
1101 beginning of a register name. If it is indeed a valid register
1102 name, advance CCP over it and return the reg_entry structure;
1103 otherwise return NULL. Does not issue diagnostics. */
1105 static struct reg_entry
*
1106 arm_reg_parse_multi (char **ccp
)
1110 struct reg_entry
*reg
;
1112 #ifdef REGISTER_PREFIX
1113 if (*start
!= REGISTER_PREFIX
)
1117 #ifdef OPTIONAL_REGISTER_PREFIX
1118 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1123 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1128 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1130 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1140 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1141 enum arm_reg_type type
)
1143 /* Alternative syntaxes are accepted for a few register classes. */
1150 /* Generic coprocessor register names are allowed for these. */
1151 if (reg
&& reg
->type
== REG_TYPE_CN
)
1156 /* For backward compatibility, a bare number is valid here. */
1158 unsigned long processor
= strtoul (start
, ccp
, 10);
1159 if (*ccp
!= start
&& processor
<= 15)
1163 case REG_TYPE_MMXWC
:
1164 /* WC includes WCG. ??? I'm not sure this is true for all
1165 instructions that take WC registers. */
1166 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1177 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1178 return value is the register number or FAIL. */
1181 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1184 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1187 /* Do not allow a scalar (reg+index) to parse as a register. */
1188 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1191 if (reg
&& reg
->type
== type
)
1194 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1201 /* Parse a Neon type specifier. *STR should point at the leading '.'
1202 character. Does no verification at this stage that the type fits the opcode
1209 Can all be legally parsed by this function.
1211 Fills in neon_type struct pointer with parsed information, and updates STR
1212 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1213 type, FAIL if not. */
1216 parse_neon_type (struct neon_type
*type
, char **str
)
1223 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1225 enum neon_el_type thistype
= NT_untyped
;
1226 unsigned thissize
= -1u;
1233 /* Just a size without an explicit type. */
1237 switch (TOLOWER (*ptr
))
1239 case 'i': thistype
= NT_integer
; break;
1240 case 'f': thistype
= NT_float
; break;
1241 case 'p': thistype
= NT_poly
; break;
1242 case 's': thistype
= NT_signed
; break;
1243 case 'u': thistype
= NT_unsigned
; break;
1245 thistype
= NT_float
;
1250 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1256 /* .f is an abbreviation for .f32. */
1257 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1262 thissize
= strtoul (ptr
, &ptr
, 10);
1264 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1267 as_bad (_("bad size %d in type specifier"), thissize
);
1275 type
->el
[type
->elems
].type
= thistype
;
1276 type
->el
[type
->elems
].size
= thissize
;
1281 /* Empty/missing type is not a successful parse. */
1282 if (type
->elems
== 0)
1290 /* Errors may be set multiple times during parsing or bit encoding
1291 (particularly in the Neon bits), but usually the earliest error which is set
1292 will be the most meaningful. Avoid overwriting it with later (cascading)
1293 errors by calling this function. */
1296 first_error (const char *err
)
1302 /* Parse a single type, e.g. ".s32", leading period included. */
1304 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1307 struct neon_type optype
;
1311 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1313 if (optype
.elems
== 1)
1314 *vectype
= optype
.el
[0];
1317 first_error (_("only one type should be specified for operand"));
1323 first_error (_("vector type expected"));
1335 /* Special meanings for indices (which have a range of 0-7), which will fit into
1338 #define NEON_ALL_LANES 15
1339 #define NEON_INTERLEAVE_LANES 14
1341 /* Parse either a register or a scalar, with an optional type. Return the
1342 register number, and optionally fill in the actual type of the register
1343 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1344 type/index information in *TYPEINFO. */
1347 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1348 enum arm_reg_type
*rtype
,
1349 struct neon_typed_alias
*typeinfo
)
1352 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1353 struct neon_typed_alias atype
;
1354 struct neon_type_el parsetype
;
1358 atype
.eltype
.type
= NT_invtype
;
1359 atype
.eltype
.size
= -1;
1361 /* Try alternate syntax for some types of register. Note these are mutually
1362 exclusive with the Neon syntax extensions. */
1365 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1373 /* Undo polymorphism when a set of register types may be accepted. */
1374 if ((type
== REG_TYPE_NDQ
1375 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1376 || (type
== REG_TYPE_VFSD
1377 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1378 || (type
== REG_TYPE_NSDQ
1379 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1380 || reg
->type
== REG_TYPE_NQ
))
1381 || (type
== REG_TYPE_MMXWC
1382 && (reg
->type
== REG_TYPE_MMXWCG
)))
1383 type
= (enum arm_reg_type
) reg
->type
;
1385 if (type
!= reg
->type
)
1391 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1393 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1395 first_error (_("can't redefine type for operand"));
1398 atype
.defined
|= NTA_HASTYPE
;
1399 atype
.eltype
= parsetype
;
1402 if (skip_past_char (&str
, '[') == SUCCESS
)
1404 if (type
!= REG_TYPE_VFD
)
1406 first_error (_("only D registers may be indexed"));
1410 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1412 first_error (_("can't change index for operand"));
1416 atype
.defined
|= NTA_HASINDEX
;
1418 if (skip_past_char (&str
, ']') == SUCCESS
)
1419 atype
.index
= NEON_ALL_LANES
;
1424 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1426 if (exp
.X_op
!= O_constant
)
1428 first_error (_("constant expression required"));
1432 if (skip_past_char (&str
, ']') == FAIL
)
1435 atype
.index
= exp
.X_add_number
;
1450 /* Like arm_reg_parse, but allow allow the following extra features:
1451 - If RTYPE is non-zero, return the (possibly restricted) type of the
1452 register (e.g. Neon double or quad reg when either has been requested).
1453 - If this is a Neon vector type with additional type information, fill
1454 in the struct pointed to by VECTYPE (if non-NULL).
1455 This function will fault on encountering a scalar. */
1458 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1459 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1461 struct neon_typed_alias atype
;
1463 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1468 /* Do not allow a scalar (reg+index) to parse as a register. */
1469 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1471 first_error (_("register operand expected, but got scalar"));
1476 *vectype
= atype
.eltype
;
1483 #define NEON_SCALAR_REG(X) ((X) >> 4)
1484 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1486 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1487 have enough information to be able to do a good job bounds-checking. So, we
1488 just do easy checks here, and do further checks later. */
1491 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1495 struct neon_typed_alias atype
;
1497 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1499 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1502 if (atype
.index
== NEON_ALL_LANES
)
1504 first_error (_("scalar must have an index"));
1507 else if (atype
.index
>= 64 / elsize
)
1509 first_error (_("scalar index out of range"));
1514 *type
= atype
.eltype
;
1518 return reg
* 16 + atype
.index
;
1521 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1524 parse_reg_list (char ** strp
)
1526 char * str
= * strp
;
1530 /* We come back here if we get ranges concatenated by '+' or '|'. */
1545 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1547 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1557 first_error (_("bad range in register list"));
1561 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1563 if (range
& (1 << i
))
1565 (_("Warning: duplicated register (r%d) in register list"),
1573 if (range
& (1 << reg
))
1574 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1576 else if (reg
<= cur_reg
)
1577 as_tsktsk (_("Warning: register range not in ascending order"));
1582 while (skip_past_comma (&str
) != FAIL
1583 || (in_range
= 1, *str
++ == '-'));
1588 first_error (_("missing `}'"));
1596 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1599 if (exp
.X_op
== O_constant
)
1601 if (exp
.X_add_number
1602 != (exp
.X_add_number
& 0x0000ffff))
1604 inst
.error
= _("invalid register mask");
1608 if ((range
& exp
.X_add_number
) != 0)
1610 int regno
= range
& exp
.X_add_number
;
1613 regno
= (1 << regno
) - 1;
1615 (_("Warning: duplicated register (r%d) in register list"),
1619 range
|= exp
.X_add_number
;
1623 if (inst
.reloc
.type
!= 0)
1625 inst
.error
= _("expression too complex");
1629 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1630 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1631 inst
.reloc
.pc_rel
= 0;
1635 if (*str
== '|' || *str
== '+')
1641 while (another_range
);
1647 /* Types of registers in a list. */
1656 /* Parse a VFP register list. If the string is invalid return FAIL.
1657 Otherwise return the number of registers, and set PBASE to the first
1658 register. Parses registers of type ETYPE.
1659 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1660 - Q registers can be used to specify pairs of D registers
1661 - { } can be omitted from around a singleton register list
1662 FIXME: This is not implemented, as it would require backtracking in
1665 This could be done (the meaning isn't really ambiguous), but doesn't
1666 fit in well with the current parsing framework.
1667 - 32 D registers may be used (also true for VFPv3).
1668 FIXME: Types are ignored in these register lists, which is probably a
1672 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1677 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1681 unsigned long mask
= 0;
1686 inst
.error
= _("expecting {");
1695 regtype
= REG_TYPE_VFS
;
1700 regtype
= REG_TYPE_VFD
;
1703 case REGLIST_NEON_D
:
1704 regtype
= REG_TYPE_NDQ
;
1708 if (etype
!= REGLIST_VFP_S
)
1710 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1711 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1715 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1718 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1725 base_reg
= max_regs
;
1729 int setmask
= 1, addregs
= 1;
1731 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1733 if (new_base
== FAIL
)
1735 first_error (_(reg_expected_msgs
[regtype
]));
1739 if (new_base
>= max_regs
)
1741 first_error (_("register out of range in list"));
1745 /* Note: a value of 2 * n is returned for the register Q<n>. */
1746 if (regtype
== REG_TYPE_NQ
)
1752 if (new_base
< base_reg
)
1753 base_reg
= new_base
;
1755 if (mask
& (setmask
<< new_base
))
1757 first_error (_("invalid register list"));
1761 if ((mask
>> new_base
) != 0 && ! warned
)
1763 as_tsktsk (_("register list not in ascending order"));
1767 mask
|= setmask
<< new_base
;
1770 if (*str
== '-') /* We have the start of a range expression */
1776 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1779 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1783 if (high_range
>= max_regs
)
1785 first_error (_("register out of range in list"));
1789 if (regtype
== REG_TYPE_NQ
)
1790 high_range
= high_range
+ 1;
1792 if (high_range
<= new_base
)
1794 inst
.error
= _("register range not in ascending order");
1798 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1800 if (mask
& (setmask
<< new_base
))
1802 inst
.error
= _("invalid register list");
1806 mask
|= setmask
<< new_base
;
1811 while (skip_past_comma (&str
) != FAIL
);
1815 /* Sanity check -- should have raised a parse error above. */
1816 if (count
== 0 || count
> max_regs
)
1821 /* Final test -- the registers must be consecutive. */
1823 for (i
= 0; i
< count
; i
++)
1825 if ((mask
& (1u << i
)) == 0)
1827 inst
.error
= _("non-contiguous register range");
1837 /* True if two alias types are the same. */
1840 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1848 if (a
->defined
!= b
->defined
)
1851 if ((a
->defined
& NTA_HASTYPE
) != 0
1852 && (a
->eltype
.type
!= b
->eltype
.type
1853 || a
->eltype
.size
!= b
->eltype
.size
))
1856 if ((a
->defined
& NTA_HASINDEX
) != 0
1857 && (a
->index
!= b
->index
))
1863 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1864 The base register is put in *PBASE.
1865 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1867 The register stride (minus one) is put in bit 4 of the return value.
1868 Bits [6:5] encode the list length (minus one).
1869 The type of the list elements is put in *ELTYPE, if non-NULL. */
1871 #define NEON_LANE(X) ((X) & 0xf)
1872 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1873 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1876 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1877 struct neon_type_el
*eltype
)
1884 int leading_brace
= 0;
1885 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1886 const char *const incr_error
= _("register stride must be 1 or 2");
1887 const char *const type_error
= _("mismatched element/structure types in list");
1888 struct neon_typed_alias firsttype
;
1890 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1895 struct neon_typed_alias atype
;
1896 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1900 first_error (_(reg_expected_msgs
[rtype
]));
1907 if (rtype
== REG_TYPE_NQ
)
1913 else if (reg_incr
== -1)
1915 reg_incr
= getreg
- base_reg
;
1916 if (reg_incr
< 1 || reg_incr
> 2)
1918 first_error (_(incr_error
));
1922 else if (getreg
!= base_reg
+ reg_incr
* count
)
1924 first_error (_(incr_error
));
1928 if (! neon_alias_types_same (&atype
, &firsttype
))
1930 first_error (_(type_error
));
1934 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1938 struct neon_typed_alias htype
;
1939 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1941 lane
= NEON_INTERLEAVE_LANES
;
1942 else if (lane
!= NEON_INTERLEAVE_LANES
)
1944 first_error (_(type_error
));
1949 else if (reg_incr
!= 1)
1951 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1955 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1958 first_error (_(reg_expected_msgs
[rtype
]));
1961 if (! neon_alias_types_same (&htype
, &firsttype
))
1963 first_error (_(type_error
));
1966 count
+= hireg
+ dregs
- getreg
;
1970 /* If we're using Q registers, we can't use [] or [n] syntax. */
1971 if (rtype
== REG_TYPE_NQ
)
1977 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1981 else if (lane
!= atype
.index
)
1983 first_error (_(type_error
));
1987 else if (lane
== -1)
1988 lane
= NEON_INTERLEAVE_LANES
;
1989 else if (lane
!= NEON_INTERLEAVE_LANES
)
1991 first_error (_(type_error
));
1996 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1998 /* No lane set by [x]. We must be interleaving structures. */
2000 lane
= NEON_INTERLEAVE_LANES
;
2003 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2004 || (count
> 1 && reg_incr
== -1))
2006 first_error (_("error parsing element/structure list"));
2010 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2012 first_error (_("expected }"));
2020 *eltype
= firsttype
.eltype
;
2025 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2028 /* Parse an explicit relocation suffix on an expression. This is
2029 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2030 arm_reloc_hsh contains no entries, so this function can only
2031 succeed if there is no () after the word. Returns -1 on error,
2032 BFD_RELOC_UNUSED if there wasn't any suffix. */
2034 parse_reloc (char **str
)
2036 struct reloc_entry
*r
;
2040 return BFD_RELOC_UNUSED
;
2045 while (*q
&& *q
!= ')' && *q
!= ',')
2050 if ((r
= (struct reloc_entry
*)
2051 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2058 /* Directives: register aliases. */
2060 static struct reg_entry
*
2061 insert_reg_alias (char *str
, int number
, int type
)
2063 struct reg_entry
*new_reg
;
2066 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2068 if (new_reg
->builtin
)
2069 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2071 /* Only warn about a redefinition if it's not defined as the
2073 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2074 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2079 name
= xstrdup (str
);
2080 new_reg
= (struct reg_entry
*) xmalloc (sizeof (struct reg_entry
));
2082 new_reg
->name
= name
;
2083 new_reg
->number
= number
;
2084 new_reg
->type
= type
;
2085 new_reg
->builtin
= FALSE
;
2086 new_reg
->neon
= NULL
;
2088 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2095 insert_neon_reg_alias (char *str
, int number
, int type
,
2096 struct neon_typed_alias
*atype
)
2098 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2102 first_error (_("attempt to redefine typed alias"));
2108 reg
->neon
= (struct neon_typed_alias
*)
2109 xmalloc (sizeof (struct neon_typed_alias
));
2110 *reg
->neon
= *atype
;
2114 /* Look for the .req directive. This is of the form:
2116 new_register_name .req existing_register_name
2118 If we find one, or if it looks sufficiently like one that we want to
2119 handle any error here, return TRUE. Otherwise return FALSE. */
2122 create_register_alias (char * newname
, char *p
)
2124 struct reg_entry
*old
;
2125 char *oldname
, *nbuf
;
2128 /* The input scrubber ensures that whitespace after the mnemonic is
2129 collapsed to single spaces. */
2131 if (strncmp (oldname
, " .req ", 6) != 0)
2135 if (*oldname
== '\0')
2138 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2141 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2145 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2146 the desired alias name, and p points to its end. If not, then
2147 the desired alias name is in the global original_case_string. */
2148 #ifdef TC_CASE_SENSITIVE
2151 newname
= original_case_string
;
2152 nlen
= strlen (newname
);
2155 nbuf
= (char *) alloca (nlen
+ 1);
2156 memcpy (nbuf
, newname
, nlen
);
2159 /* Create aliases under the new name as stated; an all-lowercase
2160 version of the new name; and an all-uppercase version of the new
2162 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2164 for (p
= nbuf
; *p
; p
++)
2167 if (strncmp (nbuf
, newname
, nlen
))
2169 /* If this attempt to create an additional alias fails, do not bother
2170 trying to create the all-lower case alias. We will fail and issue
2171 a second, duplicate error message. This situation arises when the
2172 programmer does something like:
2175 The second .req creates the "Foo" alias but then fails to create
2176 the artificial FOO alias because it has already been created by the
2178 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2182 for (p
= nbuf
; *p
; p
++)
2185 if (strncmp (nbuf
, newname
, nlen
))
2186 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2192 /* Create a Neon typed/indexed register alias using directives, e.g.:
2197 These typed registers can be used instead of the types specified after the
2198 Neon mnemonic, so long as all operands given have types. Types can also be
2199 specified directly, e.g.:
2200 vadd d0.s32, d1.s32, d2.s32 */
2203 create_neon_reg_alias (char *newname
, char *p
)
2205 enum arm_reg_type basetype
;
2206 struct reg_entry
*basereg
;
2207 struct reg_entry mybasereg
;
2208 struct neon_type ntype
;
2209 struct neon_typed_alias typeinfo
;
2210 char *namebuf
, *nameend
;
2213 typeinfo
.defined
= 0;
2214 typeinfo
.eltype
.type
= NT_invtype
;
2215 typeinfo
.eltype
.size
= -1;
2216 typeinfo
.index
= -1;
2220 if (strncmp (p
, " .dn ", 5) == 0)
2221 basetype
= REG_TYPE_VFD
;
2222 else if (strncmp (p
, " .qn ", 5) == 0)
2223 basetype
= REG_TYPE_NQ
;
2232 basereg
= arm_reg_parse_multi (&p
);
2234 if (basereg
&& basereg
->type
!= basetype
)
2236 as_bad (_("bad type for register"));
2240 if (basereg
== NULL
)
2243 /* Try parsing as an integer. */
2244 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2245 if (exp
.X_op
!= O_constant
)
2247 as_bad (_("expression must be constant"));
2250 basereg
= &mybasereg
;
2251 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2257 typeinfo
= *basereg
->neon
;
2259 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2261 /* We got a type. */
2262 if (typeinfo
.defined
& NTA_HASTYPE
)
2264 as_bad (_("can't redefine the type of a register alias"));
2268 typeinfo
.defined
|= NTA_HASTYPE
;
2269 if (ntype
.elems
!= 1)
2271 as_bad (_("you must specify a single type only"));
2274 typeinfo
.eltype
= ntype
.el
[0];
2277 if (skip_past_char (&p
, '[') == SUCCESS
)
2280 /* We got a scalar index. */
2282 if (typeinfo
.defined
& NTA_HASINDEX
)
2284 as_bad (_("can't redefine the index of a scalar alias"));
2288 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2290 if (exp
.X_op
!= O_constant
)
2292 as_bad (_("scalar index must be constant"));
2296 typeinfo
.defined
|= NTA_HASINDEX
;
2297 typeinfo
.index
= exp
.X_add_number
;
2299 if (skip_past_char (&p
, ']') == FAIL
)
2301 as_bad (_("expecting ]"));
2306 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2307 the desired alias name, and p points to its end. If not, then
2308 the desired alias name is in the global original_case_string. */
2309 #ifdef TC_CASE_SENSITIVE
2310 namelen
= nameend
- newname
;
2312 newname
= original_case_string
;
2313 namelen
= strlen (newname
);
2316 namebuf
= (char *) alloca (namelen
+ 1);
2317 strncpy (namebuf
, newname
, namelen
);
2318 namebuf
[namelen
] = '\0';
2320 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2321 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2323 /* Insert name in all uppercase. */
2324 for (p
= namebuf
; *p
; p
++)
2327 if (strncmp (namebuf
, newname
, namelen
))
2328 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2329 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2331 /* Insert name in all lowercase. */
2332 for (p
= namebuf
; *p
; p
++)
2335 if (strncmp (namebuf
, newname
, namelen
))
2336 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2337 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2342 /* Should never be called, as .req goes between the alias and the
2343 register name, not at the beginning of the line. */
2346 s_req (int a ATTRIBUTE_UNUSED
)
2348 as_bad (_("invalid syntax for .req directive"));
2352 s_dn (int a ATTRIBUTE_UNUSED
)
2354 as_bad (_("invalid syntax for .dn directive"));
2358 s_qn (int a ATTRIBUTE_UNUSED
)
2360 as_bad (_("invalid syntax for .qn directive"));
2363 /* The .unreq directive deletes an alias which was previously defined
2364 by .req. For example:
2370 s_unreq (int a ATTRIBUTE_UNUSED
)
2375 name
= input_line_pointer
;
2377 while (*input_line_pointer
!= 0
2378 && *input_line_pointer
!= ' '
2379 && *input_line_pointer
!= '\n')
2380 ++input_line_pointer
;
2382 saved_char
= *input_line_pointer
;
2383 *input_line_pointer
= 0;
2386 as_bad (_("invalid syntax for .unreq directive"));
2389 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2393 as_bad (_("unknown register alias '%s'"), name
);
2394 else if (reg
->builtin
)
2395 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2402 hash_delete (arm_reg_hsh
, name
, FALSE
);
2403 free ((char *) reg
->name
);
2408 /* Also locate the all upper case and all lower case versions.
2409 Do not complain if we cannot find one or the other as it
2410 was probably deleted above. */
2412 nbuf
= strdup (name
);
2413 for (p
= nbuf
; *p
; p
++)
2415 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2418 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2419 free ((char *) reg
->name
);
2425 for (p
= nbuf
; *p
; p
++)
2427 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2430 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2431 free ((char *) reg
->name
);
2441 *input_line_pointer
= saved_char
;
2442 demand_empty_rest_of_line ();
2445 /* Directives: Instruction set selection. */
2448 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2449 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2450 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2451 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2453 /* Create a new mapping symbol for the transition to STATE. */
2456 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2459 const char * symname
;
2466 type
= BSF_NO_FLAGS
;
2470 type
= BSF_NO_FLAGS
;
2474 type
= BSF_NO_FLAGS
;
2480 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2481 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2486 THUMB_SET_FUNC (symbolP
, 0);
2487 ARM_SET_THUMB (symbolP
, 0);
2488 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2492 THUMB_SET_FUNC (symbolP
, 1);
2493 ARM_SET_THUMB (symbolP
, 1);
2494 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2502 /* Save the mapping symbols for future reference. Also check that
2503 we do not place two mapping symbols at the same offset within a
2504 frag. We'll handle overlap between frags in
2505 check_mapping_symbols.
2507 If .fill or other data filling directive generates zero sized data,
2508 the mapping symbol for the following code will have the same value
2509 as the one generated for the data filling directive. In this case,
2510 we replace the old symbol with the new one at the same address. */
2513 if (frag
->tc_frag_data
.first_map
!= NULL
)
2515 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2516 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2518 frag
->tc_frag_data
.first_map
= symbolP
;
2520 if (frag
->tc_frag_data
.last_map
!= NULL
)
2522 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2523 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2524 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2526 frag
->tc_frag_data
.last_map
= symbolP
;
2529 /* We must sometimes convert a region marked as code to data during
2530 code alignment, if an odd number of bytes have to be padded. The
2531 code mapping symbol is pushed to an aligned address. */
2534 insert_data_mapping_symbol (enum mstate state
,
2535 valueT value
, fragS
*frag
, offsetT bytes
)
2537 /* If there was already a mapping symbol, remove it. */
2538 if (frag
->tc_frag_data
.last_map
!= NULL
2539 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2541 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2545 know (frag
->tc_frag_data
.first_map
== symp
);
2546 frag
->tc_frag_data
.first_map
= NULL
;
2548 frag
->tc_frag_data
.last_map
= NULL
;
2549 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2552 make_mapping_symbol (MAP_DATA
, value
, frag
);
2553 make_mapping_symbol (state
, value
+ bytes
, frag
);
2556 static void mapping_state_2 (enum mstate state
, int max_chars
);
2558 /* Set the mapping state to STATE. Only call this when about to
2559 emit some STATE bytes to the file. */
2562 mapping_state (enum mstate state
)
2564 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2566 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2568 if (mapstate
== state
)
2569 /* The mapping symbol has already been emitted.
2570 There is nothing else to do. */
2572 else if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2573 /* This case will be evaluated later in the next else. */
2575 else if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2576 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2578 /* Only add the symbol if the offset is > 0:
2579 if we're at the first frag, check it's size > 0;
2580 if we're not at the first frag, then for sure
2581 the offset is > 0. */
2582 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2583 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2586 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2589 mapping_state_2 (state
, 0);
2593 /* Same as mapping_state, but MAX_CHARS bytes have already been
2594 allocated. Put the mapping symbol that far back. */
2597 mapping_state_2 (enum mstate state
, int max_chars
)
2599 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2601 if (!SEG_NORMAL (now_seg
))
2604 if (mapstate
== state
)
2605 /* The mapping symbol has already been emitted.
2606 There is nothing else to do. */
2609 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2610 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2613 #define mapping_state(x) ((void)0)
2614 #define mapping_state_2(x, y) ((void)0)
2617 /* Find the real, Thumb encoded start of a Thumb function. */
2621 find_real_start (symbolS
* symbolP
)
2624 const char * name
= S_GET_NAME (symbolP
);
2625 symbolS
* new_target
;
2627 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2628 #define STUB_NAME ".real_start_of"
2633 /* The compiler may generate BL instructions to local labels because
2634 it needs to perform a branch to a far away location. These labels
2635 do not have a corresponding ".real_start_of" label. We check
2636 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2637 the ".real_start_of" convention for nonlocal branches. */
2638 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2641 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2642 new_target
= symbol_find (real_start
);
2644 if (new_target
== NULL
)
2646 as_warn (_("Failed to find real start of function: %s\n"), name
);
2647 new_target
= symbolP
;
2655 opcode_select (int width
)
2662 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2663 as_bad (_("selected processor does not support THUMB opcodes"));
2666 /* No need to force the alignment, since we will have been
2667 coming from ARM mode, which is word-aligned. */
2668 record_alignment (now_seg
, 1);
2675 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2676 as_bad (_("selected processor does not support ARM opcodes"));
2681 frag_align (2, 0, 0);
2683 record_alignment (now_seg
, 1);
2688 as_bad (_("invalid instruction size selected (%d)"), width
);
2693 s_arm (int ignore ATTRIBUTE_UNUSED
)
2696 demand_empty_rest_of_line ();
2700 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2703 demand_empty_rest_of_line ();
2707 s_code (int unused ATTRIBUTE_UNUSED
)
2711 temp
= get_absolute_expression ();
2716 opcode_select (temp
);
2720 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2725 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2727 /* If we are not already in thumb mode go into it, EVEN if
2728 the target processor does not support thumb instructions.
2729 This is used by gcc/config/arm/lib1funcs.asm for example
2730 to compile interworking support functions even if the
2731 target processor should not support interworking. */
2735 record_alignment (now_seg
, 1);
2738 demand_empty_rest_of_line ();
2742 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2746 /* The following label is the name/address of the start of a Thumb function.
2747 We need to know this for the interworking support. */
2748 label_is_thumb_function_name
= TRUE
;
2751 /* Perform a .set directive, but also mark the alias as
2752 being a thumb function. */
2755 s_thumb_set (int equiv
)
2757 /* XXX the following is a duplicate of the code for s_set() in read.c
2758 We cannot just call that code as we need to get at the symbol that
2765 /* Especial apologies for the random logic:
2766 This just grew, and could be parsed much more simply!
2768 name
= input_line_pointer
;
2769 delim
= get_symbol_end ();
2770 end_name
= input_line_pointer
;
2773 if (*input_line_pointer
!= ',')
2776 as_bad (_("expected comma after name \"%s\""), name
);
2778 ignore_rest_of_line ();
2782 input_line_pointer
++;
2785 if (name
[0] == '.' && name
[1] == '\0')
2787 /* XXX - this should not happen to .thumb_set. */
2791 if ((symbolP
= symbol_find (name
)) == NULL
2792 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2795 /* When doing symbol listings, play games with dummy fragments living
2796 outside the normal fragment chain to record the file and line info
2798 if (listing
& LISTING_SYMBOLS
)
2800 extern struct list_info_struct
* listing_tail
;
2801 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2803 memset (dummy_frag
, 0, sizeof (fragS
));
2804 dummy_frag
->fr_type
= rs_fill
;
2805 dummy_frag
->line
= listing_tail
;
2806 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2807 dummy_frag
->fr_symbol
= symbolP
;
2811 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2814 /* "set" symbols are local unless otherwise specified. */
2815 SF_SET_LOCAL (symbolP
);
2816 #endif /* OBJ_COFF */
2817 } /* Make a new symbol. */
2819 symbol_table_insert (symbolP
);
2824 && S_IS_DEFINED (symbolP
)
2825 && S_GET_SEGMENT (symbolP
) != reg_section
)
2826 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2828 pseudo_set (symbolP
);
2830 demand_empty_rest_of_line ();
2832 /* XXX Now we come to the Thumb specific bit of code. */
2834 THUMB_SET_FUNC (symbolP
, 1);
2835 ARM_SET_THUMB (symbolP
, 1);
2836 #if defined OBJ_ELF || defined OBJ_COFF
2837 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2841 /* Directives: Mode selection. */
2843 /* .syntax [unified|divided] - choose the new unified syntax
2844 (same for Arm and Thumb encoding, modulo slight differences in what
2845 can be represented) or the old divergent syntax for each mode. */
2847 s_syntax (int unused ATTRIBUTE_UNUSED
)
2851 name
= input_line_pointer
;
2852 delim
= get_symbol_end ();
2854 if (!strcasecmp (name
, "unified"))
2855 unified_syntax
= TRUE
;
2856 else if (!strcasecmp (name
, "divided"))
2857 unified_syntax
= FALSE
;
2860 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2863 *input_line_pointer
= delim
;
2864 demand_empty_rest_of_line ();
2867 /* Directives: sectioning and alignment. */
2869 /* Same as s_align_ptwo but align 0 => align 2. */
2872 s_align (int unused ATTRIBUTE_UNUSED
)
2877 long max_alignment
= 15;
2879 temp
= get_absolute_expression ();
2880 if (temp
> max_alignment
)
2881 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2884 as_bad (_("alignment negative. 0 assumed."));
2888 if (*input_line_pointer
== ',')
2890 input_line_pointer
++;
2891 temp_fill
= get_absolute_expression ();
2903 /* Only make a frag if we HAVE to. */
2904 if (temp
&& !need_pass_2
)
2906 if (!fill_p
&& subseg_text_p (now_seg
))
2907 frag_align_code (temp
, 0);
2909 frag_align (temp
, (int) temp_fill
, 0);
2911 demand_empty_rest_of_line ();
2913 record_alignment (now_seg
, temp
);
2917 s_bss (int ignore ATTRIBUTE_UNUSED
)
2919 /* We don't support putting frags in the BSS segment, we fake it by
2920 marking in_bss, then looking at s_skip for clues. */
2921 subseg_set (bss_section
, 0);
2922 demand_empty_rest_of_line ();
2924 #ifdef md_elf_section_change_hook
2925 md_elf_section_change_hook ();
2930 s_even (int ignore ATTRIBUTE_UNUSED
)
2932 /* Never make frag if expect extra pass. */
2934 frag_align (1, 0, 0);
2936 record_alignment (now_seg
, 1);
2938 demand_empty_rest_of_line ();
2941 /* Directives: Literal pools. */
2943 static literal_pool
*
2944 find_literal_pool (void)
2946 literal_pool
* pool
;
2948 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
2950 if (pool
->section
== now_seg
2951 && pool
->sub_section
== now_subseg
)
2958 static literal_pool
*
2959 find_or_make_literal_pool (void)
2961 /* Next literal pool ID number. */
2962 static unsigned int latest_pool_num
= 1;
2963 literal_pool
* pool
;
2965 pool
= find_literal_pool ();
2969 /* Create a new pool. */
2970 pool
= (literal_pool
*) xmalloc (sizeof (* pool
));
2974 pool
->next_free_entry
= 0;
2975 pool
->section
= now_seg
;
2976 pool
->sub_section
= now_subseg
;
2977 pool
->next
= list_of_pools
;
2978 pool
->symbol
= NULL
;
2980 /* Add it to the list. */
2981 list_of_pools
= pool
;
2984 /* New pools, and emptied pools, will have a NULL symbol. */
2985 if (pool
->symbol
== NULL
)
2987 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
2988 (valueT
) 0, &zero_address_frag
);
2989 pool
->id
= latest_pool_num
++;
2996 /* Add the literal in the global 'inst'
2997 structure to the relevant literal pool. */
3000 add_to_lit_pool (void)
3002 literal_pool
* pool
;
3005 pool
= find_or_make_literal_pool ();
3007 /* Check if this literal value is already in the pool. */
3008 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3010 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3011 && (inst
.reloc
.exp
.X_op
== O_constant
)
3012 && (pool
->literals
[entry
].X_add_number
3013 == inst
.reloc
.exp
.X_add_number
)
3014 && (pool
->literals
[entry
].X_unsigned
3015 == inst
.reloc
.exp
.X_unsigned
))
3018 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3019 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3020 && (pool
->literals
[entry
].X_add_number
3021 == inst
.reloc
.exp
.X_add_number
)
3022 && (pool
->literals
[entry
].X_add_symbol
3023 == inst
.reloc
.exp
.X_add_symbol
)
3024 && (pool
->literals
[entry
].X_op_symbol
3025 == inst
.reloc
.exp
.X_op_symbol
))
3029 /* Do we need to create a new entry? */
3030 if (entry
== pool
->next_free_entry
)
3032 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3034 inst
.error
= _("literal pool overflow");
3038 pool
->literals
[entry
] = inst
.reloc
.exp
;
3039 pool
->next_free_entry
+= 1;
3042 inst
.reloc
.exp
.X_op
= O_symbol
;
3043 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4;
3044 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3049 /* Can't use symbol_new here, so have to create a symbol and then at
3050 a later date assign it a value. Thats what these functions do. */
3053 symbol_locate (symbolS
* symbolP
,
3054 const char * name
, /* It is copied, the caller can modify. */
3055 segT segment
, /* Segment identifier (SEG_<something>). */
3056 valueT valu
, /* Symbol value. */
3057 fragS
* frag
) /* Associated fragment. */
3059 unsigned int name_length
;
3060 char * preserved_copy_of_name
;
3062 name_length
= strlen (name
) + 1; /* +1 for \0. */
3063 obstack_grow (¬es
, name
, name_length
);
3064 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3066 #ifdef tc_canonicalize_symbol_name
3067 preserved_copy_of_name
=
3068 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3071 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3073 S_SET_SEGMENT (symbolP
, segment
);
3074 S_SET_VALUE (symbolP
, valu
);
3075 symbol_clear_list_pointers (symbolP
);
3077 symbol_set_frag (symbolP
, frag
);
3079 /* Link to end of symbol chain. */
3081 extern int symbol_table_frozen
;
3083 if (symbol_table_frozen
)
3087 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3089 obj_symbol_new_hook (symbolP
);
3091 #ifdef tc_symbol_new_hook
3092 tc_symbol_new_hook (symbolP
);
3096 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3097 #endif /* DEBUG_SYMS */
3102 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3105 literal_pool
* pool
;
3108 pool
= find_literal_pool ();
3110 || pool
->symbol
== NULL
3111 || pool
->next_free_entry
== 0)
3114 mapping_state (MAP_DATA
);
3116 /* Align pool as you have word accesses.
3117 Only make a frag if we have to. */
3119 frag_align (2, 0, 0);
3121 record_alignment (now_seg
, 2);
3123 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3125 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3126 (valueT
) frag_now_fix (), frag_now
);
3127 symbol_table_insert (pool
->symbol
);
3129 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3131 #if defined OBJ_COFF || defined OBJ_ELF
3132 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3135 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3136 /* First output the expression in the instruction to the pool. */
3137 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
3139 /* Mark the pool as empty. */
3140 pool
->next_free_entry
= 0;
3141 pool
->symbol
= NULL
;
3145 /* Forward declarations for functions below, in the MD interface
3147 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3148 static valueT
create_unwind_entry (int);
3149 static void start_unwind_section (const segT
, int);
3150 static void add_unwind_opcode (valueT
, int);
3151 static void flush_pending_unwind (void);
3153 /* Directives: Data. */
3156 s_arm_elf_cons (int nbytes
)
3160 #ifdef md_flush_pending_output
3161 md_flush_pending_output ();
3164 if (is_it_end_of_statement ())
3166 demand_empty_rest_of_line ();
3170 #ifdef md_cons_align
3171 md_cons_align (nbytes
);
3174 mapping_state (MAP_DATA
);
3178 char *base
= input_line_pointer
;
3182 if (exp
.X_op
!= O_symbol
)
3183 emit_expr (&exp
, (unsigned int) nbytes
);
3186 char *before_reloc
= input_line_pointer
;
3187 reloc
= parse_reloc (&input_line_pointer
);
3190 as_bad (_("unrecognized relocation suffix"));
3191 ignore_rest_of_line ();
3194 else if (reloc
== BFD_RELOC_UNUSED
)
3195 emit_expr (&exp
, (unsigned int) nbytes
);
3198 reloc_howto_type
*howto
= (reloc_howto_type
*)
3199 bfd_reloc_type_lookup (stdoutput
,
3200 (bfd_reloc_code_real_type
) reloc
);
3201 int size
= bfd_get_reloc_size (howto
);
3203 if (reloc
== BFD_RELOC_ARM_PLT32
)
3205 as_bad (_("(plt) is only valid on branch targets"));
3206 reloc
= BFD_RELOC_UNUSED
;
3211 as_bad (_("%s relocations do not fit in %d bytes"),
3212 howto
->name
, nbytes
);
3215 /* We've parsed an expression stopping at O_symbol.
3216 But there may be more expression left now that we
3217 have parsed the relocation marker. Parse it again.
3218 XXX Surely there is a cleaner way to do this. */
3219 char *p
= input_line_pointer
;
3221 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3222 memcpy (save_buf
, base
, input_line_pointer
- base
);
3223 memmove (base
+ (input_line_pointer
- before_reloc
),
3224 base
, before_reloc
- base
);
3226 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3228 memcpy (base
, save_buf
, p
- base
);
3230 offset
= nbytes
- size
;
3231 p
= frag_more ((int) nbytes
);
3232 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3233 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3238 while (*input_line_pointer
++ == ',');
3240 /* Put terminator back into stream. */
3241 input_line_pointer
--;
3242 demand_empty_rest_of_line ();
3245 /* Emit an expression containing a 32-bit thumb instruction.
3246 Implementation based on put_thumb32_insn. */
3249 emit_thumb32_expr (expressionS
* exp
)
3251 expressionS exp_high
= *exp
;
3253 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3254 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3255 exp
->X_add_number
&= 0xffff;
3256 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3259 /* Guess the instruction size based on the opcode. */
/* Guess the instruction size based on the opcode.  Returns 2 for a
   16-bit Thumb encoding, 4 for a 32-bit one, and 0 when the value is
   ambiguous (first halfword of a 32-bit encoding seen alone).  */
static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3273 emit_insn (expressionS
*exp
, int nbytes
)
3277 if (exp
->X_op
== O_constant
)
3282 size
= thumb_insn_size (exp
->X_add_number
);
3286 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3288 as_bad (_(".inst.n operand too big. "\
3289 "Use .inst.w instead"));
3294 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3295 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3297 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3299 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3300 emit_thumb32_expr (exp
);
3302 emit_expr (exp
, (unsigned int) size
);
3304 it_fsm_post_encode ();
3308 as_bad (_("cannot determine Thumb instruction size. " \
3309 "Use .inst.n/.inst.w instead"));
3312 as_bad (_("constant expression required"));
3317 /* Like s_arm_elf_cons but do not use md_cons_align and
3318 set the mapping state to MAP_ARM/MAP_THUMB. */
3321 s_arm_elf_inst (int nbytes
)
3323 if (is_it_end_of_statement ())
3325 demand_empty_rest_of_line ();
3329 /* Calling mapping_state () here will not change ARM/THUMB,
3330 but will ensure not to be in DATA state. */
3333 mapping_state (MAP_THUMB
);
3338 as_bad (_("width suffixes are invalid in ARM mode"));
3339 ignore_rest_of_line ();
3345 mapping_state (MAP_ARM
);
3354 if (! emit_insn (& exp
, nbytes
))
3356 ignore_rest_of_line ();
3360 while (*input_line_pointer
++ == ',');
3362 /* Put terminator back into stream. */
3363 input_line_pointer
--;
3364 demand_empty_rest_of_line ();
3367 /* Parse a .rel31 directive. */
3370 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3377 if (*input_line_pointer
== '1')
3378 highbit
= 0x80000000;
3379 else if (*input_line_pointer
!= '0')
3380 as_bad (_("expected 0 or 1"));
3382 input_line_pointer
++;
3383 if (*input_line_pointer
!= ',')
3384 as_bad (_("missing comma"));
3385 input_line_pointer
++;
3387 #ifdef md_flush_pending_output
3388 md_flush_pending_output ();
3391 #ifdef md_cons_align
3395 mapping_state (MAP_DATA
);
3400 md_number_to_chars (p
, highbit
, 4);
3401 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3402 BFD_RELOC_ARM_PREL31
);
3404 demand_empty_rest_of_line ();
3407 /* Directives: AEABI stack-unwind tables. */
3409 /* Parse an unwind_fnstart directive. Simply records the current location. */
3412 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3414 demand_empty_rest_of_line ();
3415 if (unwind
.proc_start
)
3417 as_bad (_("duplicate .fnstart directive"));
3421 /* Mark the start of the function. */
3422 unwind
.proc_start
= expr_build_dot ();
3424 /* Reset the rest of the unwind info. */
3425 unwind
.opcode_count
= 0;
3426 unwind
.table_entry
= NULL
;
3427 unwind
.personality_routine
= NULL
;
3428 unwind
.personality_index
= -1;
3429 unwind
.frame_size
= 0;
3430 unwind
.fp_offset
= 0;
3431 unwind
.fp_reg
= REG_SP
;
3433 unwind
.sp_restored
= 0;
3437 /* Parse a handlerdata directive. Creates the exception handling table entry
3438 for the function. */
3441 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3443 demand_empty_rest_of_line ();
3444 if (!unwind
.proc_start
)
3445 as_bad (MISSING_FNSTART
);
3447 if (unwind
.table_entry
)
3448 as_bad (_("duplicate .handlerdata directive"));
3450 create_unwind_entry (1);
3453 /* Parse an unwind_fnend directive. Generates the index table entry. */
3456 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3461 unsigned int marked_pr_dependency
;
3463 demand_empty_rest_of_line ();
3465 if (!unwind
.proc_start
)
3467 as_bad (_(".fnend directive without .fnstart"));
3471 /* Add eh table entry. */
3472 if (unwind
.table_entry
== NULL
)
3473 val
= create_unwind_entry (0);
3477 /* Add index table entry. This is two words. */
3478 start_unwind_section (unwind
.saved_seg
, 1);
3479 frag_align (2, 0, 0);
3480 record_alignment (now_seg
, 2);
3482 ptr
= frag_more (8);
3483 where
= frag_now_fix () - 8;
3485 /* Self relative offset of the function start. */
3486 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3487 BFD_RELOC_ARM_PREL31
);
3489 /* Indicate dependency on EHABI-defined personality routines to the
3490 linker, if it hasn't been done already. */
3491 marked_pr_dependency
3492 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3493 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3494 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3496 static const char *const name
[] =
3498 "__aeabi_unwind_cpp_pr0",
3499 "__aeabi_unwind_cpp_pr1",
3500 "__aeabi_unwind_cpp_pr2"
3502 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3503 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3504 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3505 |= 1 << unwind
.personality_index
;
3509 /* Inline exception table entry. */
3510 md_number_to_chars (ptr
+ 4, val
, 4);
3512 /* Self relative offset of the table entry. */
3513 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3514 BFD_RELOC_ARM_PREL31
);
3516 /* Restore the original section. */
3517 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3519 unwind
.proc_start
= NULL
;
3523 /* Parse an unwind_cantunwind directive. */
3526 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3528 demand_empty_rest_of_line ();
3529 if (!unwind
.proc_start
)
3530 as_bad (MISSING_FNSTART
);
3532 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3533 as_bad (_("personality routine specified for cantunwind frame"));
3535 unwind
.personality_index
= -2;
3539 /* Parse a personalityindex directive. */
3542 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3546 if (!unwind
.proc_start
)
3547 as_bad (MISSING_FNSTART
);
3549 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3550 as_bad (_("duplicate .personalityindex directive"));
3554 if (exp
.X_op
!= O_constant
3555 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3557 as_bad (_("bad personality routine number"));
3558 ignore_rest_of_line ();
3562 unwind
.personality_index
= exp
.X_add_number
;
3564 demand_empty_rest_of_line ();
3568 /* Parse a personality directive. */
3571 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3575 if (!unwind
.proc_start
)
3576 as_bad (MISSING_FNSTART
);
3578 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3579 as_bad (_("duplicate .personality directive"));
3581 name
= input_line_pointer
;
3582 c
= get_symbol_end ();
3583 p
= input_line_pointer
;
3584 unwind
.personality_routine
= symbol_find_or_make (name
);
3586 demand_empty_rest_of_line ();
3590 /* Parse a directive saving core registers. */
3593 s_arm_unwind_save_core (void)
3599 range
= parse_reg_list (&input_line_pointer
);
3602 as_bad (_("expected register list"));
3603 ignore_rest_of_line ();
3607 demand_empty_rest_of_line ();
3609 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3610 into .unwind_save {..., sp...}. We aren't bothered about the value of
3611 ip because it is clobbered by calls. */
3612 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3613 && (range
& 0x3000) == 0x1000)
3615 unwind
.opcode_count
--;
3616 unwind
.sp_restored
= 0;
3617 range
= (range
| 0x2000) & ~0x1000;
3618 unwind
.pending_offset
= 0;
3624 /* See if we can use the short opcodes. These pop a block of up to 8
3625 registers starting with r4, plus maybe r14. */
3626 for (n
= 0; n
< 8; n
++)
3628 /* Break at the first non-saved register. */
3629 if ((range
& (1 << (n
+ 4))) == 0)
3632 /* See if there are any other bits set. */
3633 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3635 /* Use the long form. */
3636 op
= 0x8000 | ((range
>> 4) & 0xfff);
3637 add_unwind_opcode (op
, 2);
3641 /* Use the short form. */
3643 op
= 0xa8; /* Pop r14. */
3645 op
= 0xa0; /* Do not pop r14. */
3647 add_unwind_opcode (op
, 1);
3654 op
= 0xb100 | (range
& 0xf);
3655 add_unwind_opcode (op
, 2);
3658 /* Record the number of bytes pushed. */
3659 for (n
= 0; n
< 16; n
++)
3661 if (range
& (1 << n
))
3662 unwind
.frame_size
+= 4;
3667 /* Parse a directive saving FPA registers. */
3670 s_arm_unwind_save_fpa (int reg
)
3676 /* Get Number of registers to transfer. */
3677 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3680 exp
.X_op
= O_illegal
;
3682 if (exp
.X_op
!= O_constant
)
3684 as_bad (_("expected , <constant>"));
3685 ignore_rest_of_line ();
3689 num_regs
= exp
.X_add_number
;
3691 if (num_regs
< 1 || num_regs
> 4)
3693 as_bad (_("number of registers must be in the range [1:4]"));
3694 ignore_rest_of_line ();
3698 demand_empty_rest_of_line ();
3703 op
= 0xb4 | (num_regs
- 1);
3704 add_unwind_opcode (op
, 1);
3709 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3710 add_unwind_opcode (op
, 2);
3712 unwind
.frame_size
+= num_regs
* 12;
3716 /* Parse a directive saving VFP registers for ARMv6 and above. */
3719 s_arm_unwind_save_vfp_armv6 (void)
3724 int num_vfpv3_regs
= 0;
3725 int num_regs_below_16
;
3727 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
3730 as_bad (_("expected register list"));
3731 ignore_rest_of_line ();
3735 demand_empty_rest_of_line ();
3737 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3738 than FSTMX/FLDMX-style ones). */
3740 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3742 num_vfpv3_regs
= count
;
3743 else if (start
+ count
> 16)
3744 num_vfpv3_regs
= start
+ count
- 16;
3746 if (num_vfpv3_regs
> 0)
3748 int start_offset
= start
> 16 ? start
- 16 : 0;
3749 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
3750 add_unwind_opcode (op
, 2);
3753 /* Generate opcode for registers numbered in the range 0 .. 15. */
3754 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
3755 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
3756 if (num_regs_below_16
> 0)
3758 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
3759 add_unwind_opcode (op
, 2);
3762 unwind
.frame_size
+= count
* 8;
3766 /* Parse a directive saving VFP registers for pre-ARMv6. */
3769 s_arm_unwind_save_vfp (void)
3775 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3778 as_bad (_("expected register list"));
3779 ignore_rest_of_line ();
3783 demand_empty_rest_of_line ();
3788 op
= 0xb8 | (count
- 1);
3789 add_unwind_opcode (op
, 1);
3794 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3795 add_unwind_opcode (op
, 2);
3797 unwind
.frame_size
+= count
* 8 + 4;
3801 /* Parse a directive saving iWMMXt data registers. */
3804 s_arm_unwind_save_mmxwr (void)
3812 if (*input_line_pointer
== '{')
3813 input_line_pointer
++;
3817 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3821 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3826 as_tsktsk (_("register list not in ascending order"));
3829 if (*input_line_pointer
== '-')
3831 input_line_pointer
++;
3832 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3835 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3838 else if (reg
>= hi_reg
)
3840 as_bad (_("bad register range"));
3843 for (; reg
< hi_reg
; reg
++)
3847 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3849 if (*input_line_pointer
== '}')
3850 input_line_pointer
++;
3852 demand_empty_rest_of_line ();
3854 /* Generate any deferred opcodes because we're going to be looking at
3856 flush_pending_unwind ();
3858 for (i
= 0; i
< 16; i
++)
3860 if (mask
& (1 << i
))
3861 unwind
.frame_size
+= 8;
3864 /* Attempt to combine with a previous opcode. We do this because gcc
3865 likes to output separate unwind directives for a single block of
3867 if (unwind
.opcode_count
> 0)
3869 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3870 if ((i
& 0xf8) == 0xc0)
3873 /* Only merge if the blocks are contiguous. */
3876 if ((mask
& 0xfe00) == (1 << 9))
3878 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3879 unwind
.opcode_count
--;
3882 else if (i
== 6 && unwind
.opcode_count
>= 2)
3884 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3888 op
= 0xffff << (reg
- 1);
3890 && ((mask
& op
) == (1u << (reg
- 1))))
3892 op
= (1 << (reg
+ i
+ 1)) - 1;
3893 op
&= ~((1 << reg
) - 1);
3895 unwind
.opcode_count
-= 2;
3902 /* We want to generate opcodes in the order the registers have been
3903 saved, ie. descending order. */
3904 for (reg
= 15; reg
>= -1; reg
--)
3906 /* Save registers in blocks. */
3908 || !(mask
& (1 << reg
)))
3910 /* We found an unsaved reg. Generate opcodes to save the
3917 op
= 0xc0 | (hi_reg
- 10);
3918 add_unwind_opcode (op
, 1);
3923 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3924 add_unwind_opcode (op
, 2);
3933 ignore_rest_of_line ();
3937 s_arm_unwind_save_mmxwcg (void)
3944 if (*input_line_pointer
== '{')
3945 input_line_pointer
++;
3949 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3953 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3959 as_tsktsk (_("register list not in ascending order"));
3962 if (*input_line_pointer
== '-')
3964 input_line_pointer
++;
3965 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3968 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3971 else if (reg
>= hi_reg
)
3973 as_bad (_("bad register range"));
3976 for (; reg
< hi_reg
; reg
++)
3980 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3982 if (*input_line_pointer
== '}')
3983 input_line_pointer
++;
3985 demand_empty_rest_of_line ();
3987 /* Generate any deferred opcodes because we're going to be looking at
3989 flush_pending_unwind ();
3991 for (reg
= 0; reg
< 16; reg
++)
3993 if (mask
& (1 << reg
))
3994 unwind
.frame_size
+= 4;
3997 add_unwind_opcode (op
, 2);
4000 ignore_rest_of_line ();
4004 /* Parse an unwind_save directive.
4005 If the argument is non-zero, this is a .vsave directive. */
4008 s_arm_unwind_save (int arch_v6
)
4011 struct reg_entry
*reg
;
4012 bfd_boolean had_brace
= FALSE
;
4014 if (!unwind
.proc_start
)
4015 as_bad (MISSING_FNSTART
);
4017 /* Figure out what sort of save we have. */
4018 peek
= input_line_pointer
;
4026 reg
= arm_reg_parse_multi (&peek
);
4030 as_bad (_("register expected"));
4031 ignore_rest_of_line ();
4040 as_bad (_("FPA .unwind_save does not take a register list"));
4041 ignore_rest_of_line ();
4044 input_line_pointer
= peek
;
4045 s_arm_unwind_save_fpa (reg
->number
);
4048 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
4051 s_arm_unwind_save_vfp_armv6 ();
4053 s_arm_unwind_save_vfp ();
4055 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
4056 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
4059 as_bad (_(".unwind_save does not support this kind of register"));
4060 ignore_rest_of_line ();
4065 /* Parse an unwind_movsp directive. */
4068 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4074 if (!unwind
.proc_start
)
4075 as_bad (MISSING_FNSTART
);
4077 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4080 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4081 ignore_rest_of_line ();
4085 /* Optional constant. */
4086 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4088 if (immediate_for_directive (&offset
) == FAIL
)
4094 demand_empty_rest_of_line ();
4096 if (reg
== REG_SP
|| reg
== REG_PC
)
4098 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4102 if (unwind
.fp_reg
!= REG_SP
)
4103 as_bad (_("unexpected .unwind_movsp directive"));
4105 /* Generate opcode to restore the value. */
4107 add_unwind_opcode (op
, 1);
4109 /* Record the information for later. */
4110 unwind
.fp_reg
= reg
;
4111 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4112 unwind
.sp_restored
= 1;
4115 /* Parse an unwind_pad directive. */
4118 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4122 if (!unwind
.proc_start
)
4123 as_bad (MISSING_FNSTART
);
4125 if (immediate_for_directive (&offset
) == FAIL
)
4130 as_bad (_("stack increment must be multiple of 4"));
4131 ignore_rest_of_line ();
4135 /* Don't generate any opcodes, just record the details for later. */
4136 unwind
.frame_size
+= offset
;
4137 unwind
.pending_offset
+= offset
;
4139 demand_empty_rest_of_line ();
4142 /* Parse an unwind_setfp directive. */
4145 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4151 if (!unwind
.proc_start
)
4152 as_bad (MISSING_FNSTART
);
4154 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4155 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4158 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4160 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4162 as_bad (_("expected <reg>, <reg>"));
4163 ignore_rest_of_line ();
4167 /* Optional constant. */
4168 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4170 if (immediate_for_directive (&offset
) == FAIL
)
4176 demand_empty_rest_of_line ();
4178 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4180 as_bad (_("register must be either sp or set by a previous"
4181 "unwind_movsp directive"));
4185 /* Don't generate any opcodes, just record the information for later. */
4186 unwind
.fp_reg
= fp_reg
;
4188 if (sp_reg
== REG_SP
)
4189 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4191 unwind
.fp_offset
-= offset
;
4194 /* Parse an unwind_raw directive. */
4197 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4200 /* This is an arbitrary limit. */
4201 unsigned char op
[16];
4204 if (!unwind
.proc_start
)
4205 as_bad (MISSING_FNSTART
);
4208 if (exp
.X_op
== O_constant
4209 && skip_past_comma (&input_line_pointer
) != FAIL
)
4211 unwind
.frame_size
+= exp
.X_add_number
;
4215 exp
.X_op
= O_illegal
;
4217 if (exp
.X_op
!= O_constant
)
4219 as_bad (_("expected <offset>, <opcode>"));
4220 ignore_rest_of_line ();
4226 /* Parse the opcode. */
4231 as_bad (_("unwind opcode too long"));
4232 ignore_rest_of_line ();
4234 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4236 as_bad (_("invalid unwind opcode"));
4237 ignore_rest_of_line ();
4240 op
[count
++] = exp
.X_add_number
;
4242 /* Parse the next byte. */
4243 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4249 /* Add the opcode bytes in reverse order. */
4251 add_unwind_opcode (op
[count
], 1);
4253 demand_empty_rest_of_line ();
4257 /* Parse a .eabi_attribute directive. */
4260 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4262 int tag
= s_vendor_attribute (OBJ_ATTR_PROC
);
4264 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4265 attributes_set_explicitly
[tag
] = 1;
4267 #endif /* OBJ_ELF */
4269 static void s_arm_arch (int);
4270 static void s_arm_object_arch (int);
4271 static void s_arm_cpu (int);
4272 static void s_arm_fpu (int);
4277 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4284 if (exp
.X_op
== O_symbol
)
4285 exp
.X_op
= O_secrel
;
4287 emit_expr (&exp
, 4);
4289 while (*input_line_pointer
++ == ',');
4291 input_line_pointer
--;
4292 demand_empty_rest_of_line ();
4296 /* This table describes all the machine specific pseudo-ops the assembler
4297 has to support. The fields are:
4298 pseudo-op name without dot
4299 function to call to execute this pseudo-op
4300 Integer arg to pass to the function. */
4302 const pseudo_typeS md_pseudo_table
[] =
4304 /* Never called because '.req' does not start a line. */
4305 { "req", s_req
, 0 },
4306 /* Following two are likewise never called. */
4309 { "unreq", s_unreq
, 0 },
4310 { "bss", s_bss
, 0 },
4311 { "align", s_align
, 0 },
4312 { "arm", s_arm
, 0 },
4313 { "thumb", s_thumb
, 0 },
4314 { "code", s_code
, 0 },
4315 { "force_thumb", s_force_thumb
, 0 },
4316 { "thumb_func", s_thumb_func
, 0 },
4317 { "thumb_set", s_thumb_set
, 0 },
4318 { "even", s_even
, 0 },
4319 { "ltorg", s_ltorg
, 0 },
4320 { "pool", s_ltorg
, 0 },
4321 { "syntax", s_syntax
, 0 },
4322 { "cpu", s_arm_cpu
, 0 },
4323 { "arch", s_arm_arch
, 0 },
4324 { "object_arch", s_arm_object_arch
, 0 },
4325 { "fpu", s_arm_fpu
, 0 },
4327 { "word", s_arm_elf_cons
, 4 },
4328 { "long", s_arm_elf_cons
, 4 },
4329 { "inst.n", s_arm_elf_inst
, 2 },
4330 { "inst.w", s_arm_elf_inst
, 4 },
4331 { "inst", s_arm_elf_inst
, 0 },
4332 { "rel31", s_arm_rel31
, 0 },
4333 { "fnstart", s_arm_unwind_fnstart
, 0 },
4334 { "fnend", s_arm_unwind_fnend
, 0 },
4335 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4336 { "personality", s_arm_unwind_personality
, 0 },
4337 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4338 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4339 { "save", s_arm_unwind_save
, 0 },
4340 { "vsave", s_arm_unwind_save
, 1 },
4341 { "movsp", s_arm_unwind_movsp
, 0 },
4342 { "pad", s_arm_unwind_pad
, 0 },
4343 { "setfp", s_arm_unwind_setfp
, 0 },
4344 { "unwind_raw", s_arm_unwind_raw
, 0 },
4345 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4349 /* These are used for dwarf. */
4353 /* These are used for dwarf2. */
4354 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4355 { "loc", dwarf2_directive_loc
, 0 },
4356 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4358 { "extend", float_cons
, 'x' },
4359 { "ldouble", float_cons
, 'x' },
4360 { "packed", float_cons
, 'p' },
4362 {"secrel32", pe_directive_secrel
, 0},
4367 /* Parser functions used exclusively in instruction operands. */
4369 /* Generic immediate-value read function for use in insn parsing.
4370 STR points to the beginning of the immediate (the leading #);
4371 VAL receives the value; if the value is outside [MIN, MAX]
4372 issue an error. PREFIX_OPT is true if the immediate prefix is
4376 parse_immediate (char **str
, int *val
, int min
, int max
,
4377 bfd_boolean prefix_opt
)
4380 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4381 if (exp
.X_op
!= O_constant
)
4383 inst
.error
= _("constant expression required");
4387 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4389 inst
.error
= _("immediate value out of range");
4393 *val
= exp
.X_add_number
;
4397 /* Less-generic immediate-value read function with the possibility of loading a
4398 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4399 instructions. Puts the result directly in inst.operands[i]. */
4402 parse_big_immediate (char **str
, int i
)
4407 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
4409 if (exp
.X_op
== O_constant
)
4411 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
4412 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4413 O_constant. We have to be careful not to break compilation for
4414 32-bit X_add_number, though. */
4415 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
4417 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4418 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
4419 inst
.operands
[i
].regisimm
= 1;
4422 else if (exp
.X_op
== O_big
4423 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32)
4425 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4427 /* Bignums have their least significant bits in
4428 generic_bignum[0]. Make sure we put 32 bits in imm and
4429 32 bits in reg, in a (hopefully) portable way. */
4430 gas_assert (parts
!= 0);
4432 /* Make sure that the number is not too big.
4433 PR 11972: Bignums can now be sign-extended to the
4434 size of a .octa so check that the out of range bits
4435 are all zero or all one. */
4436 if (LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 64)
4438 LITTLENUM_TYPE m
= -1;
4440 if (generic_bignum
[parts
* 2] != 0
4441 && generic_bignum
[parts
* 2] != m
)
4444 for (j
= parts
* 2 + 1; j
< (unsigned) exp
.X_add_number
; j
++)
4445 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4449 inst
.operands
[i
].imm
= 0;
4450 for (j
= 0; j
< parts
; j
++, idx
++)
4451 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4452 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4453 inst
.operands
[i
].reg
= 0;
4454 for (j
= 0; j
< parts
; j
++, idx
++)
4455 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4456 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4457 inst
.operands
[i
].regisimm
= 1;
4467 /* Returns the pseudo-register number of an FPA immediate constant,
4468 or FAIL if there isn't a valid constant here. */
4471 parse_fpa_immediate (char ** str
)
4473 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4479 /* First try and match exact strings, this is to guarantee
4480 that some formats will work even for cross assembly. */
4482 for (i
= 0; fp_const
[i
]; i
++)
4484 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4488 *str
+= strlen (fp_const
[i
]);
4489 if (is_end_of_line
[(unsigned char) **str
])
4495 /* Just because we didn't get a match doesn't mean that the constant
4496 isn't valid, just that it is in a format that we don't
4497 automatically recognize. Try parsing it with the standard
4498 expression routines. */
4500 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4502 /* Look for a raw floating point number. */
4503 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4504 && is_end_of_line
[(unsigned char) *save_in
])
4506 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4508 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4510 if (words
[j
] != fp_values
[i
][j
])
4514 if (j
== MAX_LITTLENUMS
)
4522 /* Try and parse a more complex expression, this will probably fail
4523 unless the code uses a floating point prefix (eg "0f"). */
4524 save_in
= input_line_pointer
;
4525 input_line_pointer
= *str
;
4526 if (expression (&exp
) == absolute_section
4527 && exp
.X_op
== O_big
4528 && exp
.X_add_number
< 0)
4530 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4532 if (gen_to_words (words
, 5, (long) 15) == 0)
4534 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4536 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4538 if (words
[j
] != fp_values
[i
][j
])
4542 if (j
== MAX_LITTLENUMS
)
4544 *str
= input_line_pointer
;
4545 input_line_pointer
= save_in
;
4552 *str
= input_line_pointer
;
4553 input_line_pointer
= save_in
;
4554 inst
.error
= _("invalid FPA immediate expression");
4558 /* Returns 1 if a number has "quarter-precision" float format
4559 0baBbbbbbc defgh000 00000000 00000000. */
4562 is_quarter_float (unsigned imm
)
4564 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4565 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4568 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4569 0baBbbbbbc defgh000 00000000 00000000.
4570 The zero and minus-zero cases need special handling, since they can't be
4571 encoded in the "quarter-precision" float format, but can nonetheless be
4572 loaded as integer constants. */
4575 parse_qfloat_immediate (char **ccp
, int *immed
)
4579 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4580 int found_fpchar
= 0;
4582 skip_past_char (&str
, '#');
4584 /* We must not accidentally parse an integer as a floating-point number. Make
4585 sure that the value we parse is not an integer by checking for special
4586 characters '.' or 'e'.
4587 FIXME: This is a horrible hack, but doing better is tricky because type
4588 information isn't in a very usable state at parse time. */
4590 skip_whitespace (fpnum
);
4592 if (strncmp (fpnum
, "0x", 2) == 0)
4596 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4597 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4607 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4609 unsigned fpword
= 0;
4612 /* Our FP word must be 32 bits (single-precision FP). */
4613 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4615 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4619 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
4632 /* Shift operands. */
4635 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4638 struct asm_shift_name
4641 enum shift_kind kind
;
4644 /* Third argument to parse_shift. */
4645 enum parse_shift_mode
4647 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4648 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4649 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4650 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4651 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4654 /* Parse a <shift> specifier on an ARM data processing instruction.
4655 This has three forms:
4657 (LSL|LSR|ASL|ASR|ROR) Rs
4658 (LSL|LSR|ASL|ASR|ROR) #imm
4661 Note that ASL is assimilated to LSL in the instruction encoding, and
4662 RRX to ROR #0 (which cannot be written as such). */
4665 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4667 const struct asm_shift_name
*shift_name
;
4668 enum shift_kind shift
;
4673 for (p
= *str
; ISALPHA (*p
); p
++)
4678 inst
.error
= _("shift expression expected");
4682 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
4685 if (shift_name
== NULL
)
4687 inst
.error
= _("shift expression expected");
4691 shift
= shift_name
->kind
;
4695 case NO_SHIFT_RESTRICT
:
4696 case SHIFT_IMMEDIATE
: break;
4698 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4699 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4701 inst
.error
= _("'LSL' or 'ASR' required");
4706 case SHIFT_LSL_IMMEDIATE
:
4707 if (shift
!= SHIFT_LSL
)
4709 inst
.error
= _("'LSL' required");
4714 case SHIFT_ASR_IMMEDIATE
:
4715 if (shift
!= SHIFT_ASR
)
4717 inst
.error
= _("'ASR' required");
4725 if (shift
!= SHIFT_RRX
)
4727 /* Whitespace can appear here if the next thing is a bare digit. */
4728 skip_whitespace (p
);
4730 if (mode
== NO_SHIFT_RESTRICT
4731 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4733 inst
.operands
[i
].imm
= reg
;
4734 inst
.operands
[i
].immisreg
= 1;
4736 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4739 inst
.operands
[i
].shift_kind
= shift
;
4740 inst
.operands
[i
].shifted
= 1;
4745 /* Parse a <shifter_operand> for an ARM data processing instruction:
4748 #<immediate>, <rotate>
4752 where <shift> is defined by parse_shift above, and <rotate> is a
4753 multiple of 2 between 0 and 30. Validation of immediate operands
4754 is deferred to md_apply_fix. */
4757 parse_shifter_operand (char **str
, int i
)
4762 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4764 inst
.operands
[i
].reg
= value
;
4765 inst
.operands
[i
].isreg
= 1;
4767 /* parse_shift will override this if appropriate */
4768 inst
.reloc
.exp
.X_op
= O_constant
;
4769 inst
.reloc
.exp
.X_add_number
= 0;
4771 if (skip_past_comma (str
) == FAIL
)
4774 /* Shift operation on register. */
4775 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4778 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4781 if (skip_past_comma (str
) == SUCCESS
)
4783 /* #x, y -- ie explicit rotation by Y. */
4784 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
4787 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4789 inst
.error
= _("constant expression expected");
4793 value
= exp
.X_add_number
;
4794 if (value
< 0 || value
> 30 || value
% 2 != 0)
4796 inst
.error
= _("invalid rotation");
4799 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4801 inst
.error
= _("invalid constant");
4805 /* Convert to decoded value. md_apply_fix will put it back. */
4806 inst
.reloc
.exp
.X_add_number
4807 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4808 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4811 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4812 inst
.reloc
.pc_rel
= 0;
4816 /* Group relocation information. Each entry in the table contains the
4817 textual name of the relocation as may appear in assembler source
4818 and must end with a colon.
4819 Along with this textual name are the relocation codes to be used if
4820 the corresponding instruction is an ALU instruction (ADD or SUB only),
4821 an LDR, an LDRS, or an LDC. */
4823 struct group_reloc_table_entry
4834 /* Varieties of non-ALU group relocation. */
4841 static struct group_reloc_table_entry group_reloc_table
[] =
4842 { /* Program counter relative: */
4844 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4849 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4850 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4851 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4852 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4854 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4859 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4860 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4861 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4862 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4864 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4865 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4866 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4867 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4868 /* Section base relative */
4870 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4875 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4876 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4877 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4878 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4880 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4885 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4886 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4887 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4888 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4890 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4891 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4892 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4893 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
4895 /* Given the address of a pointer pointing to the textual name of a group
4896 relocation as may appear in assembler source, attempt to find its details
4897 in group_reloc_table. The pointer will be updated to the character after
4898 the trailing colon. On failure, FAIL will be returned; SUCCESS
4899 otherwise. On success, *entry will be updated to point at the relevant
4900 group_reloc_table entry. */
4903 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
4906 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
4908 int length
= strlen (group_reloc_table
[i
].name
);
4910 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
4911 && (*str
)[length
] == ':')
4913 *out
= &group_reloc_table
[i
];
4914 *str
+= (length
+ 1);
4922 /* Parse a <shifter_operand> for an ARM data processing instruction
4923 (as for parse_shifter_operand) where group relocations are allowed:
4926 #<immediate>, <rotate>
4927 #:<group_reloc>:<expression>
4931 where <group_reloc> is one of the strings defined in group_reloc_table.
4932 The hashes are optional.
4934 Everything else is as for parse_shifter_operand. */
4936 static parse_operand_result
4937 parse_shifter_operand_group_reloc (char **str
, int i
)
4939 /* Determine if we have the sequence of characters #: or just :
4940 coming next. If we do, then we check for a group relocation.
4941 If we don't, punt the whole lot to parse_shifter_operand. */
4943 if (((*str
)[0] == '#' && (*str
)[1] == ':')
4944 || (*str
)[0] == ':')
4946 struct group_reloc_table_entry
*entry
;
4948 if ((*str
)[0] == '#')
4953 /* Try to parse a group relocation. Anything else is an error. */
4954 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
4956 inst
.error
= _("unknown group relocation");
4957 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4960 /* We now have the group relocation table entry corresponding to
4961 the name in the assembler source. Next, we parse the expression. */
4962 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
4963 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4965 /* Record the relocation type (always the ALU variant here). */
4966 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
4967 gas_assert (inst
.reloc
.type
!= 0);
4969 return PARSE_OPERAND_SUCCESS
;
4972 return parse_shifter_operand (str
, i
) == SUCCESS
4973 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
4975 /* Never reached. */
4978 /* Parse a Neon alignment expression. Information is written to
4979 inst.operands[i]. We assume the initial ':' has been skipped.
4981 align .imm = align << 8, .immisalign=1, .preind=0 */
4982 static parse_operand_result
4983 parse_neon_alignment (char **str
, int i
)
4988 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4990 if (exp
.X_op
!= O_constant
)
4992 inst
.error
= _("alignment must be constant");
4993 return PARSE_OPERAND_FAIL
;
4996 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4997 inst
.operands
[i
].immisalign
= 1;
4998 /* Alignments are not pre-indexes. */
4999 inst
.operands
[i
].preind
= 0;
5002 return PARSE_OPERAND_SUCCESS
;
5005 /* Parse all forms of an ARM address expression. Information is written
5006 to inst.operands[i] and/or inst.reloc.
5008 Preindexed addressing (.preind=1):
5010 [Rn, #offset] .reg=Rn .reloc.exp=offset
5011 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5012 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5013 .shift_kind=shift .reloc.exp=shift_imm
5015 These three may have a trailing ! which causes .writeback to be set also.
5017 Postindexed addressing (.postind=1, .writeback=1):
5019 [Rn], #offset .reg=Rn .reloc.exp=offset
5020 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5021 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5022 .shift_kind=shift .reloc.exp=shift_imm
5024 Unindexed addressing (.preind=0, .postind=0):
5026 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5030 [Rn]{!} shorthand for [Rn,#0]{!}
5031 =immediate .isreg=0 .reloc.exp=immediate
5032 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5034 It is the caller's responsibility to check for addressing modes not
5035 supported by the instruction, and to set inst.reloc.type. */
5037 static parse_operand_result
5038 parse_address_main (char **str
, int i
, int group_relocations
,
5039 group_reloc_type group_type
)
5044 if (skip_past_char (&p
, '[') == FAIL
)
5046 if (skip_past_char (&p
, '=') == FAIL
)
5048 /* Bare address - translate to PC-relative offset. */
5049 inst
.reloc
.pc_rel
= 1;
5050 inst
.operands
[i
].reg
= REG_PC
;
5051 inst
.operands
[i
].isreg
= 1;
5052 inst
.operands
[i
].preind
= 1;
5054 /* Otherwise a load-constant pseudo op, no special treatment needed here. */
5056 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5057 return PARSE_OPERAND_FAIL
;
5060 return PARSE_OPERAND_SUCCESS
;
5063 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5065 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5066 return PARSE_OPERAND_FAIL
;
5068 inst
.operands
[i
].reg
= reg
;
5069 inst
.operands
[i
].isreg
= 1;
5071 if (skip_past_comma (&p
) == SUCCESS
)
5073 inst
.operands
[i
].preind
= 1;
5076 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5078 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5080 inst
.operands
[i
].imm
= reg
;
5081 inst
.operands
[i
].immisreg
= 1;
5083 if (skip_past_comma (&p
) == SUCCESS
)
5084 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5085 return PARSE_OPERAND_FAIL
;
5087 else if (skip_past_char (&p
, ':') == SUCCESS
)
5089 /* FIXME: '@' should be used here, but it's filtered out by generic
5090 code before we get to see it here. This may be subject to
5092 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5094 if (result
!= PARSE_OPERAND_SUCCESS
)
5099 if (inst
.operands
[i
].negative
)
5101 inst
.operands
[i
].negative
= 0;
5105 if (group_relocations
5106 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5108 struct group_reloc_table_entry
*entry
;
5110 /* Skip over the #: or : sequence. */
5116 /* Try to parse a group relocation. Anything else is an
5118 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5120 inst
.error
= _("unknown group relocation");
5121 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5124 /* We now have the group relocation table entry corresponding to
5125 the name in the assembler source. Next, we parse the
5127 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5128 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5130 /* Record the relocation type. */
5134 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5138 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5142 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5149 if (inst
.reloc
.type
== 0)
5151 inst
.error
= _("this group relocation is not allowed on this instruction");
5152 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5156 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5157 return PARSE_OPERAND_FAIL
;
5160 else if (skip_past_char (&p
, ':') == SUCCESS
)
5162 /* FIXME: '@' should be used here, but it's filtered out by generic code
5163 before we get to see it here. This may be subject to change. */
5164 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5166 if (result
!= PARSE_OPERAND_SUCCESS
)
5170 if (skip_past_char (&p
, ']') == FAIL
)
5172 inst
.error
= _("']' expected");
5173 return PARSE_OPERAND_FAIL
;
5176 if (skip_past_char (&p
, '!') == SUCCESS
)
5177 inst
.operands
[i
].writeback
= 1;
5179 else if (skip_past_comma (&p
) == SUCCESS
)
5181 if (skip_past_char (&p
, '{') == SUCCESS
)
5183 /* [Rn], {expr} - unindexed, with option */
5184 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5185 0, 255, TRUE
) == FAIL
)
5186 return PARSE_OPERAND_FAIL
;
5188 if (skip_past_char (&p
, '}') == FAIL
)
5190 inst
.error
= _("'}' expected at end of 'option' field");
5191 return PARSE_OPERAND_FAIL
;
5193 if (inst
.operands
[i
].preind
)
5195 inst
.error
= _("cannot combine index with option");
5196 return PARSE_OPERAND_FAIL
;
5199 return PARSE_OPERAND_SUCCESS
;
5203 inst
.operands
[i
].postind
= 1;
5204 inst
.operands
[i
].writeback
= 1;
5206 if (inst
.operands
[i
].preind
)
5208 inst
.error
= _("cannot combine pre- and post-indexing");
5209 return PARSE_OPERAND_FAIL
;
5213 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5215 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5217 /* We might be using the immediate for alignment already. If we
5218 are, OR the register number into the low-order bits. */
5219 if (inst
.operands
[i
].immisalign
)
5220 inst
.operands
[i
].imm
|= reg
;
5222 inst
.operands
[i
].imm
= reg
;
5223 inst
.operands
[i
].immisreg
= 1;
5225 if (skip_past_comma (&p
) == SUCCESS
)
5226 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5227 return PARSE_OPERAND_FAIL
;
5231 if (inst
.operands
[i
].negative
)
5233 inst
.operands
[i
].negative
= 0;
5236 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5237 return PARSE_OPERAND_FAIL
;
5242 /* If at this point neither .preind nor .postind is set, we have a
5243 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5244 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5246 inst
.operands
[i
].preind
= 1;
5247 inst
.reloc
.exp
.X_op
= O_constant
;
5248 inst
.reloc
.exp
.X_add_number
= 0;
5251 return PARSE_OPERAND_SUCCESS
;
5255 parse_address (char **str
, int i
)
5257 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5261 static parse_operand_result
5262 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5264 return parse_address_main (str
, i
, 1, type
);
5267 /* Parse an operand for a MOVW or MOVT instruction. */
5269 parse_half (char **str
)
5274 skip_past_char (&p
, '#');
5275 if (strncasecmp (p
, ":lower16:", 9) == 0)
5276 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5277 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5278 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5280 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5283 skip_whitespace (p
);
5286 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5289 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5291 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5293 inst
.error
= _("constant expression expected");
5296 if (inst
.reloc
.exp
.X_add_number
< 0
5297 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5299 inst
.error
= _("immediate value out of range");
5307 /* Miscellaneous. */
5309 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5310 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5312 parse_psr (char **str
)
5315 unsigned long psr_field
;
5316 const struct asm_psr
*psr
;
5319 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5320 feature for ease of use and backwards compatibility. */
5322 if (strncasecmp (p
, "SPSR", 4) == 0)
5323 psr_field
= SPSR_BIT
;
5324 else if (strncasecmp (p
, "CPSR", 4) == 0
5325 || (strncasecmp (p
, "APSR", 4) == 0
5326 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
)))
5333 while (ISALNUM (*p
) || *p
== '_');
5335 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5347 /* A suffix follows. */
5353 while (ISALNUM (*p
) || *p
== '_');
5355 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5360 psr_field
|= psr
->field
;
5365 goto error
; /* Garbage after "[CS]PSR". */
5367 psr_field
|= (PSR_c
| PSR_f
);
5373 inst
.error
= _("flag for {c}psr instruction expected");
5377 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5378 value suitable for splatting into the AIF field of the instruction. */
5381 parse_cps_flags (char **str
)
5390 case '\0': case ',':
5393 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
5394 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
5395 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
5398 inst
.error
= _("unrecognized CPS flag");
5403 if (saw_a_flag
== 0)
5405 inst
.error
= _("missing CPS flags");
5413 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5414 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5417 parse_endian_specifier (char **str
)
5422 if (strncasecmp (s
, "BE", 2))
5424 else if (strncasecmp (s
, "LE", 2))
5428 inst
.error
= _("valid endian specifiers are be or le");
5432 if (ISALNUM (s
[2]) || s
[2] == '_')
5434 inst
.error
= _("valid endian specifiers are be or le");
5439 return little_endian
;
5442 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5443 value suitable for poking into the rotate field of an sxt or sxta
5444 instruction, or FAIL on error. */
5447 parse_ror (char **str
)
5452 if (strncasecmp (s
, "ROR", 3) == 0)
5456 inst
.error
= _("missing rotation field after comma");
5460 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
5465 case 0: *str
= s
; return 0x0;
5466 case 8: *str
= s
; return 0x1;
5467 case 16: *str
= s
; return 0x2;
5468 case 24: *str
= s
; return 0x3;
5471 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
5476 /* Parse a conditional code (from conds[] below). The value returned is in the
5477 range 0 .. 14, or FAIL. */
5479 parse_cond (char **str
)
5482 const struct asm_cond
*c
;
5484 /* Condition codes are always 2 characters, so matching up to
5485 3 characters is sufficient. */
5490 while (ISALPHA (*q
) && n
< 3)
5492 cond
[n
] = TOLOWER (*q
);
5497 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
5500 inst
.error
= _("condition required");
5508 /* Parse an option for a barrier instruction. Returns the encoding for the
5511 parse_barrier (char **str
)
5514 const struct asm_barrier_opt
*o
;
5517 while (ISALPHA (*q
))
5520 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
5529 /* Parse the operands of a table branch instruction. Similar to a memory
5532 parse_tb (char **str
)
5537 if (skip_past_char (&p
, '[') == FAIL
)
5539 inst
.error
= _("'[' expected");
5543 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5545 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5548 inst
.operands
[0].reg
= reg
;
5550 if (skip_past_comma (&p
) == FAIL
)
5552 inst
.error
= _("',' expected");
5556 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5558 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5561 inst
.operands
[0].imm
= reg
;
5563 if (skip_past_comma (&p
) == SUCCESS
)
5565 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
5567 if (inst
.reloc
.exp
.X_add_number
!= 1)
5569 inst
.error
= _("invalid shift");
5572 inst
.operands
[0].shifted
= 1;
5575 if (skip_past_char (&p
, ']') == FAIL
)
5577 inst
.error
= _("']' expected");
5584 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5585 information on the types the operands can take and how they are encoded.
5586 Up to four operands may be read; this function handles setting the
5587 ".present" field for each read operand itself.
5588 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5589 else returns FAIL. */
5592 parse_neon_mov (char **str
, int *which_operand
)
5594 int i
= *which_operand
, val
;
5595 enum arm_reg_type rtype
;
5597 struct neon_type_el optype
;
5599 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5601 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5602 inst
.operands
[i
].reg
= val
;
5603 inst
.operands
[i
].isscalar
= 1;
5604 inst
.operands
[i
].vectype
= optype
;
5605 inst
.operands
[i
++].present
= 1;
5607 if (skip_past_comma (&ptr
) == FAIL
)
5610 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5613 inst
.operands
[i
].reg
= val
;
5614 inst
.operands
[i
].isreg
= 1;
5615 inst
.operands
[i
].present
= 1;
5617 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
5620 /* Cases 0, 1, 2, 3, 5 (D only). */
5621 if (skip_past_comma (&ptr
) == FAIL
)
5624 inst
.operands
[i
].reg
= val
;
5625 inst
.operands
[i
].isreg
= 1;
5626 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5627 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5628 inst
.operands
[i
].isvec
= 1;
5629 inst
.operands
[i
].vectype
= optype
;
5630 inst
.operands
[i
++].present
= 1;
5632 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5634 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5635 Case 13: VMOV <Sd>, <Rm> */
5636 inst
.operands
[i
].reg
= val
;
5637 inst
.operands
[i
].isreg
= 1;
5638 inst
.operands
[i
].present
= 1;
5640 if (rtype
== REG_TYPE_NQ
)
5642 first_error (_("can't use Neon quad register here"));
5645 else if (rtype
!= REG_TYPE_VFS
)
5648 if (skip_past_comma (&ptr
) == FAIL
)
5650 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5652 inst
.operands
[i
].reg
= val
;
5653 inst
.operands
[i
].isreg
= 1;
5654 inst
.operands
[i
].present
= 1;
5657 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
5660 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5661 Case 1: VMOV<c><q> <Dd>, <Dm>
5662 Case 8: VMOV.F32 <Sd>, <Sm>
5663 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5665 inst
.operands
[i
].reg
= val
;
5666 inst
.operands
[i
].isreg
= 1;
5667 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5668 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5669 inst
.operands
[i
].isvec
= 1;
5670 inst
.operands
[i
].vectype
= optype
;
5671 inst
.operands
[i
].present
= 1;
5673 if (skip_past_comma (&ptr
) == SUCCESS
)
5678 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5681 inst
.operands
[i
].reg
= val
;
5682 inst
.operands
[i
].isreg
= 1;
5683 inst
.operands
[i
++].present
= 1;
5685 if (skip_past_comma (&ptr
) == FAIL
)
5688 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5691 inst
.operands
[i
].reg
= val
;
5692 inst
.operands
[i
].isreg
= 1;
5693 inst
.operands
[i
++].present
= 1;
5696 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
5697 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5698 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5699 Case 10: VMOV.F32 <Sd>, #<imm>
5700 Case 11: VMOV.F64 <Dd>, #<imm> */
5701 inst
.operands
[i
].immisfloat
= 1;
5702 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
5703 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5704 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5708 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5712 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5715 inst
.operands
[i
].reg
= val
;
5716 inst
.operands
[i
].isreg
= 1;
5717 inst
.operands
[i
++].present
= 1;
5719 if (skip_past_comma (&ptr
) == FAIL
)
5722 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5724 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5725 inst
.operands
[i
].reg
= val
;
5726 inst
.operands
[i
].isscalar
= 1;
5727 inst
.operands
[i
].present
= 1;
5728 inst
.operands
[i
].vectype
= optype
;
5730 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5732 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5733 inst
.operands
[i
].reg
= val
;
5734 inst
.operands
[i
].isreg
= 1;
5735 inst
.operands
[i
++].present
= 1;
5737 if (skip_past_comma (&ptr
) == FAIL
)
5740 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
5743 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
5747 inst
.operands
[i
].reg
= val
;
5748 inst
.operands
[i
].isreg
= 1;
5749 inst
.operands
[i
].isvec
= 1;
5750 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5751 inst
.operands
[i
].vectype
= optype
;
5752 inst
.operands
[i
].present
= 1;
5754 if (rtype
== REG_TYPE_VFS
)
5758 if (skip_past_comma (&ptr
) == FAIL
)
5760 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
5763 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
5766 inst
.operands
[i
].reg
= val
;
5767 inst
.operands
[i
].isreg
= 1;
5768 inst
.operands
[i
].isvec
= 1;
5769 inst
.operands
[i
].issingle
= 1;
5770 inst
.operands
[i
].vectype
= optype
;
5771 inst
.operands
[i
].present
= 1;
5774 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
5778 inst
.operands
[i
].reg
= val
;
5779 inst
.operands
[i
].isreg
= 1;
5780 inst
.operands
[i
].isvec
= 1;
5781 inst
.operands
[i
].issingle
= 1;
5782 inst
.operands
[i
].vectype
= optype
;
5783 inst
.operands
[i
++].present
= 1;
5788 first_error (_("parse error"));
5792 /* Successfully parsed the operands. Update args. */
5798 first_error (_("expected comma"));
5802 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
5806 /* Use this macro when the operand constraints are different
5807 for ARM and THUMB (e.g. ldrd). */
5808 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
5809 ((arm_operand) | ((thumb_operand) << 16))
/* Matcher codes for parse_operands.  Each code describes one operand
   position in an instruction's grammar; codes prefixed OP_o* mark
   operands that may be omitted.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
5940 /* Generic instruction operand parser. This does no encoding and no
5941 semantic validation; it merely squirrels values away in the inst
5942 structure. Returns SUCCESS or FAIL depending on whether the
5943 specified grammar matched. */
5945 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
5947 unsigned const int *upat
= pattern
;
5948 char *backtrack_pos
= 0;
5949 const char *backtrack_error
= 0;
5950 int i
, val
, backtrack_index
= 0;
5951 enum arm_reg_type rtype
;
5952 parse_operand_result result
;
5953 unsigned int op_parse_code
;
5955 #define po_char_or_fail(chr) \
5958 if (skip_past_char (&str, chr) == FAIL) \
5963 #define po_reg_or_fail(regtype) \
5966 val = arm_typed_reg_parse (& str, regtype, & rtype, \
5967 & inst.operands[i].vectype); \
5970 first_error (_(reg_expected_msgs[regtype])); \
5973 inst.operands[i].reg = val; \
5974 inst.operands[i].isreg = 1; \
5975 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5976 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5977 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5978 || rtype == REG_TYPE_VFD \
5979 || rtype == REG_TYPE_NQ); \
5983 #define po_reg_or_goto(regtype, label) \
5986 val = arm_typed_reg_parse (& str, regtype, & rtype, \
5987 & inst.operands[i].vectype); \
5991 inst.operands[i].reg = val; \
5992 inst.operands[i].isreg = 1; \
5993 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5994 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5995 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5996 || rtype == REG_TYPE_VFD \
5997 || rtype == REG_TYPE_NQ); \
6001 #define po_imm_or_fail(min, max, popt) \
6004 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6006 inst.operands[i].imm = val; \
6010 #define po_scalar_or_goto(elsz, label) \
6013 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6016 inst.operands[i].reg = val; \
6017 inst.operands[i].isscalar = 1; \
6021 #define po_misc_or_fail(expr) \
6029 #define po_misc_or_fail_no_backtrack(expr) \
6033 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6034 backtrack_pos = 0; \
6035 if (result != PARSE_OPERAND_SUCCESS) \
6040 #define po_barrier_or_imm(str) \
6043 val = parse_barrier (&str); \
6046 if (ISALPHA (*str)) \
6053 if ((inst.instruction & 0xf0) == 0x60 \
6056 /* ISB can only take SY as an option. */ \
6057 inst.error = _("invalid barrier type"); \
6064 skip_whitespace (str
);
6066 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6068 op_parse_code
= upat
[i
];
6069 if (op_parse_code
>= 1<<16)
6070 op_parse_code
= thumb
? (op_parse_code
>> 16)
6071 : (op_parse_code
& ((1<<16)-1));
6073 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6075 /* Remember where we are in case we need to backtrack. */
6076 gas_assert (!backtrack_pos
);
6077 backtrack_pos
= str
;
6078 backtrack_error
= inst
.error
;
6079 backtrack_index
= i
;
6082 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6083 po_char_or_fail (',');
6085 switch (op_parse_code
)
6093 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6094 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6095 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6096 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6097 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6098 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6100 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6102 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6104 /* Also accept generic coprocessor regs for unknown registers. */
6106 po_reg_or_fail (REG_TYPE_CN
);
6108 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6109 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6110 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6111 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6112 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6113 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6114 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6115 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6116 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6117 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6119 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6121 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6122 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6124 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6126 /* Neon scalar. Using an element size of 8 means that some invalid
6127 scalars are accepted here, so deal with those in later code. */
6128 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6132 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6135 po_imm_or_fail (0, 0, TRUE
);
6140 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6145 po_scalar_or_goto (8, try_rr
);
6148 po_reg_or_fail (REG_TYPE_RN
);
6154 po_scalar_or_goto (8, try_nsdq
);
6157 po_reg_or_fail (REG_TYPE_NSDQ
);
6163 po_scalar_or_goto (8, try_ndq
);
6166 po_reg_or_fail (REG_TYPE_NDQ
);
6172 po_scalar_or_goto (8, try_vfd
);
6175 po_reg_or_fail (REG_TYPE_VFD
);
6180 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6181 not careful then bad things might happen. */
6182 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6187 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6190 /* There's a possibility of getting a 64-bit immediate here, so
6191 we need special handling. */
6192 if (parse_big_immediate (&str
, i
) == FAIL
)
6194 inst
.error
= _("immediate value is out of range");
6202 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6205 po_imm_or_fail (0, 63, TRUE
);
6210 po_char_or_fail ('[');
6211 po_reg_or_fail (REG_TYPE_RN
);
6212 po_char_or_fail (']');
6218 po_reg_or_fail (REG_TYPE_RN
);
6219 if (skip_past_char (&str
, '!') == SUCCESS
)
6220 inst
.operands
[i
].writeback
= 1;
6224 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6225 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6226 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6227 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6228 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6229 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6230 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6231 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6232 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6233 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6234 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6235 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6237 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6239 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6240 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6242 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6243 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6244 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6246 /* Immediate variants */
6248 po_char_or_fail ('{');
6249 po_imm_or_fail (0, 255, TRUE
);
6250 po_char_or_fail ('}');
6254 /* The expression parser chokes on a trailing !, so we have
6255 to find it first and zap it. */
6258 while (*s
&& *s
!= ',')
6263 inst
.operands
[i
].writeback
= 1;
6265 po_imm_or_fail (0, 31, TRUE
);
6273 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6278 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6283 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6285 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6287 val
= parse_reloc (&str
);
6290 inst
.error
= _("unrecognized relocation suffix");
6293 else if (val
!= BFD_RELOC_UNUSED
)
6295 inst
.operands
[i
].imm
= val
;
6296 inst
.operands
[i
].hasreloc
= 1;
6301 /* Operand for MOVW or MOVT. */
6303 po_misc_or_fail (parse_half (&str
));
6306 /* Register or expression. */
6307 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6308 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6310 /* Register or immediate. */
6311 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6312 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6314 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6316 if (!is_immediate_prefix (*str
))
6319 val
= parse_fpa_immediate (&str
);
6322 /* FPA immediates are encoded as registers 8-15.
6323 parse_fpa_immediate has already applied the offset. */
6324 inst
.operands
[i
].reg
= val
;
6325 inst
.operands
[i
].isreg
= 1;
6328 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6329 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6331 /* Two kinds of register. */
6334 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6336 || (rege
->type
!= REG_TYPE_MMXWR
6337 && rege
->type
!= REG_TYPE_MMXWC
6338 && rege
->type
!= REG_TYPE_MMXWCG
))
6340 inst
.error
= _("iWMMXt data or control register expected");
6343 inst
.operands
[i
].reg
= rege
->number
;
6344 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
6350 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6352 || (rege
->type
!= REG_TYPE_MMXWC
6353 && rege
->type
!= REG_TYPE_MMXWCG
))
6355 inst
.error
= _("iWMMXt control register expected");
6358 inst
.operands
[i
].reg
= rege
->number
;
6359 inst
.operands
[i
].isreg
= 1;
6364 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
6365 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
6366 case OP_oROR
: val
= parse_ror (&str
); break;
6367 case OP_PSR
: val
= parse_psr (&str
); break;
6368 case OP_COND
: val
= parse_cond (&str
); break;
6369 case OP_oBARRIER_I15
:
6370 po_barrier_or_imm (str
); break;
6372 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
6377 po_reg_or_goto (REG_TYPE_VFC
, try_psr
);
6378 inst
.operands
[i
].isvec
= 1; /* Mark VFP control reg as vector. */
6381 val
= parse_psr (&str
);
6385 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
6388 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6390 if (strncasecmp (str
, "APSR_", 5) == 0)
6397 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
6398 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
6399 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
6400 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
6401 default: found
= 16;
6405 inst
.operands
[i
].isvec
= 1;
6406 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
6407 inst
.operands
[i
].reg
= REG_PC
;
6414 po_misc_or_fail (parse_tb (&str
));
6417 /* Register lists. */
6419 val
= parse_reg_list (&str
);
6422 inst
.operands
[1].writeback
= 1;
6428 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
6432 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
6436 /* Allow Q registers too. */
6437 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
6442 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
6444 inst
.operands
[i
].issingle
= 1;
6449 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
6454 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
6455 &inst
.operands
[i
].vectype
);
6458 /* Addressing modes */
6460 po_misc_or_fail (parse_address (&str
, i
));
6464 po_misc_or_fail_no_backtrack (
6465 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
6469 po_misc_or_fail_no_backtrack (
6470 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
6474 po_misc_or_fail_no_backtrack (
6475 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
6479 po_misc_or_fail (parse_shifter_operand (&str
, i
));
6483 po_misc_or_fail_no_backtrack (
6484 parse_shifter_operand_group_reloc (&str
, i
));
6488 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
6492 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
6496 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
6500 as_fatal (_("unhandled operand code %d"), op_parse_code
);
6503 /* Various value-based sanity checks and shared operations. We
6504 do not signal immediate failures for the register constraints;
6505 this allows a syntax error to take precedence. */
6506 switch (op_parse_code
)
6514 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
6515 inst
.error
= BAD_PC
;
6520 if (inst
.operands
[i
].isreg
)
6522 if (inst
.operands
[i
].reg
== REG_PC
)
6523 inst
.error
= BAD_PC
;
6524 else if (inst
.operands
[i
].reg
== REG_SP
)
6525 inst
.error
= BAD_SP
;
6530 if (inst
.operands
[i
].isreg
6531 && inst
.operands
[i
].reg
== REG_PC
6532 && (inst
.operands
[i
].writeback
|| thumb
))
6533 inst
.error
= BAD_PC
;
6542 case OP_oBARRIER_I15
:
6551 inst
.operands
[i
].imm
= val
;
6558 /* If we get here, this operand was successfully parsed. */
6559 inst
.operands
[i
].present
= 1;
6563 inst
.error
= BAD_ARGS
;
6568 /* The parse routine should already have set inst.error, but set a
6569 default here just in case. */
6571 inst
.error
= _("syntax error");
6575 /* Do not backtrack over a trailing optional argument that
6576 absorbed some text. We will only fail again, with the
6577 'garbage following instruction' error message, which is
6578 probably less helpful than the current one. */
6579 if (backtrack_index
== i
&& backtrack_pos
!= str
6580 && upat
[i
+1] == OP_stop
)
6583 inst
.error
= _("syntax error");
6587 /* Try again, skipping the optional argument at backtrack_pos. */
6588 str
= backtrack_pos
;
6589 inst
.error
= backtrack_error
;
6590 inst
.operands
[backtrack_index
].present
= 0;
6591 i
= backtrack_index
;
6595 /* Check that we have parsed all the arguments. */
6596 if (*str
!= '\0' && !inst
.error
)
6597 inst
.error
= _("garbage following instruction");
6599 return inst
.error
? FAIL
: SUCCESS
;
/* The po_* helper macros are local to parse_operands; undefine them
   all here so they cannot leak into later code.  The previous list
   undefined "po_scalar_or_fail" (a name that is never defined -- the
   macro is po_scalar_or_goto) and omitted the po_misc_* helpers.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
#undef po_barrier_or_imm
6609 /* Shorthand macro for instruction encoding functions issuing errors. */
6610 #define constraint(expr, err) \
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   NOTE: on failure this sets inst.error and returns from the
   *calling* encoder function.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only warns when -mwarn-deprecated is in effect.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_warn (_("use of r13 is deprecated"));	\
  while (0)
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bits.  Arguments are fully
   parenthesized so expression arguments expand safely.  N must be in
   1..31: with N == 0 the right shift would be by 32 bits, which is
   undefined behaviour on a 32-bit type (callers must special-case a
   zero rotation).  */
#define rotate_left(v, n) ((v) << (n) | (v) >> (32 - (n)))
6645 /* If VAL can be encoded in the immediate field of an ARM instruction,
6646 return the encoded form. Otherwise, return FAIL. */
6649 encode_arm_immediate (unsigned int val
)
6653 for (i
= 0; i
< 32; i
+= 2)
6654 if ((a
= rotate_left (val
, i
)) <= 0xff)
6655 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  The Thumb-2
   modified-immediate forms are: a plain 0..255 byte, a rotated byte,
   and the replicated patterns 00XY00XY, XY00XY00 and XYXYXYXY.  */
static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  /* Plain 8-bit constant.  */
  if (val <= 0xff)
    return val;

  /* Rotated 8-bit constant: try each right-rotation amount.  */
  for (i = 1; i <= 24; i++)
    {
      a = val >> i;
      if ((val & ~(0xff << i)) == 0)
	return ((val >> i) & 0x7f) | ((32 - i) << 7);
    }

  /* Replicated byte patterns.  */
  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;			/* pattern 00XY00XY */
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;			/* pattern XYXYXYXY */

  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);		/* pattern XY00XY00 */

  return FAIL;
}
/* Encode a VFP SP or DP register number (REG) into inst.instruction
   at position POS.  D registers 16..31 require the VFPv3 D32
   extension; when one is used, the fact is recorded in the
   arch-used feature sets so the object file is tagged correctly.  */
static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* S registers split into a 4-bit field and a 1-bit low field;
     D registers split into a 4-bit field and a 1-bit high field.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  Operand I must already have
   its shift_kind (and imm/immisreg) fields set by the parser.  */
static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  /* Register-specified shift: Rs goes in bits 11:8.  */
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount is filled in at fixup time.  */
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
/* Encode operand I as an ARM data-processing shifter operand:
   either a (possibly shifted) register, or an immediate whose value
   is resolved later.  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    inst.instruction |= INST_IMMEDIATE;
}
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the pre/post-index and write-back
   bits common to both addressing modes; IS_T rejects forms invalid
   for the user-mode (T-suffixed) load/store variants.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  gas_assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  /* ldrt/strt always use post-indexed addressing.  */
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register (bits 15:12) is the same as a
     write-back base (bits 19:16) -- the result is unpredictable.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX encodes as ROR #0.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_warn (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || inst.operands[i].reg == REG_PC),
		  BAD_PC_ADDRESSING);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Default coprocessor-offset relocation, per encoding state.  */
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* Select the load bit for the encoding in use: 32-bit Thumb-2,
     16-bit Thumb, or ARM.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
    }

  /* No single-instruction alternative: place the constant in the
     literal pool and load it PC-relative.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return TRUE;
    }

  /* Rewrite operand 1 as a [pc, #offset] address; the offset is
     filled in later through the pc-relative relocation below.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
/* Functions for instruction encoding, sorted by sub-architecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */

static void
do_rd (void)
{
  /* Rd in bits 15:12.  */
  inst.instruction |= inst.operands[0].reg << 12;
}
7067 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7068 inst
.instruction
|= inst
.operands
[1].reg
;
7074 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7075 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7081 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7082 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16), with the extra
   operand-overlap restriction that applies to SWP/SWPB.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is deprecated for ARMv6* and ARMv7.  */
      if (warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
	as_warn (_("swp{b} use is deprecated for this architecture"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
7109 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7110 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7111 inst
.instruction
|= inst
.operands
[2].reg
;
/* Encode Rm (3:0), Rd (15:12) and Rn (19:16); the address operand
   must be a plain [Rn] with no offset, and Rn must not be the PC.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Any offset expression must be absent or literally zero.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
/* Encode a single immediate operand into the low bits.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
/* Encode Rd (bits 15:12) plus a coprocessor address operand.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Writeback and unindexed forms are both permitted here.  */
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they
   call.)  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* The ARM-state PC reads 8 bytes ahead of the instruction itself.  */
  inst.reloc.exp.X_add_number -= 8;
}
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel	       = 1;
  /* Two ARM instructions are emitted for the wide-range form.  */
  inst.size		       = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;
}
/* Data-processing arithmetic: Rd (15:12), Rn (19:16), shifter
   operand.  The two-operand form "op Rd, <shifter>" duplicates Rd
   into Rn.  */
static void
do_arit (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
/* Encode the optional barrier-type operand of DMB/DSB/ISB; with no
   operand the SY (full system) option 0xf is assumed.  */
static void
do_barrier (void)
{
  if (inst.operands[0].present)
    {
      /* NOTE(review): "imm > 0xf && imm < 0x0" can never be true, so
	 this constraint is dead and the "bad barrier type" error can
	 never fire here -- confirm the intended range check.  */
      constraint ((inst.instruction & 0xf0) != 0x40
		  && inst.operands[0].imm > 0xf
		  && inst.operands[0].imm < 0x0,
		  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    }
  else
    inst.instruction |= 0xf;
}
7205 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
7206 constraint (msb
> 32, _("bit-field extends past end of register"));
7207 /* The instruction encoding stores the LSB and MSB,
7208 not the LSB and width. */
7209 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7210 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
7211 inst
.instruction
|= (msb
- 1) << 16;
7219 /* #0 in second position is alternative syntax for bfc, which is
7220 the same instruction but with REG_PC in the Rm field. */
7221 if (!inst
.operands
[1].isreg
)
7222 inst
.operands
[1].reg
= REG_PC
;
7224 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
7225 constraint (msb
> 32, _("bit-field extends past end of register"));
7226 /* The instruction encoding stores the LSB and MSB,
7227 not the LSB and width. */
7228 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7229 inst
.instruction
|= inst
.operands
[1].reg
;
7230 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
7231 inst
.instruction
|= (msb
- 1) << 16;
7237 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
7238 _("bit-field extends past end of register"));
7239 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7240 inst
.instruction
|= inst
.operands
[1].reg
;
7241 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
7242 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
7245 /* ARM V5 breakpoint instruction (argument parse)
7246 BKPT <16 bit unsigned immediate>
7247 Instruction is not conditional.
7248 The bit pattern given in insns[] has the COND_ALWAYS condition,
7249 and it is an error if the caller tried to override that. */
7254 /* Top 12 of 16 bits to bits 19:8. */
7255 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
7257 /* Bottom 4 of 16 bits to bits 3:0. */
7258 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
7262 encode_branch (int default_reloc
)
7264 if (inst
.operands
[0].hasreloc
)
7266 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
7267 _("the only suffix valid here is '(plt)'"));
7268 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
7272 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
7274 inst
.reloc
.pc_rel
= 1;
7281 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
7282 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
7285 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
7292 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
7294 if (inst
.cond
== COND_ALWAYS
)
7295 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
7297 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
7301 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
7304 /* ARM V5 branch-link-exchange instruction (argument parse)
7305 BLX <target_addr> ie BLX(1)
7306 BLX{<condition>} <Rm> ie BLX(2)
7307 Unfortunately, there are two different opcodes for this mnemonic.
7308 So, the insns[].value is not used, and the code here zaps values
7309 into inst.instruction.
7310 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
7315 if (inst
.operands
[0].isreg
)
7317 /* Arg is a register; the opcode provided by insns[] is correct.
7318 It is not illegal to do "blx pc", just useless. */
7319 if (inst
.operands
[0].reg
== REG_PC
)
7320 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7322 inst
.instruction
|= inst
.operands
[0].reg
;
7326 /* Arg is an address; this instruction cannot be executed
7327 conditionally, and the opcode must be adjusted.
7328 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7329 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
7330 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
7331 inst
.instruction
= 0xfa000000;
7332 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
7339 bfd_boolean want_reloc
;
7341 if (inst
.operands
[0].reg
== REG_PC
)
7342 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7344 inst
.instruction
|= inst
.operands
[0].reg
;
7345 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
7346 it is for ARMv4t or earlier. */
7347 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
7348 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
7352 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
7357 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
7361 /* ARM v5TEJ. Jump to Jazelle code. */
7366 if (inst
.operands
[0].reg
== REG_PC
)
7367 as_tsktsk (_("use of r15 in bxj is not really useful"));
7369 inst
.instruction
|= inst
.operands
[0].reg
;
7372 /* Co-processor data operation:
7373 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7374 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
7378 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7379 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
7380 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7381 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7382 inst
.instruction
|= inst
.operands
[4].reg
;
7383 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
7389 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7390 encode_arm_shifter_operand (1);
7393 /* Transfer between coprocessor and ARM registers.
7394 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7399 No special properties. */
7406 Rd
= inst
.operands
[2].reg
;
7409 if (inst
.instruction
== 0xee000010
7410 || inst
.instruction
== 0xfe000010)
7412 reject_bad_reg (Rd
);
7415 constraint (Rd
== REG_SP
, BAD_SP
);
7420 if (inst
.instruction
== 0xe000010)
7421 constraint (Rd
== REG_PC
, BAD_PC
);
7425 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7426 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
7427 inst
.instruction
|= Rd
<< 12;
7428 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7429 inst
.instruction
|= inst
.operands
[4].reg
;
7430 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
7433 /* Transfer between coprocessor register and pair of ARM registers.
7434 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7439 Two XScale instructions are special cases of these:
7441 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7442 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7444 Result unpredictable if Rd or Rn is R15. */
7451 Rd
= inst
.operands
[2].reg
;
7452 Rn
= inst
.operands
[3].reg
;
7456 reject_bad_reg (Rd
);
7457 reject_bad_reg (Rn
);
7461 constraint (Rd
== REG_PC
, BAD_PC
);
7462 constraint (Rn
== REG_PC
, BAD_PC
);
7465 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7466 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
7467 inst
.instruction
|= Rd
<< 12;
7468 inst
.instruction
|= Rn
<< 16;
7469 inst
.instruction
|= inst
.operands
[4].reg
;
7475 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
7476 if (inst
.operands
[1].present
)
7478 inst
.instruction
|= CPSI_MMOD
;
7479 inst
.instruction
|= inst
.operands
[1].imm
;
7486 inst
.instruction
|= inst
.operands
[0].imm
;
7492 /* There is no IT instruction in ARM mode. We
7493 process it to do the validation as if in
7494 thumb mode, just in case the code gets
7495 assembled for thumb using the unified syntax. */
7500 set_it_insn_type (IT_INSN
);
7501 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
7502 now_it
.cc
= inst
.operands
[0].imm
;
7509 int base_reg
= inst
.operands
[0].reg
;
7510 int range
= inst
.operands
[1].imm
;
7512 inst
.instruction
|= base_reg
<< 16;
7513 inst
.instruction
|= range
;
7515 if (inst
.operands
[1].writeback
)
7516 inst
.instruction
|= LDM_TYPE_2_OR_3
;
7518 if (inst
.operands
[0].writeback
)
7520 inst
.instruction
|= WRITE_BACK
;
7521 /* Check for unpredictable uses of writeback. */
7522 if (inst
.instruction
& LOAD_BIT
)
7524 /* Not allowed in LDM type 2. */
7525 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
7526 && ((range
& (1 << REG_PC
)) == 0))
7527 as_warn (_("writeback of base register is UNPREDICTABLE"));
7528 /* Only allowed if base reg not in list for other types. */
7529 else if (range
& (1 << base_reg
))
7530 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7534 /* Not allowed for type 2. */
7535 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
7536 as_warn (_("writeback of base register is UNPREDICTABLE"));
7537 /* Only allowed if base reg not in list, or first in list. */
7538 else if ((range
& (1 << base_reg
))
7539 && (range
& ((1 << base_reg
) - 1)))
7540 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7545 /* ARMv5TE load-consecutive (argument parse)
7554 constraint (inst
.operands
[0].reg
% 2 != 0,
7555 _("first destination register must be even"));
7556 constraint (inst
.operands
[1].present
7557 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
7558 _("can only load two consecutive registers"));
7559 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
7560 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
7562 if (!inst
.operands
[1].present
)
7563 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
7565 if (inst
.instruction
& LOAD_BIT
)
7567 /* encode_arm_addr_mode_3 will diagnose overlap between the base
7568 register and the first register written; we have to diagnose
7569 overlap between the base and the second register written here. */
7571 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
7572 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
7573 as_warn (_("base register written back, and overlaps "
7574 "second destination register"));
7576 /* For an index-register load, the index register must not overlap the
7577 destination (even if not write-back). */
7578 else if (inst
.operands
[2].immisreg
7579 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
7580 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
7581 as_warn (_("index register overlaps destination register"));
7584 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7585 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
7591 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
7592 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
7593 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
7594 || inst
.operands
[1].negative
7595 /* This can arise if the programmer has written
7597 or if they have mistakenly used a register name as the last
7600 It is very difficult to distinguish between these two cases
7601 because "rX" might actually be a label. ie the register
7602 name has been occluded by a symbol of the same name. So we
7603 just generate a general 'bad addressing mode' type error
7604 message and leave it up to the programmer to discover the
7605 true cause and fix their mistake. */
7606 || (inst
.operands
[1].reg
== REG_PC
),
7609 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7610 || inst
.reloc
.exp
.X_add_number
!= 0,
7611 _("offset must be zero in ARM encoding"));
7613 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
7615 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7616 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7617 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7623 constraint (inst
.operands
[0].reg
% 2 != 0,
7624 _("even register required"));
7625 constraint (inst
.operands
[1].present
7626 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
7627 _("can only load two consecutive registers"));
7628 /* If op 1 were present and equal to PC, this function wouldn't
7629 have been called in the first place. */
7630 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
7632 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7633 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7639 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7640 if (!inst
.operands
[1].isreg
)
7641 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
7643 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
7649 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7651 if (inst
.operands
[1].preind
)
7653 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7654 || inst
.reloc
.exp
.X_add_number
!= 0,
7655 _("this instruction requires a post-indexed address"));
7657 inst
.operands
[1].preind
= 0;
7658 inst
.operands
[1].postind
= 1;
7659 inst
.operands
[1].writeback
= 1;
7661 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7662 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
7665 /* Halfword and signed-byte load/store operations. */
7670 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
7671 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7672 if (!inst
.operands
[1].isreg
)
7673 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
7675 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
7681 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7683 if (inst
.operands
[1].preind
)
7685 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7686 || inst
.reloc
.exp
.X_add_number
!= 0,
7687 _("this instruction requires a post-indexed address"));
7689 inst
.operands
[1].preind
= 0;
7690 inst
.operands
[1].postind
= 1;
7691 inst
.operands
[1].writeback
= 1;
7693 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7694 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
7697 /* Co-processor register load/store.
7698 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7702 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7703 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7704 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7710 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7711 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7712 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
7713 && !(inst
.instruction
& 0x00400000))
7714 as_tsktsk (_("Rd and Rm should be different in mla"));
7716 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7717 inst
.instruction
|= inst
.operands
[1].reg
;
7718 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7719 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7725 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7726 encode_arm_shifter_operand (1);
7729 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7736 top
= (inst
.instruction
& 0x00400000) != 0;
7737 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
7738 _(":lower16: not allowed this instruction"));
7739 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
7740 _(":upper16: not allowed instruction"));
7741 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7742 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7744 imm
= inst
.reloc
.exp
.X_add_number
;
7745 /* The value is in two pieces: 0:11, 16:19. */
7746 inst
.instruction
|= (imm
& 0x00000fff);
7747 inst
.instruction
|= (imm
& 0x0000f000) << 4;
7751 static void do_vfp_nsyn_opcode (const char *);
7754 do_vfp_nsyn_mrs (void)
7756 if (inst
.operands
[0].isvec
)
7758 if (inst
.operands
[1].reg
!= 1)
7759 first_error (_("operand 1 must be FPSCR"));
7760 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
7761 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
7762 do_vfp_nsyn_opcode ("fmstat");
7764 else if (inst
.operands
[1].isvec
)
7765 do_vfp_nsyn_opcode ("fmrx");
7773 do_vfp_nsyn_msr (void)
7775 if (inst
.operands
[0].isvec
)
7776 do_vfp_nsyn_opcode ("fmxr");
7786 unsigned Rt
= inst
.operands
[0].reg
;
7788 if (thumb_mode
&& inst
.operands
[0].reg
== REG_SP
)
7790 inst
.error
= BAD_SP
;
7794 /* APSR_ sets isvec. All other refs to PC are illegal. */
7795 if (!inst
.operands
[0].isvec
&& inst
.operands
[0].reg
== REG_PC
)
7797 inst
.error
= BAD_PC
;
7801 if (inst
.operands
[1].reg
!= 1)
7802 first_error (_("operand 1 must be FPSCR"));
7804 inst
.instruction
|= (Rt
<< 12);
7810 unsigned Rt
= inst
.operands
[1].reg
;
7813 reject_bad_reg (Rt
);
7814 else if (Rt
== REG_PC
)
7816 inst
.error
= BAD_PC
;
7820 if (inst
.operands
[0].reg
!= 1)
7821 first_error (_("operand 0 must be FPSCR"));
7823 inst
.instruction
|= (Rt
<< 12);
7829 if (do_vfp_nsyn_mrs () == SUCCESS
)
7832 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7833 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
7835 _("'CPSR' or 'SPSR' expected"));
7836 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
7837 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7838 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
7841 /* Two possible forms:
7842 "{C|S}PSR_<field>, Rm",
7843 "{C|S}PSR_f, #expression". */
7848 if (do_vfp_nsyn_msr () == SUCCESS
)
7851 inst
.instruction
|= inst
.operands
[0].imm
;
7852 if (inst
.operands
[1].isreg
)
7853 inst
.instruction
|= inst
.operands
[1].reg
;
7856 inst
.instruction
|= INST_IMMEDIATE
;
7857 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7858 inst
.reloc
.pc_rel
= 0;
7865 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
7867 if (!inst
.operands
[2].present
)
7868 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
7869 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7870 inst
.instruction
|= inst
.operands
[1].reg
;
7871 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7873 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7874 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7875 as_tsktsk (_("Rd and Rm should be different in mul"));
7878 /* Long Multiply Parser
7879 UMULL RdLo, RdHi, Rm, Rs
7880 SMULL RdLo, RdHi, Rm, Rs
7881 UMLAL RdLo, RdHi, Rm, Rs
7882 SMLAL RdLo, RdHi, Rm, Rs. */
7887 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7888 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7889 inst
.instruction
|= inst
.operands
[2].reg
;
7890 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7892 /* rdhi and rdlo must be different. */
7893 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7894 as_tsktsk (_("rdhi and rdlo must be different"));
7896 /* rdhi, rdlo and rm must all be different before armv6. */
7897 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
7898 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
7899 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7900 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7906 if (inst
.operands
[0].present
7907 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
7909 /* Architectural NOP hints are CPSR sets with no bits selected. */
7910 inst
.instruction
&= 0xf0000000;
7911 inst
.instruction
|= 0x0320f000;
7912 if (inst
.operands
[0].present
)
7913 inst
.instruction
|= inst
.operands
[0].imm
;
7917 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7918 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7919 Condition defaults to COND_ALWAYS.
7920 Error if Rd, Rn or Rm are R15. */
7925 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7926 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7927 inst
.instruction
|= inst
.operands
[2].reg
;
7928 if (inst
.operands
[3].present
)
7929 encode_arm_shift (3);
7932 /* ARM V6 PKHTB (Argument Parse). */
7937 if (!inst
.operands
[3].present
)
7939 /* If the shift specifier is omitted, turn the instruction
7940 into pkhbt rd, rm, rn. */
7941 inst
.instruction
&= 0xfff00010;
7942 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7943 inst
.instruction
|= inst
.operands
[1].reg
;
7944 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7948 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7949 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7950 inst
.instruction
|= inst
.operands
[2].reg
;
7951 encode_arm_shift (3);
7955 /* ARMv5TE: Preload-Cache
7959 Syntactically, like LDR with B=1, W=0, L=1. */
7964 constraint (!inst
.operands
[0].isreg
,
7965 _("'[' expected after PLD mnemonic"));
7966 constraint (inst
.operands
[0].postind
,
7967 _("post-indexed expression used in preload instruction"));
7968 constraint (inst
.operands
[0].writeback
,
7969 _("writeback used in preload instruction"));
7970 constraint (!inst
.operands
[0].preind
,
7971 _("unindexed addressing used in preload instruction"));
7972 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7975 /* ARMv7: PLI <addr_mode> */
7979 constraint (!inst
.operands
[0].isreg
,
7980 _("'[' expected after PLI mnemonic"));
7981 constraint (inst
.operands
[0].postind
,
7982 _("post-indexed expression used in preload instruction"));
7983 constraint (inst
.operands
[0].writeback
,
7984 _("writeback used in preload instruction"));
7985 constraint (!inst
.operands
[0].preind
,
7986 _("unindexed addressing used in preload instruction"));
7987 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7988 inst
.instruction
&= ~PRE_INDEX
;
7994 inst
.operands
[1] = inst
.operands
[0];
7995 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
7996 inst
.operands
[0].isreg
= 1;
7997 inst
.operands
[0].writeback
= 1;
7998 inst
.operands
[0].reg
= REG_SP
;
8002 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8003 word at the specified address and the following word
8005 Unconditionally executed.
8006 Error if Rn is R15. */
8011 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8012 if (inst
.operands
[0].writeback
)
8013 inst
.instruction
|= WRITE_BACK
;
8016 /* ARM V6 ssat (argument parse). */
8021 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8022 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
8023 inst
.instruction
|= inst
.operands
[2].reg
;
8025 if (inst
.operands
[3].present
)
8026 encode_arm_shift (3);
8029 /* ARM V6 usat (argument parse). */
8034 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8035 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
8036 inst
.instruction
|= inst
.operands
[2].reg
;
8038 if (inst
.operands
[3].present
)
8039 encode_arm_shift (3);
8042 /* ARM V6 ssat16 (argument parse). */
8047 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8048 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
8049 inst
.instruction
|= inst
.operands
[2].reg
;
8055 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8056 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
8057 inst
.instruction
|= inst
.operands
[2].reg
;
8060 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
8061 preserving the other bits.
8063 setend <endian_specifier>, where <endian_specifier> is either
8069 if (inst
.operands
[0].imm
)
8070 inst
.instruction
|= 0x200;
8076 unsigned int Rm
= (inst
.operands
[1].present
8077 ? inst
.operands
[1].reg
8078 : inst
.operands
[0].reg
);
8080 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8081 inst
.instruction
|= Rm
;
8082 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
8084 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8085 inst
.instruction
|= SHIFT_BY_REG
;
8088 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
8094 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
8095 inst
.reloc
.pc_rel
= 0;
8101 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
8102 inst
.reloc
.pc_rel
= 0;
8105 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8106 SMLAxy{cond} Rd,Rm,Rs,Rn
8107 SMLAWy{cond} Rd,Rm,Rs,Rn
8108 Error if any register is R15. */
8113 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8114 inst
.instruction
|= inst
.operands
[1].reg
;
8115 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8116 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8119 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8120 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8121 Error if any register is R15.
8122 Warning if Rdlo == Rdhi. */
8127 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8128 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8129 inst
.instruction
|= inst
.operands
[2].reg
;
8130 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
8132 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8133 as_tsktsk (_("rdhi and rdlo must be different"));
8136 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8137 SMULxy{cond} Rd,Rm,Rs
8138 Error if any register is R15. */
8143 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8144 inst
.instruction
|= inst
.operands
[1].reg
;
8145 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8148 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8149 the same for both ARM and Thumb-2. */
8156 if (inst
.operands
[0].present
)
8158 reg
= inst
.operands
[0].reg
;
8159 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
8164 inst
.instruction
|= reg
<< 16;
8165 inst
.instruction
|= inst
.operands
[1].imm
;
8166 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
8167 inst
.instruction
|= WRITE_BACK
;
8170 /* ARM V6 strex (argument parse). */
8175 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
8176 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
8177 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
8178 || inst
.operands
[2].negative
8179 /* See comment in do_ldrex(). */
8180 || (inst
.operands
[2].reg
== REG_PC
),
8183 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
8184 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
8186 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8187 || inst
.reloc
.exp
.X_add_number
!= 0,
8188 _("offset must be zero in ARM encoding"));
8190 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8191 inst
.instruction
|= inst
.operands
[1].reg
;
8192 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8193 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8199 constraint (inst
.operands
[1].reg
% 2 != 0,
8200 _("even register required"));
8201 constraint (inst
.operands
[2].present
8202 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
8203 _("can only store two consecutive registers"));
8204 /* If op 2 were present and equal to PC, this function wouldn't
8205 have been called in the first place. */
8206 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
8208 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
8209 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
8210 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
8213 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8214 inst
.instruction
|= inst
.operands
[1].reg
;
8215 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8218 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8219 extends it to 32-bits, and adds the result to a value in another
8220 register. You can specify a rotation by 0, 8, 16, or 24 bits
8221 before extracting the 16-bit value.
8222 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8223 Condition defaults to COND_ALWAYS.
8224 Error if any register uses R15. */
8229 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8230 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8231 inst
.instruction
|= inst
.operands
[2].reg
;
8232 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
8237 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8238 Condition defaults to COND_ALWAYS.
8239 Error if any register uses R15. */
8244 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8245 inst
.instruction
|= inst
.operands
[1].reg
;
8246 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
8249 /* VFP instructions. In a logical order: SP variant first, monad
8250 before dyad, arithmetic then move then load/store. */
8253 do_vfp_sp_monadic (void)
8255 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8256 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
8260 do_vfp_sp_dyadic (void)
8262 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8263 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
8264 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
8268 do_vfp_sp_compare_z (void)
8270 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8274 do_vfp_dp_sp_cvt (void)
8276 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8277 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
8281 do_vfp_sp_dp_cvt (void)
8283 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8284 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
8288 do_vfp_reg_from_sp (void)
8290 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8291 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
8295 do_vfp_reg2_from_sp2 (void)
8297 constraint (inst
.operands
[2].imm
!= 2,
8298 _("only two consecutive VFP SP registers allowed here"));
8299 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8300 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8301 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
8305 do_vfp_sp_from_reg (void)
8307 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
8308 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8312 do_vfp_sp2_from_reg2 (void)
8314 constraint (inst
.operands
[0].imm
!= 2,
8315 _("only two consecutive VFP SP registers allowed here"));
8316 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
8317 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8318 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8322 do_vfp_sp_ldst (void)
8324 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8325 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
8329 do_vfp_dp_ldst (void)
8331 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8332 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
8337 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
8339 if (inst
.operands
[0].writeback
)
8340 inst
.instruction
|= WRITE_BACK
;
8342 constraint (ldstm_type
!= VFP_LDSTMIA
,
8343 _("this addressing mode requires base-register writeback"));
8344 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8345 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
8346 inst
.instruction
|= inst
.operands
[1].imm
;
8350 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
8354 if (inst
.operands
[0].writeback
)
8355 inst
.instruction
|= WRITE_BACK
;
8357 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
8358 _("this addressing mode requires base-register writeback"));
8360 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8361 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8363 count
= inst
.operands
[1].imm
<< 1;
8364 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
8367 inst
.instruction
|= count
;
8371 do_vfp_sp_ldstmia (void)
8373 vfp_sp_ldstm (VFP_LDSTMIA
);
8377 do_vfp_sp_ldstmdb (void)
8379 vfp_sp_ldstm (VFP_LDSTMDB
);
8383 do_vfp_dp_ldstmia (void)
8385 vfp_dp_ldstm (VFP_LDSTMIA
);
8389 do_vfp_dp_ldstmdb (void)
8391 vfp_dp_ldstm (VFP_LDSTMDB
);
8395 do_vfp_xp_ldstmia (void)
8397 vfp_dp_ldstm (VFP_LDSTMIAX
);
8401 do_vfp_xp_ldstmdb (void)
8403 vfp_dp_ldstm (VFP_LDSTMDBX
);
8407 do_vfp_dp_rd_rm (void)
8409 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8410 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
8414 do_vfp_dp_rn_rd (void)
8416 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
8417 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8421 do_vfp_dp_rd_rn (void)
8423 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8424 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
8428 do_vfp_dp_rd_rn_rm (void)
8430 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8431 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
8432 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
8438 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8442 do_vfp_dp_rm_rd_rn (void)
8444 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
8445 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8446 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
8449 /* VFPv3 instructions. */
8451 do_vfp_sp_const (void)
8453 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8454 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
8455 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
8459 do_vfp_dp_const (void)
8461 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8462 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
8463 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
8467 vfp_conv (int srcsize
)
8469 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
8470 inst
.instruction
|= (immbits
& 1) << 5;
8471 inst
.instruction
|= (immbits
>> 1);
8475 do_vfp_sp_conv_16 (void)
8477 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8482 do_vfp_dp_conv_16 (void)
8484 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8489 do_vfp_sp_conv_32 (void)
8491 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8496 do_vfp_dp_conv_32 (void)
8498 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8502 /* FPA instructions. Also in a logical order. */
8507 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8508 inst
.instruction
|= inst
.operands
[1].reg
;
8512 do_fpa_ldmstm (void)
8514 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8515 switch (inst
.operands
[1].imm
)
8517 case 1: inst
.instruction
|= CP_T_X
; break;
8518 case 2: inst
.instruction
|= CP_T_Y
; break;
8519 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
8524 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
8526 /* The instruction specified "ea" or "fd", so we can only accept
8527 [Rn]{!}. The instruction does not really support stacking or
8528 unstacking, so we have to emulate these by setting appropriate
8529 bits and offsets. */
8530 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8531 || inst
.reloc
.exp
.X_add_number
!= 0,
8532 _("this instruction does not support indexing"));
8534 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
8535 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
8537 if (!(inst
.instruction
& INDEX_UP
))
8538 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
8540 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
8542 inst
.operands
[2].preind
= 0;
8543 inst
.operands
[2].postind
= 1;
8547 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8550 /* iWMMXt instructions: strictly in alphabetical order. */
8553 do_iwmmxt_tandorc (void)
8555 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
8559 do_iwmmxt_textrc (void)
8561 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8562 inst
.instruction
|= inst
.operands
[1].imm
;
8566 do_iwmmxt_textrm (void)
8568 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8569 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8570 inst
.instruction
|= inst
.operands
[2].imm
;
8574 do_iwmmxt_tinsr (void)
8576 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8577 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8578 inst
.instruction
|= inst
.operands
[2].imm
;
8582 do_iwmmxt_tmia (void)
8584 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8585 inst
.instruction
|= inst
.operands
[1].reg
;
8586 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8590 do_iwmmxt_waligni (void)
8592 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8593 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8594 inst
.instruction
|= inst
.operands
[2].reg
;
8595 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
8599 do_iwmmxt_wmerge (void)
8601 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8602 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8603 inst
.instruction
|= inst
.operands
[2].reg
;
8604 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
8608 do_iwmmxt_wmov (void)
8610 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8611 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8612 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8613 inst
.instruction
|= inst
.operands
[1].reg
;
8617 do_iwmmxt_wldstbh (void)
8620 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8622 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
8624 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
8625 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
8629 do_iwmmxt_wldstw (void)
8631 /* RIWR_RIWC clears .isreg for a control register. */
8632 if (!inst
.operands
[0].isreg
)
8634 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8635 inst
.instruction
|= 0xf0000000;
8638 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8639 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8643 do_iwmmxt_wldstd (void)
8645 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8646 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
8647 && inst
.operands
[1].immisreg
)
8649 inst
.instruction
&= ~0x1a000ff;
8650 inst
.instruction
|= (0xf << 28);
8651 if (inst
.operands
[1].preind
)
8652 inst
.instruction
|= PRE_INDEX
;
8653 if (!inst
.operands
[1].negative
)
8654 inst
.instruction
|= INDEX_UP
;
8655 if (inst
.operands
[1].writeback
)
8656 inst
.instruction
|= WRITE_BACK
;
8657 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8658 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8659 inst
.instruction
|= inst
.operands
[1].imm
;
8662 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
8666 do_iwmmxt_wshufh (void)
8668 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8669 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8670 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
8671 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
8675 do_iwmmxt_wzero (void)
8677 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8678 inst
.instruction
|= inst
.operands
[0].reg
;
8679 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8680 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8684 do_iwmmxt_wrwrwr_or_imm5 (void)
8686 if (inst
.operands
[2].isreg
)
8689 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
8690 _("immediate operand requires iWMMXt2"));
8692 if (inst
.operands
[2].imm
== 0)
8694 switch ((inst
.instruction
>> 20) & 0xf)
8700 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8701 inst
.operands
[2].imm
= 16;
8702 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
8708 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8709 inst
.operands
[2].imm
= 32;
8710 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
8717 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8719 wrn
= (inst
.instruction
>> 16) & 0xf;
8720 inst
.instruction
&= 0xff0fff0f;
8721 inst
.instruction
|= wrn
;
8722 /* Bail out here; the instruction is now assembled. */
8727 /* Map 32 -> 0, etc. */
8728 inst
.operands
[2].imm
&= 0x1f;
8729 inst
.instruction
|= (0xf << 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
8733 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8734 operations first, then control, shift, and load/store. */
8736 /* Insns like "foo X,Y,Z". */
8739 do_mav_triple (void)
8741 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8742 inst
.instruction
|= inst
.operands
[1].reg
;
8743 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8746 /* Insns like "foo W,X,Y,Z".
8747 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8752 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8753 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8754 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8755 inst
.instruction
|= inst
.operands
[3].reg
;
8758 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8762 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8765 /* Maverick shift immediate instructions.
8766 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8767 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8772 int imm
= inst
.operands
[2].imm
;
8774 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8775 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8777 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8778 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8779 Bit 4 should be 0. */
8780 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
8782 inst
.instruction
|= imm
;
8785 /* XScale instructions. Also sorted arithmetic before move. */
8787 /* Xscale multiply-accumulate (argument parse)
8790 MIAxycc acc0,Rm,Rs. */
8795 inst
.instruction
|= inst
.operands
[1].reg
;
8796 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8799 /* Xscale move-accumulator-register (argument parse)
8801 MARcc acc0,RdLo,RdHi. */
8806 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8807 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8810 /* Xscale move-register-accumulator (argument parse)
8812 MRAcc RdLo,RdHi,acc0. */
8817 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
8818 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8819 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8822 /* Encoding functions relevant only to Thumb. */
8824 /* inst.operands[i] is a shifted-register operand; encode
8825 it into inst.instruction in the format used by Thumb32. */
8828 encode_thumb32_shifted_operand (int i
)
8830 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
8831 unsigned int shift
= inst
.operands
[i
].shift_kind
;
8833 constraint (inst
.operands
[i
].immisreg
,
8834 _("shift by register not allowed in thumb mode"));
8835 inst
.instruction
|= inst
.operands
[i
].reg
;
8836 if (shift
== SHIFT_RRX
)
8837 inst
.instruction
|= SHIFT_ROR
<< 4;
8840 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8841 _("expression too complex"));
8843 constraint (value
> 32
8844 || (value
== 32 && (shift
== SHIFT_LSL
8845 || shift
== SHIFT_ROR
)),
8846 _("shift expression is too large"));
8850 else if (value
== 32)
8853 inst
.instruction
|= shift
<< 4;
8854 inst
.instruction
|= (value
& 0x1c) << 10;
8855 inst
.instruction
|= (value
& 0x03) << 6;
8860 /* inst.operands[i] was set up by parse_address. Encode it into a
8861 Thumb32 format load or store instruction. Reject forms that cannot
8862 be used with such instructions. If is_t is true, reject forms that
8863 cannot be used with a T instruction; if is_d is true, reject forms
8864 that cannot be used with a D instruction. If it is a store insn,
8868 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
8870 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8872 constraint (!inst
.operands
[i
].isreg
,
8873 _("Instruction does not support =N addresses"));
8875 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8876 if (inst
.operands
[i
].immisreg
)
8878 constraint (is_pc
, BAD_PC_ADDRESSING
);
8879 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
8880 constraint (inst
.operands
[i
].negative
,
8881 _("Thumb does not support negative register indexing"));
8882 constraint (inst
.operands
[i
].postind
,
8883 _("Thumb does not support register post-indexing"));
8884 constraint (inst
.operands
[i
].writeback
,
8885 _("Thumb does not support register indexing with writeback"));
8886 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
8887 _("Thumb supports only LSL in shifted register indexing"));
8889 inst
.instruction
|= inst
.operands
[i
].imm
;
8890 if (inst
.operands
[i
].shifted
)
8892 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8893 _("expression too complex"));
8894 constraint (inst
.reloc
.exp
.X_add_number
< 0
8895 || inst
.reloc
.exp
.X_add_number
> 3,
8896 _("shift out of range"));
8897 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8899 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8901 else if (inst
.operands
[i
].preind
)
8903 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
8904 constraint (is_t
&& inst
.operands
[i
].writeback
,
8905 _("cannot use writeback with this instruction"));
8906 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0)
8907 && !inst
.reloc
.pc_rel
, BAD_PC_ADDRESSING
);
8911 inst
.instruction
|= 0x01000000;
8912 if (inst
.operands
[i
].writeback
)
8913 inst
.instruction
|= 0x00200000;
8917 inst
.instruction
|= 0x00000c00;
8918 if (inst
.operands
[i
].writeback
)
8919 inst
.instruction
|= 0x00000100;
8921 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8923 else if (inst
.operands
[i
].postind
)
8925 gas_assert (inst
.operands
[i
].writeback
);
8926 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
8927 constraint (is_t
, _("cannot use post-indexing with this instruction"));
8930 inst
.instruction
|= 0x00200000;
8932 inst
.instruction
|= 0x00000900;
8933 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8935 else /* unindexed - only for coprocessor */
8936 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
9043 /* Thumb instruction encoders, in alphabetical order. */
9048 do_t_add_sub_w (void)
9052 Rd
= inst
.operands
[0].reg
;
9053 Rn
= inst
.operands
[1].reg
;
9055 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9056 is the SP-{plus,minus}-immediate form of the instruction. */
9058 constraint (Rd
== REG_PC
, BAD_PC
);
9060 reject_bad_reg (Rd
);
9062 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
9063 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
9066 /* Parse an add or subtract instruction. We get here with inst.instruction
9067 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
9074 Rd
= inst
.operands
[0].reg
;
9075 Rs
= (inst
.operands
[1].present
9076 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9077 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9080 set_it_insn_type_last ();
9088 flags
= (inst
.instruction
== T_MNEM_adds
9089 || inst
.instruction
== T_MNEM_subs
);
9091 narrow
= !in_it_block ();
9093 narrow
= in_it_block ();
9094 if (!inst
.operands
[2].isreg
)
9098 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
9100 add
= (inst
.instruction
== T_MNEM_add
9101 || inst
.instruction
== T_MNEM_adds
);
9103 if (inst
.size_req
!= 4)
9105 /* Attempt to use a narrow opcode, with relaxation if
9107 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
9108 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
9109 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
9110 opcode
= T_MNEM_add_sp
;
9111 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
9112 opcode
= T_MNEM_add_pc
;
9113 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
9116 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
9118 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
9122 inst
.instruction
= THUMB_OP16(opcode
);
9123 inst
.instruction
|= (Rd
<< 4) | Rs
;
9124 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9125 if (inst
.size_req
!= 2)
9126 inst
.relax
= opcode
;
9129 constraint (inst
.size_req
== 2, BAD_HIREG
);
9131 if (inst
.size_req
== 4
9132 || (inst
.size_req
!= 2 && !opcode
))
9136 constraint (add
, BAD_PC
);
9137 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
9138 _("only SUBS PC, LR, #const allowed"));
9139 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9140 _("expression too complex"));
9141 constraint (inst
.reloc
.exp
.X_add_number
< 0
9142 || inst
.reloc
.exp
.X_add_number
> 0xff,
9143 _("immediate value out of range"));
9144 inst
.instruction
= T2_SUBS_PC_LR
9145 | inst
.reloc
.exp
.X_add_number
;
9146 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9149 else if (Rs
== REG_PC
)
9151 /* Always use addw/subw. */
9152 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
9153 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
9157 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9158 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
9161 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9163 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
9165 inst
.instruction
|= Rd
<< 8;
9166 inst
.instruction
|= Rs
<< 16;
9171 Rn
= inst
.operands
[2].reg
;
9172 /* See if we can do this with a 16-bit instruction. */
9173 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
9175 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
9180 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
9181 || inst
.instruction
== T_MNEM_add
)
9184 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
9188 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
9190 /* Thumb-1 cores (except v6-M) require at least one high
9191 register in a narrow non flag setting add. */
9192 if (Rd
> 7 || Rn
> 7
9193 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
9194 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
9201 inst
.instruction
= T_OPCODE_ADD_HI
;
9202 inst
.instruction
|= (Rd
& 8) << 4;
9203 inst
.instruction
|= (Rd
& 7);
9204 inst
.instruction
|= Rn
<< 3;
9210 constraint (Rd
== REG_PC
, BAD_PC
);
9211 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
9212 constraint (Rs
== REG_PC
, BAD_PC
);
9213 reject_bad_reg (Rn
);
9215 /* If we get here, it can't be done in 16 bits. */
9216 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
9217 _("shift must be constant"));
9218 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9219 inst
.instruction
|= Rd
<< 8;
9220 inst
.instruction
|= Rs
<< 16;
9221 encode_thumb32_shifted_operand (2);
9226 constraint (inst
.instruction
== T_MNEM_adds
9227 || inst
.instruction
== T_MNEM_subs
,
9230 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
9232 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
9233 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
9236 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9238 inst
.instruction
|= (Rd
<< 4) | Rs
;
9239 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9243 Rn
= inst
.operands
[2].reg
;
9244 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
9246 /* We now have Rd, Rs, and Rn set to registers. */
9247 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
9249 /* Can't do this for SUB. */
9250 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
9251 inst
.instruction
= T_OPCODE_ADD_HI
;
9252 inst
.instruction
|= (Rd
& 8) << 4;
9253 inst
.instruction
|= (Rd
& 7);
9255 inst
.instruction
|= Rn
<< 3;
9257 inst
.instruction
|= Rs
<< 3;
9259 constraint (1, _("dest must overlap one source register"));
9263 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9264 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
9265 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
9275 Rd
= inst
.operands
[0].reg
;
9276 reject_bad_reg (Rd
);
9278 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
9280 /* Defer to section relaxation. */
9281 inst
.relax
= inst
.instruction
;
9282 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9283 inst
.instruction
|= Rd
<< 4;
9285 else if (unified_syntax
&& inst
.size_req
!= 2)
9287 /* Generate a 32-bit opcode. */
9288 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9289 inst
.instruction
|= Rd
<< 8;
9290 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
9291 inst
.reloc
.pc_rel
= 1;
9295 /* Generate a 16-bit opcode. */
9296 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9297 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9298 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
9299 inst
.reloc
.pc_rel
= 1;
9301 inst
.instruction
|= Rd
<< 4;
9305 /* Arithmetic instructions for which there is just one 16-bit
9306 instruction encoding, and it allows only two low registers.
9307 For maximal compatibility with ARM syntax, we allow three register
9308 operands even when Thumb-32 instructions are not available, as long
9309 as the first two are identical. For instance, both "sbc r0,r1" and
9310 "sbc r0,r0,r1" are allowed. */
9316 Rd
= inst
.operands
[0].reg
;
9317 Rs
= (inst
.operands
[1].present
9318 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9319 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9320 Rn
= inst
.operands
[2].reg
;
9322 reject_bad_reg (Rd
);
9323 reject_bad_reg (Rs
);
9324 if (inst
.operands
[2].isreg
)
9325 reject_bad_reg (Rn
);
9329 if (!inst
.operands
[2].isreg
)
9331 /* For an immediate, we always generate a 32-bit opcode;
9332 section relaxation will shrink it later if possible. */
9333 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9334 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9335 inst
.instruction
|= Rd
<< 8;
9336 inst
.instruction
|= Rs
<< 16;
9337 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9343 /* See if we can do this with a 16-bit instruction. */
9344 if (THUMB_SETS_FLAGS (inst
.instruction
))
9345 narrow
= !in_it_block ();
9347 narrow
= in_it_block ();
9349 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9351 if (inst
.operands
[2].shifted
)
9353 if (inst
.size_req
== 4)
9359 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9360 inst
.instruction
|= Rd
;
9361 inst
.instruction
|= Rn
<< 3;
9365 /* If we get here, it can't be done in 16 bits. */
9366 constraint (inst
.operands
[2].shifted
9367 && inst
.operands
[2].immisreg
,
9368 _("shift must be constant"));
9369 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9370 inst
.instruction
|= Rd
<< 8;
9371 inst
.instruction
|= Rs
<< 16;
9372 encode_thumb32_shifted_operand (2);
9377 /* On its face this is a lie - the instruction does set the
9378 flags. However, the only supported mnemonic in this mode
9380 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9382 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9383 _("unshifted register required"));
9384 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9385 constraint (Rd
!= Rs
,
9386 _("dest and source1 must be the same register"));
9388 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9389 inst
.instruction
|= Rd
;
9390 inst
.instruction
|= Rn
<< 3;
9394 /* Similarly, but for instructions where the arithmetic operation is
9395 commutative, so we can allow either of them to be different from
9396 the destination operand in a 16-bit instruction. For instance, all
9397 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9404 Rd
= inst
.operands
[0].reg
;
9405 Rs
= (inst
.operands
[1].present
9406 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9407 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9408 Rn
= inst
.operands
[2].reg
;
9410 reject_bad_reg (Rd
);
9411 reject_bad_reg (Rs
);
9412 if (inst
.operands
[2].isreg
)
9413 reject_bad_reg (Rn
);
9417 if (!inst
.operands
[2].isreg
)
9419 /* For an immediate, we always generate a 32-bit opcode;
9420 section relaxation will shrink it later if possible. */
9421 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9422 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9423 inst
.instruction
|= Rd
<< 8;
9424 inst
.instruction
|= Rs
<< 16;
9425 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9431 /* See if we can do this with a 16-bit instruction. */
9432 if (THUMB_SETS_FLAGS (inst
.instruction
))
9433 narrow
= !in_it_block ();
9435 narrow
= in_it_block ();
9437 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9439 if (inst
.operands
[2].shifted
)
9441 if (inst
.size_req
== 4)
9448 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9449 inst
.instruction
|= Rd
;
9450 inst
.instruction
|= Rn
<< 3;
9455 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9456 inst
.instruction
|= Rd
;
9457 inst
.instruction
|= Rs
<< 3;
9462 /* If we get here, it can't be done in 16 bits. */
9463 constraint (inst
.operands
[2].shifted
9464 && inst
.operands
[2].immisreg
,
9465 _("shift must be constant"));
9466 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9467 inst
.instruction
|= Rd
<< 8;
9468 inst
.instruction
|= Rs
<< 16;
9469 encode_thumb32_shifted_operand (2);
9474 /* On its face this is a lie - the instruction does set the
9475 flags. However, the only supported mnemonic in this mode
9477 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9479 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9480 _("unshifted register required"));
9481 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9483 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9484 inst
.instruction
|= Rd
;
9487 inst
.instruction
|= Rn
<< 3;
9489 inst
.instruction
|= Rs
<< 3;
9491 constraint (1, _("dest must overlap one source register"));
9498 if (inst
.operands
[0].present
)
9500 constraint ((inst
.instruction
& 0xf0) != 0x40
9501 && inst
.operands
[0].imm
> 0xf
9502 && inst
.operands
[0].imm
< 0x0,
9503 _("bad barrier type"));
9504 inst
.instruction
|= inst
.operands
[0].imm
;
9507 inst
.instruction
|= 0xf;
9514 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
9515 constraint (msb
> 32, _("bit-field extends past end of register"));
9516 /* The instruction encoding stores the LSB and MSB,
9517 not the LSB and width. */
9518 Rd
= inst
.operands
[0].reg
;
9519 reject_bad_reg (Rd
);
9520 inst
.instruction
|= Rd
<< 8;
9521 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
9522 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
9523 inst
.instruction
|= msb
- 1;
9532 Rd
= inst
.operands
[0].reg
;
9533 reject_bad_reg (Rd
);
9535 /* #0 in second position is alternative syntax for bfc, which is
9536 the same instruction but with REG_PC in the Rm field. */
9537 if (!inst
.operands
[1].isreg
)
9541 Rn
= inst
.operands
[1].reg
;
9542 reject_bad_reg (Rn
);
9545 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
9546 constraint (msb
> 32, _("bit-field extends past end of register"));
9547 /* The instruction encoding stores the LSB and MSB,
9548 not the LSB and width. */
9549 inst
.instruction
|= Rd
<< 8;
9550 inst
.instruction
|= Rn
<< 16;
9551 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9552 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9553 inst
.instruction
|= msb
- 1;
9561 Rd
= inst
.operands
[0].reg
;
9562 Rn
= inst
.operands
[1].reg
;
9564 reject_bad_reg (Rd
);
9565 reject_bad_reg (Rn
);
9567 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
9568 _("bit-field extends past end of register"));
9569 inst
.instruction
|= Rd
<< 8;
9570 inst
.instruction
|= Rn
<< 16;
9571 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9572 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9573 inst
.instruction
|= inst
.operands
[3].imm
- 1;
9576 /* ARM V5 Thumb BLX (argument parse)
9577 BLX <target_addr> which is BLX(1)
9578 BLX <Rm> which is BLX(2)
9579 Unfortunately, there are two different opcodes for this mnemonic.
9580 So, the insns[].value is not used, and the code here zaps values
9581 into inst.instruction.
9583 ??? How to take advantage of the additional two bits of displacement
9584 available in Thumb32 mode? Need new relocation? */
9589 set_it_insn_type_last ();
9591 if (inst
.operands
[0].isreg
)
9593 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9594 /* We have a register, so this is BLX(2). */
9595 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9599 /* No register. This must be BLX(1). */
9600 inst
.instruction
= 0xf000e800;
9601 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
9602 inst
.reloc
.pc_rel
= 1;
9613 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
9617 /* Conditional branches inside IT blocks are encoded as unconditional
9624 if (cond
!= COND_ALWAYS
)
9625 opcode
= T_MNEM_bcond
;
9627 opcode
= inst
.instruction
;
9629 if (unified_syntax
&& inst
.size_req
== 4)
9631 inst
.instruction
= THUMB_OP32(opcode
);
9632 if (cond
== COND_ALWAYS
)
9633 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
9636 gas_assert (cond
!= 0xF);
9637 inst
.instruction
|= cond
<< 22;
9638 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
9643 inst
.instruction
= THUMB_OP16(opcode
);
9644 if (cond
== COND_ALWAYS
)
9645 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
9648 inst
.instruction
|= cond
<< 8;
9649 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
9651 /* Allow section relaxation. */
9652 if (unified_syntax
&& inst
.size_req
!= 2)
9653 inst
.relax
= opcode
;
9656 inst
.reloc
.pc_rel
= 1;
9662 constraint (inst
.cond
!= COND_ALWAYS
,
9663 _("instruction is always unconditional"));
9664 if (inst
.operands
[0].present
)
9666 constraint (inst
.operands
[0].imm
> 255,
9667 _("immediate value out of range"));
9668 inst
.instruction
|= inst
.operands
[0].imm
;
9669 set_it_insn_type (NEUTRAL_IT_INSN
);
9674 do_t_branch23 (void)
9676 set_it_insn_type_last ();
9677 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
9678 inst
.reloc
.pc_rel
= 1;
9680 #if defined(OBJ_COFF)
9681 /* If the destination of the branch is a defined symbol which does not have
9682 the THUMB_FUNC attribute, then we must be calling a function which has
9683 the (interfacearm) attribute. We look for the Thumb entry point to that
9684 function and change the branch to refer to that function instead. */
9685 if ( inst
.reloc
.exp
.X_op
== O_symbol
9686 && inst
.reloc
.exp
.X_add_symbol
!= NULL
9687 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
9688 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
9689 inst
.reloc
.exp
.X_add_symbol
=
9690 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
9697 set_it_insn_type_last ();
9698 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9699 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9700 should cause the alignment to be checked once it is known. This is
9701 because BX PC only works if the instruction is word aligned. */
9709 set_it_insn_type_last ();
9710 Rm
= inst
.operands
[0].reg
;
9711 reject_bad_reg (Rm
);
9712 inst
.instruction
|= Rm
<< 16;
9721 Rd
= inst
.operands
[0].reg
;
9722 Rm
= inst
.operands
[1].reg
;
9724 reject_bad_reg (Rd
);
9725 reject_bad_reg (Rm
);
9727 inst
.instruction
|= Rd
<< 8;
9728 inst
.instruction
|= Rm
<< 16;
9729 inst
.instruction
|= Rm
;
9735 set_it_insn_type (OUTSIDE_IT_INSN
);
9736 inst
.instruction
|= inst
.operands
[0].imm
;
9742 set_it_insn_type (OUTSIDE_IT_INSN
);
9744 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
9745 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
9747 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
9748 inst
.instruction
= 0xf3af8000;
9749 inst
.instruction
|= imod
<< 9;
9750 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
9751 if (inst
.operands
[1].present
)
9752 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
9756 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
9757 && (inst
.operands
[0].imm
& 4),
9758 _("selected processor does not support 'A' form "
9759 "of this instruction"));
9760 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
9761 _("Thumb does not support the 2-argument "
9762 "form of this instruction"));
9763 inst
.instruction
|= inst
.operands
[0].imm
;
9767 /* THUMB CPY instruction (argument parse). */
9772 if (inst
.size_req
== 4)
9774 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
9775 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9776 inst
.instruction
|= inst
.operands
[1].reg
;
9780 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9781 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9782 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9789 set_it_insn_type (OUTSIDE_IT_INSN
);
9790 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9791 inst
.instruction
|= inst
.operands
[0].reg
;
9792 inst
.reloc
.pc_rel
= 1;
9793 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
9799 inst
.instruction
|= inst
.operands
[0].imm
;
9805 unsigned Rd
, Rn
, Rm
;
9807 Rd
= inst
.operands
[0].reg
;
9808 Rn
= (inst
.operands
[1].present
9809 ? inst
.operands
[1].reg
: Rd
);
9810 Rm
= inst
.operands
[2].reg
;
9812 reject_bad_reg (Rd
);
9813 reject_bad_reg (Rn
);
9814 reject_bad_reg (Rm
);
9816 inst
.instruction
|= Rd
<< 8;
9817 inst
.instruction
|= Rn
<< 16;
9818 inst
.instruction
|= Rm
;
9824 if (unified_syntax
&& inst
.size_req
== 4)
9825 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9827 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9833 unsigned int cond
= inst
.operands
[0].imm
;
9835 set_it_insn_type (IT_INSN
);
9836 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
9839 /* If the condition is a negative condition, invert the mask. */
9840 if ((cond
& 0x1) == 0x0)
9842 unsigned int mask
= inst
.instruction
& 0x000f;
9844 if ((mask
& 0x7) == 0)
9845 /* no conversion needed */;
9846 else if ((mask
& 0x3) == 0)
9848 else if ((mask
& 0x1) == 0)
9853 inst
.instruction
&= 0xfff0;
9854 inst
.instruction
|= mask
;
9857 inst
.instruction
|= cond
<< 4;
9860 /* Helper function used for both push/pop and ldm/stm. */
9862 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
9866 load
= (inst
.instruction
& (1 << 20)) != 0;
9868 if (mask
& (1 << 13))
9869 inst
.error
= _("SP not allowed in register list");
9871 if ((mask
& (1 << base
)) != 0
9873 inst
.error
= _("having the base register in the register list when "
9874 "using write back is UNPREDICTABLE");
9878 if (mask
& (1 << 15))
9880 if (mask
& (1 << 14))
9881 inst
.error
= _("LR and PC should not both be in register list");
9883 set_it_insn_type_last ();
9888 if (mask
& (1 << 15))
9889 inst
.error
= _("PC not allowed in register list");
9892 if ((mask
& (mask
- 1)) == 0)
9894 /* Single register transfers implemented as str/ldr. */
9897 if (inst
.instruction
& (1 << 23))
9898 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
9900 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
9904 if (inst
.instruction
& (1 << 23))
9905 inst
.instruction
= 0x00800000; /* ia -> [base] */
9907 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
9910 inst
.instruction
|= 0xf8400000;
9912 inst
.instruction
|= 0x00100000;
9914 mask
= ffs (mask
) - 1;
9918 inst
.instruction
|= WRITE_BACK
;
9920 inst
.instruction
|= mask
;
9921 inst
.instruction
|= base
<< 16;
9927 /* This really doesn't seem worth it. */
9928 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9929 _("expression too complex"));
9930 constraint (inst
.operands
[1].writeback
,
9931 _("Thumb load/store multiple does not support {reglist}^"));
9939 /* See if we can use a 16-bit instruction. */
9940 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
9941 && inst
.size_req
!= 4
9942 && !(inst
.operands
[1].imm
& ~0xff))
9944 mask
= 1 << inst
.operands
[0].reg
;
9946 if (inst
.operands
[0].reg
<= 7
9947 && (inst
.instruction
== T_MNEM_stmia
9948 ? inst
.operands
[0].writeback
9949 : (inst
.operands
[0].writeback
9950 == !(inst
.operands
[1].imm
& mask
))))
9952 if (inst
.instruction
== T_MNEM_stmia
9953 && (inst
.operands
[1].imm
& mask
)
9954 && (inst
.operands
[1].imm
& (mask
- 1)))
9955 as_warn (_("value stored for r%d is UNKNOWN"),
9956 inst
.operands
[0].reg
);
9958 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9959 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9960 inst
.instruction
|= inst
.operands
[1].imm
;
9963 else if (inst
.operands
[0] .reg
== REG_SP
9964 && inst
.operands
[0].writeback
)
9966 inst
.instruction
= THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
9967 ? T_MNEM_push
: T_MNEM_pop
);
9968 inst
.instruction
|= inst
.operands
[1].imm
;
9975 if (inst
.instruction
< 0xffff)
9976 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9978 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
9979 inst
.operands
[0].writeback
);
9984 constraint (inst
.operands
[0].reg
> 7
9985 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
9986 constraint (inst
.instruction
!= T_MNEM_ldmia
9987 && inst
.instruction
!= T_MNEM_stmia
,
9988 _("Thumb-2 instruction only valid in unified syntax"));
9989 if (inst
.instruction
== T_MNEM_stmia
)
9991 if (!inst
.operands
[0].writeback
)
9992 as_warn (_("this instruction will write back the base register"));
9993 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9994 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
9995 as_warn (_("value stored for r%d is UNKNOWN"),
9996 inst
.operands
[0].reg
);
10000 if (!inst
.operands
[0].writeback
10001 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
10002 as_warn (_("this instruction will write back the base register"));
10003 else if (inst
.operands
[0].writeback
10004 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
10005 as_warn (_("this instruction will not write back the base register"));
10008 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10009 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10010 inst
.instruction
|= inst
.operands
[1].imm
;
10017 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
10018 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
10019 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
10020 || inst
.operands
[1].negative
,
10023 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
10025 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10026 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10027 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
10033 if (!inst
.operands
[1].present
)
10035 constraint (inst
.operands
[0].reg
== REG_LR
,
10036 _("r14 not allowed as first register "
10037 "when second register is omitted"));
10038 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
10040 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
10043 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10044 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
10045 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10051 unsigned long opcode
;
10054 if (inst
.operands
[0].isreg
10055 && !inst
.operands
[0].preind
10056 && inst
.operands
[0].reg
== REG_PC
)
10057 set_it_insn_type_last ();
10059 opcode
= inst
.instruction
;
10060 if (unified_syntax
)
10062 if (!inst
.operands
[1].isreg
)
10064 if (opcode
<= 0xffff)
10065 inst
.instruction
= THUMB_OP32 (opcode
);
10066 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
10069 if (inst
.operands
[1].isreg
10070 && !inst
.operands
[1].writeback
10071 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
10072 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
10073 && opcode
<= 0xffff
10074 && inst
.size_req
!= 4)
10076 /* Insn may have a 16-bit form. */
10077 Rn
= inst
.operands
[1].reg
;
10078 if (inst
.operands
[1].immisreg
)
10080 inst
.instruction
= THUMB_OP16 (opcode
);
10082 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
10084 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
10085 reject_bad_reg (inst
.operands
[1].imm
);
10087 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
10088 && opcode
!= T_MNEM_ldrsb
)
10089 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
10090 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
10097 if (inst
.reloc
.pc_rel
)
10098 opcode
= T_MNEM_ldr_pc2
;
10100 opcode
= T_MNEM_ldr_pc
;
10104 if (opcode
== T_MNEM_ldr
)
10105 opcode
= T_MNEM_ldr_sp
;
10107 opcode
= T_MNEM_str_sp
;
10109 inst
.instruction
= inst
.operands
[0].reg
<< 8;
10113 inst
.instruction
= inst
.operands
[0].reg
;
10114 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10116 inst
.instruction
|= THUMB_OP16 (opcode
);
10117 if (inst
.size_req
== 2)
10118 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10120 inst
.relax
= opcode
;
10124 /* Definitely a 32-bit variant. */
10126 /* Do some validations regarding addressing modes. */
10127 if (inst
.operands
[1].immisreg
&& opcode
!= T_MNEM_ldr
10128 && opcode
!= T_MNEM_str
)
10129 reject_bad_reg (inst
.operands
[1].imm
);
10131 inst
.instruction
= THUMB_OP32 (opcode
);
10132 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10133 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
10137 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
10139 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
10141 /* Only [Rn,Rm] is acceptable. */
10142 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
10143 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
10144 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
10145 || inst
.operands
[1].negative
,
10146 _("Thumb does not support this addressing mode"));
10147 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10151 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10152 if (!inst
.operands
[1].isreg
)
10153 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
10156 constraint (!inst
.operands
[1].preind
10157 || inst
.operands
[1].shifted
10158 || inst
.operands
[1].writeback
,
10159 _("Thumb does not support this addressing mode"));
10160 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
10162 constraint (inst
.instruction
& 0x0600,
10163 _("byte or halfword not valid for base register"));
10164 constraint (inst
.operands
[1].reg
== REG_PC
10165 && !(inst
.instruction
& THUMB_LOAD_BIT
),
10166 _("r15 based store not allowed"));
10167 constraint (inst
.operands
[1].immisreg
,
10168 _("invalid base register for register offset"));
10170 if (inst
.operands
[1].reg
== REG_PC
)
10171 inst
.instruction
= T_OPCODE_LDR_PC
;
10172 else if (inst
.instruction
& THUMB_LOAD_BIT
)
10173 inst
.instruction
= T_OPCODE_LDR_SP
;
10175 inst
.instruction
= T_OPCODE_STR_SP
;
10177 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10178 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10182 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
10183 if (!inst
.operands
[1].immisreg
)
10185 /* Immediate offset. */
10186 inst
.instruction
|= inst
.operands
[0].reg
;
10187 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10188 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10192 /* Register offset. */
10193 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
10194 constraint (inst
.operands
[1].negative
,
10195 _("Thumb does not support this addressing mode"));
10198 switch (inst
.instruction
)
10200 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
10201 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
10202 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
10203 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
10204 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
10205 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
10206 case 0x5600 /* ldrsb */:
10207 case 0x5e00 /* ldrsh */: break;
10211 inst
.instruction
|= inst
.operands
[0].reg
;
10212 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10213 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
10219 if (!inst
.operands
[1].present
)
10221 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
10222 constraint (inst
.operands
[0].reg
== REG_LR
,
10223 _("r14 not allowed here"));
10225 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10226 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
10227 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
10233 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10234 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
10240 unsigned Rd
, Rn
, Rm
, Ra
;
10242 Rd
= inst
.operands
[0].reg
;
10243 Rn
= inst
.operands
[1].reg
;
10244 Rm
= inst
.operands
[2].reg
;
10245 Ra
= inst
.operands
[3].reg
;
10247 reject_bad_reg (Rd
);
10248 reject_bad_reg (Rn
);
10249 reject_bad_reg (Rm
);
10250 reject_bad_reg (Ra
);
10252 inst
.instruction
|= Rd
<< 8;
10253 inst
.instruction
|= Rn
<< 16;
10254 inst
.instruction
|= Rm
;
10255 inst
.instruction
|= Ra
<< 12;
10261 unsigned RdLo
, RdHi
, Rn
, Rm
;
10263 RdLo
= inst
.operands
[0].reg
;
10264 RdHi
= inst
.operands
[1].reg
;
10265 Rn
= inst
.operands
[2].reg
;
10266 Rm
= inst
.operands
[3].reg
;
10268 reject_bad_reg (RdLo
);
10269 reject_bad_reg (RdHi
);
10270 reject_bad_reg (Rn
);
10271 reject_bad_reg (Rm
);
10273 inst
.instruction
|= RdLo
<< 12;
10274 inst
.instruction
|= RdHi
<< 8;
10275 inst
.instruction
|= Rn
<< 16;
10276 inst
.instruction
|= Rm
;
10280 do_t_mov_cmp (void)
10284 Rn
= inst
.operands
[0].reg
;
10285 Rm
= inst
.operands
[1].reg
;
10288 set_it_insn_type_last ();
10290 if (unified_syntax
)
10292 int r0off
= (inst
.instruction
== T_MNEM_mov
10293 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
10294 unsigned long opcode
;
10295 bfd_boolean narrow
;
10296 bfd_boolean low_regs
;
10298 low_regs
= (Rn
<= 7 && Rm
<= 7);
10299 opcode
= inst
.instruction
;
10300 if (in_it_block ())
10301 narrow
= opcode
!= T_MNEM_movs
;
10303 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
10304 if (inst
.size_req
== 4
10305 || inst
.operands
[1].shifted
)
10308 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10309 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
10310 && !inst
.operands
[1].shifted
10314 inst
.instruction
= T2_SUBS_PC_LR
;
10318 if (opcode
== T_MNEM_cmp
)
10320 constraint (Rn
== REG_PC
, BAD_PC
);
10323 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10325 warn_deprecated_sp (Rm
);
10326 /* R15 was documented as a valid choice for Rm in ARMv6,
10327 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10328 tools reject R15, so we do too. */
10329 constraint (Rm
== REG_PC
, BAD_PC
);
10332 reject_bad_reg (Rm
);
10334 else if (opcode
== T_MNEM_mov
10335 || opcode
== T_MNEM_movs
)
10337 if (inst
.operands
[1].isreg
)
10339 if (opcode
== T_MNEM_movs
)
10341 reject_bad_reg (Rn
);
10342 reject_bad_reg (Rm
);
10346 /* This is mov.n. */
10347 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
10348 && (Rm
== REG_SP
|| Rm
== REG_PC
))
10350 as_warn (_("Use of r%u as a source register is "
10351 "deprecated when r%u is the destination "
10352 "register."), Rm
, Rn
);
10357 /* This is mov.w. */
10358 constraint (Rn
== REG_PC
, BAD_PC
);
10359 constraint (Rm
== REG_PC
, BAD_PC
);
10360 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
10364 reject_bad_reg (Rn
);
10367 if (!inst
.operands
[1].isreg
)
10369 /* Immediate operand. */
10370 if (!in_it_block () && opcode
== T_MNEM_mov
)
10372 if (low_regs
&& narrow
)
10374 inst
.instruction
= THUMB_OP16 (opcode
);
10375 inst
.instruction
|= Rn
<< 8;
10376 if (inst
.size_req
== 2)
10377 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
10379 inst
.relax
= opcode
;
10383 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10384 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10385 inst
.instruction
|= Rn
<< r0off
;
10386 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10389 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
10390 && (inst
.instruction
== T_MNEM_mov
10391 || inst
.instruction
== T_MNEM_movs
))
10393 /* Register shifts are encoded as separate shift instructions. */
10394 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
10396 if (in_it_block ())
10401 if (inst
.size_req
== 4)
10404 if (!low_regs
|| inst
.operands
[1].imm
> 7)
10410 switch (inst
.operands
[1].shift_kind
)
10413 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
10416 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
10419 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
10422 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
10428 inst
.instruction
= opcode
;
10431 inst
.instruction
|= Rn
;
10432 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
10437 inst
.instruction
|= CONDS_BIT
;
10439 inst
.instruction
|= Rn
<< 8;
10440 inst
.instruction
|= Rm
<< 16;
10441 inst
.instruction
|= inst
.operands
[1].imm
;
10446 /* Some mov with immediate shift have narrow variants.
10447 Register shifts are handled above. */
10448 if (low_regs
&& inst
.operands
[1].shifted
10449 && (inst
.instruction
== T_MNEM_mov
10450 || inst
.instruction
== T_MNEM_movs
))
10452 if (in_it_block ())
10453 narrow
= (inst
.instruction
== T_MNEM_mov
);
10455 narrow
= (inst
.instruction
== T_MNEM_movs
);
10460 switch (inst
.operands
[1].shift_kind
)
10462 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10463 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10464 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10465 default: narrow
= FALSE
; break;
10471 inst
.instruction
|= Rn
;
10472 inst
.instruction
|= Rm
<< 3;
10473 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10477 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10478 inst
.instruction
|= Rn
<< r0off
;
10479 encode_thumb32_shifted_operand (1);
10483 switch (inst
.instruction
)
10486 inst
.instruction
= T_OPCODE_MOV_HR
;
10487 inst
.instruction
|= (Rn
& 0x8) << 4;
10488 inst
.instruction
|= (Rn
& 0x7);
10489 inst
.instruction
|= Rm
<< 3;
10493 /* We know we have low registers at this point.
10494 Generate LSLS Rd, Rs, #0. */
10495 inst
.instruction
= T_OPCODE_LSL_I
;
10496 inst
.instruction
|= Rn
;
10497 inst
.instruction
|= Rm
<< 3;
10503 inst
.instruction
= T_OPCODE_CMP_LR
;
10504 inst
.instruction
|= Rn
;
10505 inst
.instruction
|= Rm
<< 3;
10509 inst
.instruction
= T_OPCODE_CMP_HR
;
10510 inst
.instruction
|= (Rn
& 0x8) << 4;
10511 inst
.instruction
|= (Rn
& 0x7);
10512 inst
.instruction
|= Rm
<< 3;
10519 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10521 /* PR 10443: Do not silently ignore shifted operands. */
10522 constraint (inst
.operands
[1].shifted
,
10523 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
10525 if (inst
.operands
[1].isreg
)
10527 if (Rn
< 8 && Rm
< 8)
10529 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10530 since a MOV instruction produces unpredictable results. */
10531 if (inst
.instruction
== T_OPCODE_MOV_I8
)
10532 inst
.instruction
= T_OPCODE_ADD_I3
;
10534 inst
.instruction
= T_OPCODE_CMP_LR
;
10536 inst
.instruction
|= Rn
;
10537 inst
.instruction
|= Rm
<< 3;
10541 if (inst
.instruction
== T_OPCODE_MOV_I8
)
10542 inst
.instruction
= T_OPCODE_MOV_HR
;
10544 inst
.instruction
= T_OPCODE_CMP_HR
;
10550 constraint (Rn
> 7,
10551 _("only lo regs allowed with immediate"));
10552 inst
.instruction
|= Rn
<< 8;
10553 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
10564 top
= (inst
.instruction
& 0x00800000) != 0;
10565 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
10567 constraint (top
, _(":lower16: not allowed this instruction"));
10568 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
10570 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
10572 constraint (!top
, _(":upper16: not allowed this instruction"));
10573 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
10576 Rd
= inst
.operands
[0].reg
;
10577 reject_bad_reg (Rd
);
10579 inst
.instruction
|= Rd
<< 8;
10580 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
10582 imm
= inst
.reloc
.exp
.X_add_number
;
10583 inst
.instruction
|= (imm
& 0xf000) << 4;
10584 inst
.instruction
|= (imm
& 0x0800) << 15;
10585 inst
.instruction
|= (imm
& 0x0700) << 4;
10586 inst
.instruction
|= (imm
& 0x00ff);
10591 do_t_mvn_tst (void)
10595 Rn
= inst
.operands
[0].reg
;
10596 Rm
= inst
.operands
[1].reg
;
10598 if (inst
.instruction
== T_MNEM_cmp
10599 || inst
.instruction
== T_MNEM_cmn
)
10600 constraint (Rn
== REG_PC
, BAD_PC
);
10602 reject_bad_reg (Rn
);
10603 reject_bad_reg (Rm
);
10605 if (unified_syntax
)
10607 int r0off
= (inst
.instruction
== T_MNEM_mvn
10608 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
10609 bfd_boolean narrow
;
10611 if (inst
.size_req
== 4
10612 || inst
.instruction
> 0xffff
10613 || inst
.operands
[1].shifted
10614 || Rn
> 7 || Rm
> 7)
10616 else if (inst
.instruction
== T_MNEM_cmn
)
10618 else if (THUMB_SETS_FLAGS (inst
.instruction
))
10619 narrow
= !in_it_block ();
10621 narrow
= in_it_block ();
10623 if (!inst
.operands
[1].isreg
)
10625 /* For an immediate, we always generate a 32-bit opcode;
10626 section relaxation will shrink it later if possible. */
10627 if (inst
.instruction
< 0xffff)
10628 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10629 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10630 inst
.instruction
|= Rn
<< r0off
;
10631 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10635 /* See if we can do this with a 16-bit instruction. */
10638 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10639 inst
.instruction
|= Rn
;
10640 inst
.instruction
|= Rm
<< 3;
10644 constraint (inst
.operands
[1].shifted
10645 && inst
.operands
[1].immisreg
,
10646 _("shift must be constant"));
10647 if (inst
.instruction
< 0xffff)
10648 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10649 inst
.instruction
|= Rn
<< r0off
;
10650 encode_thumb32_shifted_operand (1);
10656 constraint (inst
.instruction
> 0xffff
10657 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
10658 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
10659 _("unshifted register required"));
10660 constraint (Rn
> 7 || Rm
> 7,
10663 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10664 inst
.instruction
|= Rn
;
10665 inst
.instruction
|= Rm
<< 3;
10675 if (do_vfp_nsyn_mrs () == SUCCESS
)
10678 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
10681 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
),
10682 _("selected processor does not support "
10683 "requested special purpose register"));
10687 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
10688 _("selected processor does not support "
10689 "requested special purpose register"));
10690 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
10691 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
10692 _("'CPSR' or 'SPSR' expected"));
10695 Rd
= inst
.operands
[0].reg
;
10696 reject_bad_reg (Rd
);
10698 inst
.instruction
|= Rd
<< 8;
10699 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
10700 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
10709 if (do_vfp_nsyn_msr () == SUCCESS
)
10712 constraint (!inst
.operands
[1].isreg
,
10713 _("Thumb encoding does not support an immediate here"));
10714 flags
= inst
.operands
[0].imm
;
10717 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
10718 _("selected processor does not support "
10719 "requested special purpose register"));
10723 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
),
10724 _("selected processor does not support "
10725 "requested special purpose register"));
10729 Rn
= inst
.operands
[1].reg
;
10730 reject_bad_reg (Rn
);
10732 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
10733 inst
.instruction
|= (flags
& ~SPSR_BIT
) >> 8;
10734 inst
.instruction
|= (flags
& 0xff);
10735 inst
.instruction
|= Rn
<< 16;
10741 bfd_boolean narrow
;
10742 unsigned Rd
, Rn
, Rm
;
10744 if (!inst
.operands
[2].present
)
10745 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
10747 Rd
= inst
.operands
[0].reg
;
10748 Rn
= inst
.operands
[1].reg
;
10749 Rm
= inst
.operands
[2].reg
;
10751 if (unified_syntax
)
10753 if (inst
.size_req
== 4
10759 else if (inst
.instruction
== T_MNEM_muls
)
10760 narrow
= !in_it_block ();
10762 narrow
= in_it_block ();
10766 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
10767 constraint (Rn
> 7 || Rm
> 7,
10774 /* 16-bit MULS/Conditional MUL. */
10775 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10776 inst
.instruction
|= Rd
;
10779 inst
.instruction
|= Rm
<< 3;
10781 inst
.instruction
|= Rn
<< 3;
10783 constraint (1, _("dest must overlap one source register"));
10787 constraint (inst
.instruction
!= T_MNEM_mul
,
10788 _("Thumb-2 MUL must not set flags"));
10790 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10791 inst
.instruction
|= Rd
<< 8;
10792 inst
.instruction
|= Rn
<< 16;
10793 inst
.instruction
|= Rm
<< 0;
10795 reject_bad_reg (Rd
);
10796 reject_bad_reg (Rn
);
10797 reject_bad_reg (Rm
);
10804 unsigned RdLo
, RdHi
, Rn
, Rm
;
10806 RdLo
= inst
.operands
[0].reg
;
10807 RdHi
= inst
.operands
[1].reg
;
10808 Rn
= inst
.operands
[2].reg
;
10809 Rm
= inst
.operands
[3].reg
;
10811 reject_bad_reg (RdLo
);
10812 reject_bad_reg (RdHi
);
10813 reject_bad_reg (Rn
);
10814 reject_bad_reg (Rm
);
10816 inst
.instruction
|= RdLo
<< 12;
10817 inst
.instruction
|= RdHi
<< 8;
10818 inst
.instruction
|= Rn
<< 16;
10819 inst
.instruction
|= Rm
;
10822 as_tsktsk (_("rdhi and rdlo must be different"));
10828 set_it_insn_type (NEUTRAL_IT_INSN
);
10830 if (unified_syntax
)
10832 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
10834 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10835 inst
.instruction
|= inst
.operands
[0].imm
;
10839 /* PR9722: Check for Thumb2 availability before
10840 generating a thumb2 nop instruction. */
10841 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
10843 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10844 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
10847 inst
.instruction
= 0x46c0;
10852 constraint (inst
.operands
[0].present
,
10853 _("Thumb does not support NOP with hints"));
10854 inst
.instruction
= 0x46c0;
10861 if (unified_syntax
)
10863 bfd_boolean narrow
;
10865 if (THUMB_SETS_FLAGS (inst
.instruction
))
10866 narrow
= !in_it_block ();
10868 narrow
= in_it_block ();
10869 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
10871 if (inst
.size_req
== 4)
10876 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10877 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10878 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10882 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10883 inst
.instruction
|= inst
.operands
[0].reg
;
10884 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10889 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
10891 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10893 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10894 inst
.instruction
|= inst
.operands
[0].reg
;
10895 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10904 Rd
= inst
.operands
[0].reg
;
10905 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
10907 reject_bad_reg (Rd
);
10908 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
10909 reject_bad_reg (Rn
);
10911 inst
.instruction
|= Rd
<< 8;
10912 inst
.instruction
|= Rn
<< 16;
10914 if (!inst
.operands
[2].isreg
)
10916 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10917 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10923 Rm
= inst
.operands
[2].reg
;
10924 reject_bad_reg (Rm
);
10926 constraint (inst
.operands
[2].shifted
10927 && inst
.operands
[2].immisreg
,
10928 _("shift must be constant"));
10929 encode_thumb32_shifted_operand (2);
10936 unsigned Rd
, Rn
, Rm
;
10938 Rd
= inst
.operands
[0].reg
;
10939 Rn
= inst
.operands
[1].reg
;
10940 Rm
= inst
.operands
[2].reg
;
10942 reject_bad_reg (Rd
);
10943 reject_bad_reg (Rn
);
10944 reject_bad_reg (Rm
);
10946 inst
.instruction
|= Rd
<< 8;
10947 inst
.instruction
|= Rn
<< 16;
10948 inst
.instruction
|= Rm
;
10949 if (inst
.operands
[3].present
)
10951 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
10952 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10953 _("expression too complex"));
10954 inst
.instruction
|= (val
& 0x1c) << 10;
10955 inst
.instruction
|= (val
& 0x03) << 6;
10962 if (!inst
.operands
[3].present
)
10966 inst
.instruction
&= ~0x00000020;
10968 /* PR 10168. Swap the Rm and Rn registers. */
10969 Rtmp
= inst
.operands
[1].reg
;
10970 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
10971 inst
.operands
[2].reg
= Rtmp
;
10979 if (inst
.operands
[0].immisreg
)
10980 reject_bad_reg (inst
.operands
[0].imm
);
10982 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
10986 do_t_push_pop (void)
10990 constraint (inst
.operands
[0].writeback
,
10991 _("push/pop do not support {reglist}^"));
10992 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
10993 _("expression too complex"));
10995 mask
= inst
.operands
[0].imm
;
10996 if ((mask
& ~0xff) == 0)
10997 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
10998 else if ((inst
.instruction
== T_MNEM_push
10999 && (mask
& ~0xff) == 1 << REG_LR
)
11000 || (inst
.instruction
== T_MNEM_pop
11001 && (mask
& ~0xff) == 1 << REG_PC
))
11003 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11004 inst
.instruction
|= THUMB_PP_PC_LR
;
11005 inst
.instruction
|= mask
& 0xff;
11007 else if (unified_syntax
)
11009 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11010 encode_thumb2_ldmstm (13, mask
, TRUE
);
11014 inst
.error
= _("invalid register list to push/pop instruction");
11024 Rd
= inst
.operands
[0].reg
;
11025 Rm
= inst
.operands
[1].reg
;
11027 reject_bad_reg (Rd
);
11028 reject_bad_reg (Rm
);
11030 inst
.instruction
|= Rd
<< 8;
11031 inst
.instruction
|= Rm
<< 16;
11032 inst
.instruction
|= Rm
;
11040 Rd
= inst
.operands
[0].reg
;
11041 Rm
= inst
.operands
[1].reg
;
11043 reject_bad_reg (Rd
);
11044 reject_bad_reg (Rm
);
11046 if (Rd
<= 7 && Rm
<= 7
11047 && inst
.size_req
!= 4)
11049 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11050 inst
.instruction
|= Rd
;
11051 inst
.instruction
|= Rm
<< 3;
11053 else if (unified_syntax
)
11055 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11056 inst
.instruction
|= Rd
<< 8;
11057 inst
.instruction
|= Rm
<< 16;
11058 inst
.instruction
|= Rm
;
11061 inst
.error
= BAD_HIREG
;
11069 Rd
= inst
.operands
[0].reg
;
11070 Rm
= inst
.operands
[1].reg
;
11072 reject_bad_reg (Rd
);
11073 reject_bad_reg (Rm
);
11075 inst
.instruction
|= Rd
<< 8;
11076 inst
.instruction
|= Rm
;
11084 Rd
= inst
.operands
[0].reg
;
11085 Rs
= (inst
.operands
[1].present
11086 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11087 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11089 reject_bad_reg (Rd
);
11090 reject_bad_reg (Rs
);
11091 if (inst
.operands
[2].isreg
)
11092 reject_bad_reg (inst
.operands
[2].reg
);
11094 inst
.instruction
|= Rd
<< 8;
11095 inst
.instruction
|= Rs
<< 16;
11096 if (!inst
.operands
[2].isreg
)
11098 bfd_boolean narrow
;
11100 if ((inst
.instruction
& 0x00100000) != 0)
11101 narrow
= !in_it_block ();
11103 narrow
= in_it_block ();
11105 if (Rd
> 7 || Rs
> 7)
11108 if (inst
.size_req
== 4 || !unified_syntax
)
11111 if (inst
.reloc
.exp
.X_op
!= O_constant
11112 || inst
.reloc
.exp
.X_add_number
!= 0)
11115 /* Turn rsb #0 into 16-bit neg. We should probably do this via
11116 relaxation, but it doesn't seem worth the hassle. */
11119 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11120 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
11121 inst
.instruction
|= Rs
<< 3;
11122 inst
.instruction
|= Rd
;
11126 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11127 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11131 encode_thumb32_shifted_operand (2);
11137 set_it_insn_type (OUTSIDE_IT_INSN
);
11138 if (inst
.operands
[0].imm
)
11139 inst
.instruction
|= 0x8;
11145 if (!inst
.operands
[1].present
)
11146 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
11148 if (unified_syntax
)
11150 bfd_boolean narrow
;
11153 switch (inst
.instruction
)
11156 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
11158 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
11160 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
11162 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
11166 if (THUMB_SETS_FLAGS (inst
.instruction
))
11167 narrow
= !in_it_block ();
11169 narrow
= in_it_block ();
11170 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
11172 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
11174 if (inst
.operands
[2].isreg
11175 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
11176 || inst
.operands
[2].reg
> 7))
11178 if (inst
.size_req
== 4)
11181 reject_bad_reg (inst
.operands
[0].reg
);
11182 reject_bad_reg (inst
.operands
[1].reg
);
11186 if (inst
.operands
[2].isreg
)
11188 reject_bad_reg (inst
.operands
[2].reg
);
11189 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11190 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11191 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11192 inst
.instruction
|= inst
.operands
[2].reg
;
11196 inst
.operands
[1].shifted
= 1;
11197 inst
.operands
[1].shift_kind
= shift_kind
;
11198 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
11199 ? T_MNEM_movs
: T_MNEM_mov
);
11200 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11201 encode_thumb32_shifted_operand (1);
11202 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
11203 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11208 if (inst
.operands
[2].isreg
)
11210 switch (shift_kind
)
11212 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
11213 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
11214 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
11215 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
11219 inst
.instruction
|= inst
.operands
[0].reg
;
11220 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
11224 switch (shift_kind
)
11226 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11227 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11228 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11231 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11232 inst
.instruction
|= inst
.operands
[0].reg
;
11233 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11239 constraint (inst
.operands
[0].reg
> 7
11240 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
11241 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11243 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
11245 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
11246 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
11247 _("source1 and dest must be same register"));
11249 switch (inst
.instruction
)
11251 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
11252 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
11253 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
11254 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
11258 inst
.instruction
|= inst
.operands
[0].reg
;
11259 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
11263 switch (inst
.instruction
)
11265 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11266 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11267 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11268 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
11271 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11272 inst
.instruction
|= inst
.operands
[0].reg
;
11273 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11281 unsigned Rd
, Rn
, Rm
;
11283 Rd
= inst
.operands
[0].reg
;
11284 Rn
= inst
.operands
[1].reg
;
11285 Rm
= inst
.operands
[2].reg
;
11287 reject_bad_reg (Rd
);
11288 reject_bad_reg (Rn
);
11289 reject_bad_reg (Rm
);
11291 inst
.instruction
|= Rd
<< 8;
11292 inst
.instruction
|= Rn
<< 16;
11293 inst
.instruction
|= Rm
;
11299 unsigned Rd
, Rn
, Rm
;
11301 Rd
= inst
.operands
[0].reg
;
11302 Rm
= inst
.operands
[1].reg
;
11303 Rn
= inst
.operands
[2].reg
;
11305 reject_bad_reg (Rd
);
11306 reject_bad_reg (Rn
);
11307 reject_bad_reg (Rm
);
11309 inst
.instruction
|= Rd
<< 8;
11310 inst
.instruction
|= Rn
<< 16;
11311 inst
.instruction
|= Rm
;
11317 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
11318 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11319 _("expression too complex"));
11320 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11321 inst
.instruction
|= (value
& 0xf000) >> 12;
11322 inst
.instruction
|= (value
& 0x0ff0);
11323 inst
.instruction
|= (value
& 0x000f) << 16;
11327 do_t_ssat_usat (int bias
)
11331 Rd
= inst
.operands
[0].reg
;
11332 Rn
= inst
.operands
[2].reg
;
11334 reject_bad_reg (Rd
);
11335 reject_bad_reg (Rn
);
11337 inst
.instruction
|= Rd
<< 8;
11338 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
11339 inst
.instruction
|= Rn
<< 16;
11341 if (inst
.operands
[3].present
)
11343 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
11345 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11347 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11348 _("expression too complex"));
11350 if (shift_amount
!= 0)
11352 constraint (shift_amount
> 31,
11353 _("shift expression is too large"));
11355 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
11356 inst
.instruction
|= 0x00200000; /* sh bit. */
11358 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
11359 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Encode Thumb-2 SSAT: the saturation immediate is biased by 1.  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
11375 Rd
= inst
.operands
[0].reg
;
11376 Rn
= inst
.operands
[2].reg
;
11378 reject_bad_reg (Rd
);
11379 reject_bad_reg (Rn
);
11381 inst
.instruction
|= Rd
<< 8;
11382 inst
.instruction
|= inst
.operands
[1].imm
- 1;
11383 inst
.instruction
|= Rn
<< 16;
11389 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
11390 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
11391 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
11392 || inst
.operands
[2].negative
,
11395 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
11397 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11398 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11399 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11400 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11406 if (!inst
.operands
[2].present
)
11407 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
11409 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
11410 || inst
.operands
[0].reg
== inst
.operands
[2].reg
11411 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
11414 inst
.instruction
|= inst
.operands
[0].reg
;
11415 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11416 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
11417 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
11423 unsigned Rd
, Rn
, Rm
;
11425 Rd
= inst
.operands
[0].reg
;
11426 Rn
= inst
.operands
[1].reg
;
11427 Rm
= inst
.operands
[2].reg
;
11429 reject_bad_reg (Rd
);
11430 reject_bad_reg (Rn
);
11431 reject_bad_reg (Rm
);
11433 inst
.instruction
|= Rd
<< 8;
11434 inst
.instruction
|= Rn
<< 16;
11435 inst
.instruction
|= Rm
;
11436 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
11444 Rd
= inst
.operands
[0].reg
;
11445 Rm
= inst
.operands
[1].reg
;
11447 reject_bad_reg (Rd
);
11448 reject_bad_reg (Rm
);
11450 if (inst
.instruction
<= 0xffff
11451 && inst
.size_req
!= 4
11452 && Rd
<= 7 && Rm
<= 7
11453 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
11455 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11456 inst
.instruction
|= Rd
;
11457 inst
.instruction
|= Rm
<< 3;
11459 else if (unified_syntax
)
11461 if (inst
.instruction
<= 0xffff)
11462 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11463 inst
.instruction
|= Rd
<< 8;
11464 inst
.instruction
|= Rm
;
11465 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
11469 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
11470 _("Thumb encoding does not support rotation"));
11471 constraint (1, BAD_HIREG
);
11478 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
11487 half
= (inst
.instruction
& 0x10) != 0;
11488 set_it_insn_type_last ();
11489 constraint (inst
.operands
[0].immisreg
,
11490 _("instruction requires register index"));
11492 Rn
= inst
.operands
[0].reg
;
11493 Rm
= inst
.operands
[0].imm
;
11495 constraint (Rn
== REG_SP
, BAD_SP
);
11496 reject_bad_reg (Rm
);
11498 constraint (!half
&& inst
.operands
[0].shifted
,
11499 _("instruction does not allow shifted index"));
11500 inst
.instruction
|= (Rn
<< 16) | Rm
;
/* Encode Thumb-2 USAT: the saturation immediate is used unbiased.  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
11514 Rd
= inst
.operands
[0].reg
;
11515 Rn
= inst
.operands
[2].reg
;
11517 reject_bad_reg (Rd
);
11518 reject_bad_reg (Rn
);
11520 inst
.instruction
|= Rd
<< 8;
11521 inst
.instruction
|= inst
.operands
[1].imm
;
11522 inst
.instruction
|= Rn
<< 16;
11525 /* Neon instruction encoder helpers. */
11527 /* Encodings for the different types for various Neon opcodes. */
11529 /* An "invalid" code for the following tables. */
11532 struct neon_tab_entry
11535 unsigned float_or_poly
;
11536 unsigned scalar_or_imm
;
11539 /* Map overloaded Neon opcodes to their respective encodings. */
11540 #define NEON_ENC_TAB \
11541 X(vabd, 0x0000700, 0x1200d00, N_INV), \
11542 X(vmax, 0x0000600, 0x0000f00, N_INV), \
11543 X(vmin, 0x0000610, 0x0200f00, N_INV), \
11544 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
11545 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
11546 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
11547 X(vadd, 0x0000800, 0x0000d00, N_INV), \
11548 X(vsub, 0x1000800, 0x0200d00, N_INV), \
11549 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
11550 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
11551 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
11552 /* Register variants of the following two instructions are encoded as
11553 vcge / vcgt with the operands reversed. */ \
11554 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
11555 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
11556 X(vfma, N_INV, 0x0000c10, N_INV), \
11557 X(vfms, N_INV, 0x0200c10, N_INV), \
11558 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
11559 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
11560 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
11561 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
11562 X(vmlal, 0x0800800, N_INV, 0x0800240), \
11563 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
11564 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
11565 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
11566 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
11567 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
11568 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
11569 X(vshl, 0x0000400, N_INV, 0x0800510), \
11570 X(vqshl, 0x0000410, N_INV, 0x0800710), \
11571 X(vand, 0x0000110, N_INV, 0x0800030), \
11572 X(vbic, 0x0100110, N_INV, 0x0800030), \
11573 X(veor, 0x1000110, N_INV, N_INV), \
11574 X(vorn, 0x0300110, N_INV, 0x0800010), \
11575 X(vorr, 0x0200110, N_INV, 0x0800010), \
11576 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
11577 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
11578 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
11579 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
11580 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
11581 X(vst1, 0x0000000, 0x0800000, N_INV), \
11582 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
11583 X(vst2, 0x0000100, 0x0800100, N_INV), \
11584 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
11585 X(vst3, 0x0000200, 0x0800200, N_INV), \
11586 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
11587 X(vst4, 0x0000300, 0x0800300, N_INV), \
11588 X(vmovn, 0x1b20200, N_INV, N_INV), \
11589 X(vtrn, 0x1b20080, N_INV, N_INV), \
11590 X(vqmovn, 0x1b20200, N_INV, N_INV), \
11591 X(vqmovun, 0x1b20240, N_INV, N_INV), \
11592 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
11593 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
11594 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
11595 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
11596 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
11597 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
11598 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
11599 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
11600 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
11604 #define X(OPC,I,F,S) N_MNEM_##OPC
11609 static const struct neon_tab_entry neon_enc_tab
[] =
11611 #define X(OPC,I,F,S) { (I), (F), (S) }
11616 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
11617 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11618 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11619 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11620 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11621 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11622 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11623 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11624 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11625 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11626 #define NEON_ENC_SINGLE_(X) \
11627 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
11628 #define NEON_ENC_DOUBLE_(X) \
11629 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
11631 #define NEON_ENCODE(type, inst) \
11634 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
11635 inst.is_neon = 1; \
11639 #define check_neon_suffixes \
11642 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
11644 as_bad (_("invalid neon suffix for non neon instruction")); \
11650 /* Define shapes for instruction operands. The following mnemonic characters
11651 are used in this table:
11653 F - VFP S<n> register
11654 D - Neon D<n> register
11655 Q - Neon Q<n> register
11659 L - D<n> register list
11661 This table is used to generate various data:
11662 - enumerations of the form NS_DDR to be used as arguments to
11664 - a table classifying shapes into single, double, quad, mixed.
11665 - a table used to drive neon_select_shape. */
11667 #define NEON_SHAPE_DEF \
11668 X(3, (D, D, D), DOUBLE), \
11669 X(3, (Q, Q, Q), QUAD), \
11670 X(3, (D, D, I), DOUBLE), \
11671 X(3, (Q, Q, I), QUAD), \
11672 X(3, (D, D, S), DOUBLE), \
11673 X(3, (Q, Q, S), QUAD), \
11674 X(2, (D, D), DOUBLE), \
11675 X(2, (Q, Q), QUAD), \
11676 X(2, (D, S), DOUBLE), \
11677 X(2, (Q, S), QUAD), \
11678 X(2, (D, R), DOUBLE), \
11679 X(2, (Q, R), QUAD), \
11680 X(2, (D, I), DOUBLE), \
11681 X(2, (Q, I), QUAD), \
11682 X(3, (D, L, D), DOUBLE), \
11683 X(2, (D, Q), MIXED), \
11684 X(2, (Q, D), MIXED), \
11685 X(3, (D, Q, I), MIXED), \
11686 X(3, (Q, D, I), MIXED), \
11687 X(3, (Q, D, D), MIXED), \
11688 X(3, (D, Q, Q), MIXED), \
11689 X(3, (Q, Q, D), MIXED), \
11690 X(3, (Q, D, S), MIXED), \
11691 X(3, (D, Q, S), MIXED), \
11692 X(4, (D, D, D, I), DOUBLE), \
11693 X(4, (Q, Q, Q, I), QUAD), \
11694 X(2, (F, F), SINGLE), \
11695 X(3, (F, F, F), SINGLE), \
11696 X(2, (F, I), SINGLE), \
11697 X(2, (F, D), MIXED), \
11698 X(2, (D, F), MIXED), \
11699 X(3, (F, F, I), MIXED), \
11700 X(4, (R, R, F, F), SINGLE), \
11701 X(4, (F, F, R, R), SINGLE), \
11702 X(3, (D, R, R), DOUBLE), \
11703 X(3, (R, R, D), DOUBLE), \
11704 X(2, (S, R), SINGLE), \
11705 X(2, (R, S), SINGLE), \
11706 X(2, (F, R), SINGLE), \
11707 X(2, (R, F), SINGLE)
11709 #define S2(A,B) NS_##A##B
11710 #define S3(A,B,C) NS_##A##B##C
11711 #define S4(A,B,C,D) NS_##A##B##C##D
11713 #define X(N, L, C) S##N L
11726 enum neon_shape_class
11734 #define X(N, L, C) SC_##C
11736 static enum neon_shape_class neon_shape_class
[] =
11754 /* Register widths of above. */
11755 static unsigned neon_shape_el_size
[] =
11766 struct neon_shape_info
11769 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
11772 #define S2(A,B) { SE_##A, SE_##B }
11773 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
11774 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
11776 #define X(N, L, C) { N, S##N L }
11778 static struct neon_shape_info neon_shape_tab
[] =
11788 /* Bit masks used in type checking given instructions.
11789 'N_EQK' means the type must be the same as (or based on in some way) the key
11790 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
11791 set, various other bits can be set as well in order to modify the meaning of
11792 the type constraint. */
11794 enum neon_type_mask
11817 N_KEY
= 0x1000000, /* Key element (main type specifier). */
11818 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
11819 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
11820 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
11821 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
11822 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
11823 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
11824 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
11825 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
11826 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
11828 N_MAX_NONSPECIAL
= N_F64
11831 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
11833 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
11834 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
11835 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
11836 #define N_SUF_32 (N_SU_32 | N_F32)
11837 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
11838 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
11840 /* Pass this as the first type argument to neon_check_type to ignore types
11842 #define N_IGNORE_TYPE (N_KEY | N_EQK)
11844 /* Select a "shape" for the current instruction (describing register types or
11845 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11846 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11847 function of operand parsing, so this function doesn't need to be called.
11848 Shapes should be listed in order of decreasing length. */
11850 static enum neon_shape
11851 neon_select_shape (enum neon_shape shape
, ...)
11854 enum neon_shape first_shape
= shape
;
11856 /* Fix missing optional operands. FIXME: we don't know at this point how
11857 many arguments we should have, so this makes the assumption that we have
11858 > 1. This is true of all current Neon opcodes, I think, but may not be
11859 true in the future. */
11860 if (!inst
.operands
[1].present
)
11861 inst
.operands
[1] = inst
.operands
[0];
11863 va_start (ap
, shape
);
11865 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
11870 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
11872 if (!inst
.operands
[j
].present
)
11878 switch (neon_shape_tab
[shape
].el
[j
])
11881 if (!(inst
.operands
[j
].isreg
11882 && inst
.operands
[j
].isvec
11883 && inst
.operands
[j
].issingle
11884 && !inst
.operands
[j
].isquad
))
11889 if (!(inst
.operands
[j
].isreg
11890 && inst
.operands
[j
].isvec
11891 && !inst
.operands
[j
].isquad
11892 && !inst
.operands
[j
].issingle
))
11897 if (!(inst
.operands
[j
].isreg
11898 && !inst
.operands
[j
].isvec
))
11903 if (!(inst
.operands
[j
].isreg
11904 && inst
.operands
[j
].isvec
11905 && inst
.operands
[j
].isquad
11906 && !inst
.operands
[j
].issingle
))
11911 if (!(!inst
.operands
[j
].isreg
11912 && !inst
.operands
[j
].isscalar
))
11917 if (!(!inst
.operands
[j
].isreg
11918 && inst
.operands
[j
].isscalar
))
11934 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
11935 first_error (_("invalid instruction shape"));
11940 /* True if SHAPE is predominantly a quadword operation (most of the time, this
11941 means the Q bit should be set). */
11944 neon_quad (enum neon_shape shape
)
11946 return neon_shape_class
[shape
] == SC_QUAD
;
11950 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
11953 /* Allow modification to be made to types which are constrained to be
11954 based on the key element, based on bits set alongside N_EQK. */
11955 if ((typebits
& N_EQK
) != 0)
11957 if ((typebits
& N_HLF
) != 0)
11959 else if ((typebits
& N_DBL
) != 0)
11961 if ((typebits
& N_SGN
) != 0)
11962 *g_type
= NT_signed
;
11963 else if ((typebits
& N_UNS
) != 0)
11964 *g_type
= NT_unsigned
;
11965 else if ((typebits
& N_INT
) != 0)
11966 *g_type
= NT_integer
;
11967 else if ((typebits
& N_FLT
) != 0)
11968 *g_type
= NT_float
;
11969 else if ((typebits
& N_SIZ
) != 0)
11970 *g_type
= NT_untyped
;
11974 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
11975 operand type, i.e. the single type specified in a Neon instruction when it
11976 is the only one given. */
11978 static struct neon_type_el
11979 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
11981 struct neon_type_el dest
= *key
;
11983 gas_assert ((thisarg
& N_EQK
) != 0);
11985 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
11990 /* Convert Neon type and size into compact bitmask representation. */
11992 static enum neon_type_mask
11993 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
12000 case 8: return N_8
;
12001 case 16: return N_16
;
12002 case 32: return N_32
;
12003 case 64: return N_64
;
12011 case 8: return N_I8
;
12012 case 16: return N_I16
;
12013 case 32: return N_I32
;
12014 case 64: return N_I64
;
12022 case 16: return N_F16
;
12023 case 32: return N_F32
;
12024 case 64: return N_F64
;
12032 case 8: return N_P8
;
12033 case 16: return N_P16
;
12041 case 8: return N_S8
;
12042 case 16: return N_S16
;
12043 case 32: return N_S32
;
12044 case 64: return N_S64
;
12052 case 8: return N_U8
;
12053 case 16: return N_U16
;
12054 case 32: return N_U32
;
12055 case 64: return N_U64
;
12066 /* Convert compact Neon bitmask type representation to a type and size. Only
12067 handles the case where a single bit is set in the mask. */
12070 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
12071 enum neon_type_mask mask
)
12073 if ((mask
& N_EQK
) != 0)
12076 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
12078 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
12080 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
12082 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
12087 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
12089 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
12090 *type
= NT_unsigned
;
12091 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
12092 *type
= NT_integer
;
12093 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
12094 *type
= NT_untyped
;
12095 else if ((mask
& (N_P8
| N_P16
)) != 0)
12097 else if ((mask
& (N_F32
| N_F64
)) != 0)
12105 /* Modify a bitmask of allowed types. This is only needed for type
12109 modify_types_allowed (unsigned allowed
, unsigned mods
)
12112 enum neon_el_type type
;
12118 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
12120 if (el_type_of_type_chk (&type
, &size
,
12121 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
12123 neon_modify_type_size (mods
, &type
, &size
);
12124 destmask
|= type_chk_of_el_type (type
, size
);
12131 /* Check type and return type classification.
12132 The manual states (paraphrase): If one datatype is given, it indicates the
12134 - the second operand, if there is one
12135 - the operand, if there is no second operand
12136 - the result, if there are no operands.
12137 This isn't quite good enough though, so we use a concept of a "key" datatype
12138 which is set on a per-instruction basis, which is the one which matters when
12139 only one data type is written.
12140 Note: this function has side-effects (e.g. filling in missing operands). All
12141 Neon instructions should call it before performing bit encoding. */
12143 static struct neon_type_el
12144 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
12147 unsigned i
, pass
, key_el
= 0;
12148 unsigned types
[NEON_MAX_TYPE_ELS
];
12149 enum neon_el_type k_type
= NT_invtype
;
12150 unsigned k_size
= -1u;
12151 struct neon_type_el badtype
= {NT_invtype
, -1};
12152 unsigned key_allowed
= 0;
12154 /* Optional registers in Neon instructions are always (not) in operand 1.
12155 Fill in the missing operand here, if it was omitted. */
12156 if (els
> 1 && !inst
.operands
[1].present
)
12157 inst
.operands
[1] = inst
.operands
[0];
12159 /* Suck up all the varargs. */
12161 for (i
= 0; i
< els
; i
++)
12163 unsigned thisarg
= va_arg (ap
, unsigned);
12164 if (thisarg
== N_IGNORE_TYPE
)
12169 types
[i
] = thisarg
;
12170 if ((thisarg
& N_KEY
) != 0)
12175 if (inst
.vectype
.elems
> 0)
12176 for (i
= 0; i
< els
; i
++)
12177 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
12179 first_error (_("types specified in both the mnemonic and operands"));
12183 /* Duplicate inst.vectype elements here as necessary.
12184 FIXME: No idea if this is exactly the same as the ARM assembler,
12185 particularly when an insn takes one register and one non-register
12187 if (inst
.vectype
.elems
== 1 && els
> 1)
12190 inst
.vectype
.elems
= els
;
12191 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
12192 for (j
= 0; j
< els
; j
++)
12194 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
12197 else if (inst
.vectype
.elems
== 0 && els
> 0)
12200 /* No types were given after the mnemonic, so look for types specified
12201 after each operand. We allow some flexibility here; as long as the
12202 "key" operand has a type, we can infer the others. */
12203 for (j
= 0; j
< els
; j
++)
12204 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
12205 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
12207 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
12209 for (j
= 0; j
< els
; j
++)
12210 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
12211 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
12216 first_error (_("operand types can't be inferred"));
12220 else if (inst
.vectype
.elems
!= els
)
12222 first_error (_("type specifier has the wrong number of parts"));
12226 for (pass
= 0; pass
< 2; pass
++)
12228 for (i
= 0; i
< els
; i
++)
12230 unsigned thisarg
= types
[i
];
12231 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
12232 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
12233 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
12234 unsigned g_size
= inst
.vectype
.el
[i
].size
;
12236 /* Decay more-specific signed & unsigned types to sign-insensitive
12237 integer types if sign-specific variants are unavailable. */
12238 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
12239 && (types_allowed
& N_SU_ALL
) == 0)
12240 g_type
= NT_integer
;
12242 /* If only untyped args are allowed, decay any more specific types to
12243 them. Some instructions only care about signs for some element
12244 sizes, so handle that properly. */
12245 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
12246 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
12247 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
12248 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
12249 g_type
= NT_untyped
;
12253 if ((thisarg
& N_KEY
) != 0)
12257 key_allowed
= thisarg
& ~N_KEY
;
12262 if ((thisarg
& N_VFP
) != 0)
12264 enum neon_shape_el regshape
;
12265 unsigned regwidth
, match
;
12267 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
12270 first_error (_("invalid instruction shape"));
12273 regshape
= neon_shape_tab
[ns
].el
[i
];
12274 regwidth
= neon_shape_el_size
[regshape
];
12276 /* In VFP mode, operands must match register widths. If we
12277 have a key operand, use its width, else use the width of
12278 the current operand. */
12284 if (regwidth
!= match
)
12286 first_error (_("operand size must match register width"));
12291 if ((thisarg
& N_EQK
) == 0)
12293 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
12295 if ((given_type
& types_allowed
) == 0)
12297 first_error (_("bad type in Neon instruction"));
12303 enum neon_el_type mod_k_type
= k_type
;
12304 unsigned mod_k_size
= k_size
;
12305 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
12306 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
12308 first_error (_("inconsistent types in Neon instruction"));
12316 return inst
.vectype
.el
[key_el
];
12319 /* Neon-style VFP instruction forwarding. */
12321 /* Thumb VFP instructions have 0xE in the condition field. */
12324 do_vfp_cond_or_thumb (void)
12329 inst
.instruction
|= 0xe0000000;
12331 inst
.instruction
|= inst
.cond
<< 28;
12334 /* Look up and encode a simple mnemonic, for use as a helper function for the
12335 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12336 etc. It is assumed that operand parsing has already been done, and that the
12337 operands are in the form expected by the given opcode (this isn't necessarily
12338 the same as the form in which they were parsed, hence some massaging must
12339 take place before this function is called).
12340 Checks current arch version against that in the looked-up opcode. */
12343 do_vfp_nsyn_opcode (const char *opname
)
12345 const struct asm_opcode
*opcode
;
12347 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
12352 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
12353 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
12360 inst
.instruction
= opcode
->tvalue
;
12361 opcode
->tencode ();
12365 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
12366 opcode
->aencode ();
12371 do_vfp_nsyn_add_sub (enum neon_shape rs
)
12373 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
12378 do_vfp_nsyn_opcode ("fadds");
12380 do_vfp_nsyn_opcode ("fsubs");
12385 do_vfp_nsyn_opcode ("faddd");
12387 do_vfp_nsyn_opcode ("fsubd");
12391 /* Check operand types to see if this is a VFP instruction, and if so call
12395 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
12397 enum neon_shape rs
;
12398 struct neon_type_el et
;
12403 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12404 et
= neon_check_type (2, rs
,
12405 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12409 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12410 et
= neon_check_type (3, rs
,
12411 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12418 if (et
.type
!= NT_invtype
)
12429 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
12431 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
12436 do_vfp_nsyn_opcode ("fmacs");
12438 do_vfp_nsyn_opcode ("fnmacs");
12443 do_vfp_nsyn_opcode ("fmacd");
12445 do_vfp_nsyn_opcode ("fnmacd");
12450 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
12452 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
12457 do_vfp_nsyn_opcode ("ffmas");
12459 do_vfp_nsyn_opcode ("ffnmas");
12464 do_vfp_nsyn_opcode ("ffmad");
12466 do_vfp_nsyn_opcode ("ffnmad");
12471 do_vfp_nsyn_mul (enum neon_shape rs
)
12474 do_vfp_nsyn_opcode ("fmuls");
12476 do_vfp_nsyn_opcode ("fmuld");
12480 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
12482 int is_neg
= (inst
.instruction
& 0x80) != 0;
12483 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
12488 do_vfp_nsyn_opcode ("fnegs");
12490 do_vfp_nsyn_opcode ("fabss");
12495 do_vfp_nsyn_opcode ("fnegd");
12497 do_vfp_nsyn_opcode ("fabsd");
12501 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12502 insns belong to Neon, and are handled elsewhere. */
12505 do_vfp_nsyn_ldm_stm (int is_dbmode
)
12507 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
12511 do_vfp_nsyn_opcode ("fldmdbs");
12513 do_vfp_nsyn_opcode ("fldmias");
12518 do_vfp_nsyn_opcode ("fstmdbs");
12520 do_vfp_nsyn_opcode ("fstmias");
12525 do_vfp_nsyn_sqrt (void)
12527 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12528 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12531 do_vfp_nsyn_opcode ("fsqrts");
12533 do_vfp_nsyn_opcode ("fsqrtd");
12537 do_vfp_nsyn_div (void)
12539 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12540 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
12541 N_F32
| N_F64
| N_KEY
| N_VFP
);
12544 do_vfp_nsyn_opcode ("fdivs");
12546 do_vfp_nsyn_opcode ("fdivd");
12550 do_vfp_nsyn_nmul (void)
12552 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12553 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
12554 N_F32
| N_F64
| N_KEY
| N_VFP
);
12558 NEON_ENCODE (SINGLE
, inst
);
12559 do_vfp_sp_dyadic ();
12563 NEON_ENCODE (DOUBLE
, inst
);
12564 do_vfp_dp_rd_rn_rm ();
12566 do_vfp_cond_or_thumb ();
12570 do_vfp_nsyn_cmp (void)
12572 if (inst
.operands
[1].isreg
)
12574 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12575 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12579 NEON_ENCODE (SINGLE
, inst
);
12580 do_vfp_sp_monadic ();
12584 NEON_ENCODE (DOUBLE
, inst
);
12585 do_vfp_dp_rd_rm ();
12590 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
12591 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
12593 switch (inst
.instruction
& 0x0fffffff)
12596 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
12599 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
12607 NEON_ENCODE (SINGLE
, inst
);
12608 do_vfp_sp_compare_z ();
12612 NEON_ENCODE (DOUBLE
, inst
);
12616 do_vfp_cond_or_thumb ();
12620 nsyn_insert_sp (void)
12622 inst
.operands
[1] = inst
.operands
[0];
12623 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
12624 inst
.operands
[0].reg
= REG_SP
;
12625 inst
.operands
[0].isreg
= 1;
12626 inst
.operands
[0].writeback
= 1;
12627 inst
.operands
[0].present
= 1;
12631 do_vfp_nsyn_push (void)
12634 if (inst
.operands
[1].issingle
)
12635 do_vfp_nsyn_opcode ("fstmdbs");
12637 do_vfp_nsyn_opcode ("fstmdbd");
12641 do_vfp_nsyn_pop (void)
12644 if (inst
.operands
[1].issingle
)
12645 do_vfp_nsyn_opcode ("fldmias");
12647 do_vfp_nsyn_opcode ("fldmiad");
12650 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12651 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
12654 neon_dp_fixup (struct arm_it
* insn
)
12656 unsigned int i
= insn
->instruction
;
12661 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
12672 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3) for use in element size encodings.  X must be a power of
   two of at least 8; ffs() gives the 1-based index of its lowest set bit.  */

static int
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
12684 #define LOW4(R) ((R) & 0xf)
12685 #define HI1(R) (((R) >> 4) & 1)
12687 /* Encode insns with bit pattern:
12689 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12690 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
12692 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
12693 different meaning for some instruction. */
12696 neon_three_same (int isquad
, int ubit
, int size
)
12698 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12699 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12700 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12701 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12702 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12703 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12704 inst
.instruction
|= (isquad
!= 0) << 6;
12705 inst
.instruction
|= (ubit
!= 0) << 24;
12707 inst
.instruction
|= neon_logbits (size
) << 20;
12709 neon_dp_fixup (&inst
);
12712 /* Encode instructions of the form:
12714 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12715 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12717 Don't write size if SIZE == -1. */
12720 neon_two_same (int qbit
, int ubit
, int size
)
12722 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12723 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12724 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12725 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12726 inst
.instruction
|= (qbit
!= 0) << 6;
12727 inst
.instruction
|= (ubit
!= 0) << 24;
12730 inst
.instruction
|= neon_logbits (size
) << 18;
12732 neon_dp_fixup (&inst
);
12735 /* Neon instruction encoders, in approximate order of appearance. */
12738 do_neon_dyadic_i_su (void)
12740 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12741 struct neon_type_el et
= neon_check_type (3, rs
,
12742 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
12743 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12747 do_neon_dyadic_i64_su (void)
12749 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12750 struct neon_type_el et
= neon_check_type (3, rs
,
12751 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
12752 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12756 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
12759 unsigned size
= et
.size
>> 3;
12760 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12761 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12762 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12763 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12764 inst
.instruction
|= (isquad
!= 0) << 6;
12765 inst
.instruction
|= immbits
<< 16;
12766 inst
.instruction
|= (size
>> 3) << 7;
12767 inst
.instruction
|= (size
& 0x7) << 19;
12769 inst
.instruction
|= (uval
!= 0) << 24;
12771 neon_dp_fixup (&inst
);
12775 do_neon_shl_imm (void)
12777 if (!inst
.operands
[2].isreg
)
12779 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12780 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
12781 NEON_ENCODE (IMMED
, inst
);
12782 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, inst
.operands
[2].imm
);
12786 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12787 struct neon_type_el et
= neon_check_type (3, rs
,
12788 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
12791 /* VSHL/VQSHL 3-register variants have syntax such as:
12793 whereas other 3-register operations encoded by neon_three_same have
12796 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
12798 tmp
= inst
.operands
[2].reg
;
12799 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12800 inst
.operands
[1].reg
= tmp
;
12801 NEON_ENCODE (INTEGER
, inst
);
12802 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12807 do_neon_qshl_imm (void)
12809 if (!inst
.operands
[2].isreg
)
12811 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12812 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
12814 NEON_ENCODE (IMMED
, inst
);
12815 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
12816 inst
.operands
[2].imm
);
12820 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12821 struct neon_type_el et
= neon_check_type (3, rs
,
12822 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
12825 /* See note in do_neon_shl_imm. */
12826 tmp
= inst
.operands
[2].reg
;
12827 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12828 inst
.operands
[1].reg
= tmp
;
12829 NEON_ENCODE (INTEGER
, inst
);
12830 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12835 do_neon_rshl (void)
12837 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12838 struct neon_type_el et
= neon_check_type (3, rs
,
12839 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
12842 tmp
= inst
.operands
[2].reg
;
12843 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12844 inst
.operands
[1].reg
= tmp
;
12845 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12849 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
12851 /* Handle .I8 pseudo-instructions. */
12854 /* Unfortunately, this will make everything apart from zero out-of-range.
12855 FIXME is this the intended semantics? There doesn't seem much point in
12856 accepting .I8 if so. */
12857 immediate
|= immediate
<< 8;
12863 if (immediate
== (immediate
& 0x000000ff))
12865 *immbits
= immediate
;
12868 else if (immediate
== (immediate
& 0x0000ff00))
12870 *immbits
= immediate
>> 8;
12873 else if (immediate
== (immediate
& 0x00ff0000))
12875 *immbits
= immediate
>> 16;
12878 else if (immediate
== (immediate
& 0xff000000))
12880 *immbits
= immediate
>> 24;
12883 if ((immediate
& 0xffff) != (immediate
>> 16))
12884 goto bad_immediate
;
12885 immediate
&= 0xffff;
12888 if (immediate
== (immediate
& 0x000000ff))
12890 *immbits
= immediate
;
12893 else if (immediate
== (immediate
& 0x0000ff00))
12895 *immbits
= immediate
>> 8;
12900 first_error (_("immediate value out of range"));
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D — i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
         && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
         && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
         && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For immediate of above form, return 0bABCD: one bit per byte of IMM,
   taken from each byte's low bit.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
         | ((imm & 0x01000000) >> 21);
}
/* Compress quarter-float representation to 0b...000 abcdefgh: sign bit
   from bit 31 into bit 7, and the relevant exponent/mantissa bits from
   bits 25-19 into bits 6-0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
12933 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12934 the instruction. *OP is passed as the initial value of the op field, and
12935 may be set to a different value depending on the constant (i.e.
12936 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12937 MVN). If the immediate looks like a repeated pattern then also
12938 try smaller element sizes. */
12941 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
12942 unsigned *immbits
, int *op
, int size
,
12943 enum neon_el_type type
)
12945 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
12947 if (type
== NT_float
&& !float_p
)
12950 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
12952 if (size
!= 32 || *op
== 1)
12954 *immbits
= neon_qfloat_bits (immlo
);
12960 if (neon_bits_same_in_bytes (immhi
)
12961 && neon_bits_same_in_bytes (immlo
))
12965 *immbits
= (neon_squash_bits (immhi
) << 4)
12966 | neon_squash_bits (immlo
);
12971 if (immhi
!= immlo
)
12977 if (immlo
== (immlo
& 0x000000ff))
12982 else if (immlo
== (immlo
& 0x0000ff00))
12984 *immbits
= immlo
>> 8;
12987 else if (immlo
== (immlo
& 0x00ff0000))
12989 *immbits
= immlo
>> 16;
12992 else if (immlo
== (immlo
& 0xff000000))
12994 *immbits
= immlo
>> 24;
12997 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
12999 *immbits
= (immlo
>> 8) & 0xff;
13002 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
13004 *immbits
= (immlo
>> 16) & 0xff;
13008 if ((immlo
& 0xffff) != (immlo
>> 16))
13015 if (immlo
== (immlo
& 0x000000ff))
13020 else if (immlo
== (immlo
& 0x0000ff00))
13022 *immbits
= immlo
>> 8;
13026 if ((immlo
& 0xff) != (immlo
>> 8))
13031 if (immlo
== (immlo
& 0x000000ff))
13033 /* Don't allow MVN with 8-bit immediate. */
13043 /* Write immediate bits [7:0] to the following locations:
13045 |28/24|23 19|18 16|15 4|3 0|
13046 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13048 This function is used by VMOV/VMVN/VORR/VBIC. */
13051 neon_write_immbits (unsigned immbits
)
13053 inst
.instruction
|= immbits
& 0xf;
13054 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
13055 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
13096 do_neon_logic (void)
13098 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
13100 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13101 neon_check_type (3, rs
, N_IGNORE_TYPE
);
13102 /* U bit and size field were set as part of the bitmask. */
13103 NEON_ENCODE (INTEGER
, inst
);
13104 neon_three_same (neon_quad (rs
), 0, -1);
13108 const int three_ops_form
= (inst
.operands
[2].present
13109 && !inst
.operands
[2].isreg
);
13110 const int immoperand
= (three_ops_form
? 2 : 1);
13111 enum neon_shape rs
= (three_ops_form
13112 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
13113 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
13114 struct neon_type_el et
= neon_check_type (2, rs
,
13115 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
13116 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
13120 if (et
.type
== NT_invtype
)
13123 if (three_ops_form
)
13124 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13125 _("first and second operands shall be the same register"));
13127 NEON_ENCODE (IMMED
, inst
);
13129 immbits
= inst
.operands
[immoperand
].imm
;
13132 /* .i64 is a pseudo-op, so the immediate must be a repeating
13134 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
13135 inst
.operands
[immoperand
].reg
: 0))
13137 /* Set immbits to an invalid constant. */
13138 immbits
= 0xdeadbeef;
13145 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
13149 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
13153 /* Pseudo-instruction for VBIC. */
13154 neon_invert_size (&immbits
, 0, et
.size
);
13155 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
13159 /* Pseudo-instruction for VORR. */
13160 neon_invert_size (&immbits
, 0, et
.size
);
13161 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
13171 inst
.instruction
|= neon_quad (rs
) << 6;
13172 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13173 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13174 inst
.instruction
|= cmode
<< 8;
13175 neon_write_immbits (immbits
);
13177 neon_dp_fixup (&inst
);
13182 do_neon_bitfield (void)
13184 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13185 neon_check_type (3, rs
, N_IGNORE_TYPE
);
13186 neon_three_same (neon_quad (rs
), 0, -1);
13190 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
13193 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13194 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
13196 if (et
.type
== NT_float
)
13198 NEON_ENCODE (FLOAT
, inst
);
13199 neon_three_same (neon_quad (rs
), 0, -1);
13203 NEON_ENCODE (INTEGER
, inst
);
13204 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
13209 do_neon_dyadic_if_su (void)
13211 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
13215 do_neon_dyadic_if_su_d (void)
13217 /* This version only allow D registers, but that constraint is enforced during
13218 operand parsing so we don't need to do anything extra here. */
13219 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
13223 do_neon_dyadic_if_i_d (void)
13225 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13226 affected if we specify unsigned args. */
13227 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags for vfp_or_neon_is_neon: which checks/fix-ups to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2
};
13236 /* Call this function if an instruction which may have belonged to the VFP or
13237 Neon instruction sets, but turned out to be a Neon instruction (due to the
13238 operand types involved, etc.). We have to check and/or fix-up a couple of
13241 - Make sure the user hasn't attempted to make a Neon instruction
13243 - Alter the value in the condition code field if necessary.
13244 - Make sure that the arch supports Neon instructions.
13246 Which of these operations take place depends on bits from enum
13247 vfp_or_neon_is_neon_bits.
13249 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13250 current instruction's condition is COND_ALWAYS, the condition field is
13251 changed to inst.uncond_value. This is necessary because instructions shared
13252 between VFP and Neon may be conditional for the VFP variants only, and the
13253 unconditional Neon version must have, e.g., 0xF in the condition field. */
13256 vfp_or_neon_is_neon (unsigned check
)
13258 /* Conditions are always legal in Thumb mode (IT blocks). */
13259 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
13261 if (inst
.cond
!= COND_ALWAYS
)
13263 first_error (_(BAD_COND
));
13266 if (inst
.uncond_value
!= -1)
13267 inst
.instruction
|= inst
.uncond_value
<< 28;
13270 if ((check
& NEON_CHECK_ARCH
)
13271 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
13273 first_error (_(BAD_FPU
));
13281 do_neon_addsub_if_i (void)
13283 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
13286 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13289 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13290 affected if we specify unsigned args. */
13291 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
13294 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13296 V<op> A,B (A is operand 0, B is operand 2)
13301 so handle that case specially. */
13304 neon_exchange_operands (void)
13306 void *scratch
= alloca (sizeof (inst
.operands
[0]));
13307 if (inst
.operands
[1].present
)
13309 /* Swap operands[1] and operands[2]. */
13310 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
13311 inst
.operands
[1] = inst
.operands
[2];
13312 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
13316 inst
.operands
[1] = inst
.operands
[2];
13317 inst
.operands
[2] = inst
.operands
[0];
13322 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
13324 if (inst
.operands
[2].isreg
)
13327 neon_exchange_operands ();
13328 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
13332 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13333 struct neon_type_el et
= neon_check_type (2, rs
,
13334 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
13336 NEON_ENCODE (IMMED
, inst
);
13337 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13338 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13339 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13340 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13341 inst
.instruction
|= neon_quad (rs
) << 6;
13342 inst
.instruction
|= (et
.type
== NT_float
) << 10;
13343 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13345 neon_dp_fixup (&inst
);
13352 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
13356 do_neon_cmp_inv (void)
13358 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
13364 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and
   the index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
13399 /* Encode multiply / multiply-accumulate scalar instructions. */
13402 neon_mul_mac (struct neon_type_el et
, int ubit
)
13406 /* Give a more helpful error message if we have an invalid type. */
13407 if (et
.type
== NT_invtype
)
13410 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
13411 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13412 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13413 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13414 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13415 inst
.instruction
|= LOW4 (scalar
);
13416 inst
.instruction
|= HI1 (scalar
) << 5;
13417 inst
.instruction
|= (et
.type
== NT_float
) << 8;
13418 inst
.instruction
|= neon_logbits (et
.size
) << 20;
13419 inst
.instruction
|= (ubit
!= 0) << 24;
13421 neon_dp_fixup (&inst
);
13425 do_neon_mac_maybe_scalar (void)
13427 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
13430 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13433 if (inst
.operands
[2].isscalar
)
13435 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
13436 struct neon_type_el et
= neon_check_type (3, rs
,
13437 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
13438 NEON_ENCODE (SCALAR
, inst
);
13439 neon_mul_mac (et
, neon_quad (rs
));
13443 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13444 affected if we specify unsigned args. */
13445 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13450 do_neon_fmac (void)
13452 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
13455 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13458 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13464 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13465 struct neon_type_el et
= neon_check_type (3, rs
,
13466 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13467 neon_three_same (neon_quad (rs
), 0, et
.size
);
13470 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13471 same types as the MAC equivalents. The polynomial type for this instruction
13472 is encoded the same as the integer type. */
13477 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
13480 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13483 if (inst
.operands
[2].isscalar
)
13484 do_neon_mac_maybe_scalar ();
13486 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
13490 do_neon_qdmulh (void)
13492 if (inst
.operands
[2].isscalar
)
13494 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
13495 struct neon_type_el et
= neon_check_type (3, rs
,
13496 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
13497 NEON_ENCODE (SCALAR
, inst
);
13498 neon_mul_mac (et
, neon_quad (rs
));
13502 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13503 struct neon_type_el et
= neon_check_type (3, rs
,
13504 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
13505 NEON_ENCODE (INTEGER
, inst
);
13506 /* The U bit (rounding) comes from bit mask. */
13507 neon_three_same (neon_quad (rs
), 0, et
.size
);
13512 do_neon_fcmp_absolute (void)
13514 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13515 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
13516 /* Size field comes from bit mask. */
13517 neon_three_same (neon_quad (rs
), 1, -1);
/* VACLE/VACLT: as above with operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
13528 do_neon_step (void)
13530 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13531 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
13532 neon_three_same (neon_quad (rs
), 0, -1);
13536 do_neon_abs_neg (void)
13538 enum neon_shape rs
;
13539 struct neon_type_el et
;
13541 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
13544 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13547 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13548 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
13550 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13551 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13552 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13553 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13554 inst
.instruction
|= neon_quad (rs
) << 6;
13555 inst
.instruction
|= (et
.type
== NT_float
) << 10;
13556 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13558 neon_dp_fixup (&inst
);
13564 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13565 struct neon_type_el et
= neon_check_type (2, rs
,
13566 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13567 int imm
= inst
.operands
[2].imm
;
13568 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
13569 _("immediate out of range for insert"));
13570 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
13576 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13577 struct neon_type_el et
= neon_check_type (2, rs
,
13578 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13579 int imm
= inst
.operands
[2].imm
;
13580 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13581 _("immediate out of range for insert"));
13582 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
13586 do_neon_qshlu_imm (void)
13588 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13589 struct neon_type_el et
= neon_check_type (2, rs
,
13590 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
13591 int imm
= inst
.operands
[2].imm
;
13592 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
13593 _("immediate out of range for shift"));
13594 /* Only encodes the 'U present' variant of the instruction.
13595 In this case, signed types have OP (bit 8) set to 0.
13596 Unsigned types have OP set to 1. */
13597 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
13598 /* The rest of the bits are the same as other immediate shifts. */
13599 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
13603 do_neon_qmovn (void)
13605 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13606 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
13607 /* Saturating move where operands can be signed or unsigned, and the
13608 destination has the same signedness. */
13609 NEON_ENCODE (INTEGER
, inst
);
13610 if (et
.type
== NT_unsigned
)
13611 inst
.instruction
|= 0xc0;
13613 inst
.instruction
|= 0x80;
13614 neon_two_same (0, 1, et
.size
/ 2);
13618 do_neon_qmovun (void)
13620 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13621 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
13622 /* Saturating move with unsigned results. Operands must be signed. */
13623 NEON_ENCODE (INTEGER
, inst
);
13624 neon_two_same (0, 1, et
.size
/ 2);
13628 do_neon_rshift_sat_narrow (void)
13630 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13631 or unsigned. If operands are unsigned, results must also be unsigned. */
13632 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13633 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
13634 int imm
= inst
.operands
[2].imm
;
13635 /* This gets the bounds check, size encoding and immediate bits calculation
13639 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
13640 VQMOVN.I<size> <Dd>, <Qm>. */
13643 inst
.operands
[2].present
= 0;
13644 inst
.instruction
= N_MNEM_vqmovn
;
13649 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13650 _("immediate out of range"));
13651 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
13655 do_neon_rshift_sat_narrow_u (void)
13657 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13658 or unsigned. If operands are unsigned, results must also be unsigned. */
13659 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13660 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
13661 int imm
= inst
.operands
[2].imm
;
13662 /* This gets the bounds check, size encoding and immediate bits calculation
13666 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
13667 VQMOVUN.I<size> <Dd>, <Qm>. */
13670 inst
.operands
[2].present
= 0;
13671 inst
.instruction
= N_MNEM_vqmovun
;
13676 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13677 _("immediate out of range"));
13678 /* FIXME: The manual is kind of unclear about what value U should have in
13679 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
13681 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
13685 do_neon_movn (void)
13687 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13688 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
13689 NEON_ENCODE (INTEGER
, inst
);
13690 neon_two_same (0, 1, et
.size
/ 2);
13694 do_neon_rshift_narrow (void)
13696 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13697 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
13698 int imm
= inst
.operands
[2].imm
;
13699 /* This gets the bounds check, size encoding and immediate bits calculation
13703 /* If immediate is zero then we are a pseudo-instruction for
13704 VMOVN.I<size> <Dd>, <Qm> */
13707 inst
.operands
[2].present
= 0;
13708 inst
.instruction
= N_MNEM_vmovn
;
13713 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13714 _("immediate out of range for narrowing operation"));
13715 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
13719 do_neon_shll (void)
13721 /* FIXME: Type checking when lengthening. */
13722 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
13723 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
13724 unsigned imm
= inst
.operands
[2].imm
;
13726 if (imm
== et
.size
)
13728 /* Maximum shift variant. */
13729 NEON_ENCODE (INTEGER
, inst
);
13730 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13731 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13732 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13733 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13734 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13736 neon_dp_fixup (&inst
);
13740 /* A more-specific type check for non-max versions. */
13741 et
= neon_check_type (2, NS_QDI
,
13742 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
13743 NEON_ENCODE (IMMED
, inst
);
13744 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
13748 /* Check the various types for the VCVT instruction, and return which version
13749 the current instruction is. */
13752 neon_cvt_flavour (enum neon_shape rs
)
13754 #define CVT_VAR(C,X,Y) \
13755 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
13756 if (et.type != NT_invtype) \
13758 inst.error = NULL; \
13761 struct neon_type_el et
;
13762 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
13763 || rs
== NS_FF
) ? N_VFP
: 0;
13764 /* The instruction versions which take an immediate take one register
13765 argument, which is extended to the width of the full register. Thus the
13766 "source" and "destination" registers must have the same width. Hack that
13767 here by making the size equal to the key (wider, in this case) operand. */
13768 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
13770 CVT_VAR (0, N_S32
, N_F32
);
13771 CVT_VAR (1, N_U32
, N_F32
);
13772 CVT_VAR (2, N_F32
, N_S32
);
13773 CVT_VAR (3, N_F32
, N_U32
);
13774 /* Half-precision conversions. */
13775 CVT_VAR (4, N_F32
, N_F16
);
13776 CVT_VAR (5, N_F16
, N_F32
);
13780 /* VFP instructions. */
13781 CVT_VAR (6, N_F32
, N_F64
);
13782 CVT_VAR (7, N_F64
, N_F32
);
13783 CVT_VAR (8, N_S32
, N_F64
| key
);
13784 CVT_VAR (9, N_U32
, N_F64
| key
);
13785 CVT_VAR (10, N_F64
| key
, N_S32
);
13786 CVT_VAR (11, N_F64
| key
, N_U32
);
13787 /* VFP instructions with bitshift. */
13788 CVT_VAR (12, N_F32
| key
, N_S16
);
13789 CVT_VAR (13, N_F32
| key
, N_U16
);
13790 CVT_VAR (14, N_F64
| key
, N_S16
);
13791 CVT_VAR (15, N_F64
| key
, N_U16
);
13792 CVT_VAR (16, N_S16
, N_F32
| key
);
13793 CVT_VAR (17, N_U16
, N_F32
| key
);
13794 CVT_VAR (18, N_S16
, N_F64
| key
);
13795 CVT_VAR (19, N_U16
, N_F64
| key
);
13801 /* Neon-syntax VFP conversions. */
13804 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
13806 const char *opname
= 0;
13808 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
13810 /* Conversions with immediate bitshift. */
13811 const char *enc
[] =
13835 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13837 opname
= enc
[flavour
];
13838 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13839 _("operands 0 and 1 must be the same register"));
13840 inst
.operands
[1] = inst
.operands
[2];
13841 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
13846 /* Conversions without bitshift. */
13847 const char *enc
[] =
13863 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13864 opname
= enc
[flavour
];
13868 do_vfp_nsyn_opcode (opname
);
/* Encode the VFP "round towards zero" conversion forms of VCVT (Neon-style
   syntax) by looking up the classic-syntax opcode name for the detected
   conversion flavour and delegating to do_vfp_nsyn_opcode.
   NOTE(review): this chunk is a damaged extraction -- the enc[] table
   initializer (original lines 13877-13889) and the function's braces/return
   type were dropped; code bytes below are untouched.  */
13872 do_vfp_nsyn_cvtz (void)
13874 enum neon_shape rs
/* Only single<->single (NS_FF) and single<->double (NS_FD) shapes apply.  */
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
13875 int flavour
= neon_cvt_flavour (rs
);
13876 const char *enc
[] =
/* Guard: only emit when the flavour indexes a non-NULL table entry.  */
13890 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
13891 do_vfp_nsyn_opcode (enc
[flavour
]);
13895 do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED
)
13897 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
13898 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
13899 int flavour
= neon_cvt_flavour (rs
);
13901 /* PR11109: Handle round-to-zero for VCVT conversions. */
13903 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
13904 && (flavour
== 0 || flavour
== 1 || flavour
== 8 || flavour
== 9)
13905 && (rs
== NS_FD
|| rs
== NS_FF
))
13907 do_vfp_nsyn_cvtz ();
13911 /* VFP rather than Neon conversions. */
13914 do_vfp_nsyn_cvt (rs
, flavour
);
13924 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
13926 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13929 /* Fixed-point conversion with #0 immediate is encoded as an
13930 integer conversion. */
13931 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
13933 immbits
= 32 - inst
.operands
[2].imm
;
13934 NEON_ENCODE (IMMED
, inst
);
13936 inst
.instruction
|= enctab
[flavour
];
13937 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13938 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13939 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13940 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13941 inst
.instruction
|= neon_quad (rs
) << 6;
13942 inst
.instruction
|= 1 << 21;
13943 inst
.instruction
|= immbits
<< 16;
13945 neon_dp_fixup (&inst
);
13953 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
13955 NEON_ENCODE (INTEGER
, inst
);
13957 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13961 inst
.instruction
|= enctab
[flavour
];
13963 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13964 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13965 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13966 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13967 inst
.instruction
|= neon_quad (rs
) << 6;
13968 inst
.instruction
|= 2 << 18;
13970 neon_dp_fixup (&inst
);
13974 /* Half-precision conversions for Advanced SIMD -- neon. */
13979 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
13981 as_bad (_("operand size must match register width"));
13986 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
13988 as_bad (_("operand size must match register width"));
13993 inst
.instruction
= 0x3b60600;
13995 inst
.instruction
= 0x3b60700;
13997 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13998 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13999 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14000 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14001 neon_dp_fixup (&inst
);
14005 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
14006 do_vfp_nsyn_cvt (rs
, flavour
);
/* VCVTR: conversion using the FPSCR rounding mode (round_to_zero = FALSE).  */
14011 do_neon_cvtr (void)
14013 do_neon_cvt_1 (FALSE
);
/* NOTE(review): the line below (original 14019) is the body of the sibling
   do_neon_cvt (round_to_zero = TRUE); its signature line was dropped by the
   extraction that produced this chunk -- confirm against the pristine file.  */
14019 do_neon_cvt_1 (TRUE
);
/* Encode VCVTB: half-precision <-> single-precision conversion using the
   bottom half of the single register.  Base opcode 0xeb20a40; bit 16 selects
   the f16->f32 vs f32->f16 direction when a 16-bit size is given.  */
14023 do_neon_cvtb (void)
14025 inst
.instruction
= 0xeb20a40;
14027 /* The sizes are attached to the mnemonic. */
14028 if (inst
.vectype
.el
[0].type
!= NT_invtype
14029 && inst
.vectype
.el
[0].size
== 16)
14030 inst
.instruction
|= 0x00010000;
14032 /* Programmer's syntax: the sizes are attached to the operands. */
14033 else if (inst
.operands
[0].vectype
.type
!= NT_invtype
14034 && inst
.operands
[0].vectype
.size
== 16)
14035 inst
.instruction
|= 0x00010000;
/* Operand 0 is encoded in the Sd field, operand 1 in the Sm field.  */
14037 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
14038 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
14039 do_vfp_cond_or_thumb ();
/* Encode VCVTT: same as VCVTB but operating on the top half of the single
   register -- bit 7 (0x80) is the T/B selector.
   NOTE(review): the call into the shared VCVTB encoder appears to have been
   dropped by the extraction; only the bit-7 OR survives here.  */
14044 do_neon_cvtt (void)
14047 inst
.instruction
|= 0x80;
14051 neon_move_immediate (void)
14053 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
14054 struct neon_type_el et
= neon_check_type (2, rs
,
14055 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14056 unsigned immlo
, immhi
= 0, immbits
;
14057 int op
, cmode
, float_p
;
14059 constraint (et
.type
== NT_invtype
,
14060 _("operand size must be specified for immediate VMOV"));
14062 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
14063 op
= (inst
.instruction
& (1 << 5)) != 0;
14065 immlo
= inst
.operands
[1].imm
;
14066 if (inst
.operands
[1].regisimm
)
14067 immhi
= inst
.operands
[1].reg
;
14069 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
14070 _("immediate has bits set outside the operand size"));
14072 float_p
= inst
.operands
[1].immisfloat
;
14074 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
14075 et
.size
, et
.type
)) == FAIL
)
14077 /* Invert relevant bits only. */
14078 neon_invert_size (&immlo
, &immhi
, et
.size
);
14079 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14080 with one or the other; those cases are caught by
14081 neon_cmode_for_move_imm. */
14083 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
14084 &op
, et
.size
, et
.type
)) == FAIL
)
14086 first_error (_("immediate out of range"));
14091 inst
.instruction
&= ~(1 << 5);
14092 inst
.instruction
|= op
<< 5;
14094 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14095 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14096 inst
.instruction
|= neon_quad (rs
) << 6;
14097 inst
.instruction
|= cmode
<< 8;
14099 neon_write_immbits (immbits
);
14105 if (inst
.operands
[1].isreg
)
14107 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14109 NEON_ENCODE (INTEGER
, inst
);
14110 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14111 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14112 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14113 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14114 inst
.instruction
|= neon_quad (rs
) << 6;
14118 NEON_ENCODE (IMMED
, inst
);
14119 neon_move_immediate ();
14122 neon_dp_fixup (&inst
);
14125 /* Encode instructions of form:
14127 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14128 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
/* Shared encoder for Neon mixed-length (long/wide/narrow) three-operand
   instructions.  ET supplies the signedness for the U bit (bit 24); SIZE is
   the element size whose log2 goes into the size field (bits 21:20).  */
14131 neon_mixed_length (struct neon_type_el et
, unsigned size
)
/* Destination register: D:Rd in bits 22 and 15:12.  */
14133 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14134 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
/* First source: N:Rn in bits 7 and 19:16.  */
14135 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14136 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
/* Second source: M:Rm in bits 5 and 3:0.  */
14137 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14138 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14139 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
14140 inst
.instruction
|= neon_logbits (size
) << 20;
14142 neon_dp_fixup (&inst
);
/* Encode long dyadic operations (Qd = Dn op Dm): result element width is
   double the source width (N_DBL on the destination), sources are S/U 8/16/32.  */
14146 do_neon_dyadic_long (void)
14148 /* FIXME: Type checking for lengthening op. */
14149 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14150 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
14151 neon_mixed_length (et
, et
.size
);
/* Encode VABAL (absolute difference and accumulate, long): like
   do_neon_dyadic_long but the doubled destination must be integer (N_INT).  */
14155 do_neon_abal (void)
14157 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14158 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
14159 neon_mixed_length (et
, et
.size
);
/* Shared encoder for long multiply(-accumulate) instructions that accept
   either a scalar or a register as the second source operand.  REGTYPES and
   SCALARTYPES are the permitted type masks for the scalar and register
   variants respectively.  */
14163 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
14165 if (inst
.operands
[2].isscalar
)
/* Scalar variant: Qd = Dn * Dm[x], type-checked against REGTYPES.  */
14167 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
14168 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
14169 NEON_ENCODE (SCALAR
, inst
);
/* Pass unsignedness so neon_mul_mac can set the U bit.  */
14170 neon_mul_mac (et
, et
.type
== NT_unsigned
);
/* Register variant (else branch -- keyword lost in this extraction):
   Qd = Dn * Dm, type-checked against SCALARTYPES.  */
14174 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14175 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
14176 NEON_ENCODE (INTEGER
, inst
);
14177 neon_mixed_length (et
, et
.size
);
/* VMLAL/VMLSL-style long MAC: scalar form allows S16/S32/U16/U32,
   register form allows all signed/unsigned 8/16/32.  */
14182 do_neon_mac_maybe_scalar_long (void)
14184 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
/* Encode wide dyadic operations (Qd = Qn op Dm): destination and first
   source are double-width, second source is S/U 8/16/32.  */
14188 do_neon_dyadic_wide (void)
14190 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
14191 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
14192 neon_mixed_length (et
, et
.size
);
/* Encode narrowing dyadic operations (Dd = Qn op Qm): source elements are
   I16/I32/I64, the result is half that width (hence et.size / 2).  */
14196 do_neon_dyadic_narrow (void)
14198 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14199 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
14200 /* Operand sign is unimportant, and the U bit is part of the opcode,
14201 so force the operand type to integer. */
14202 et
.type
= NT_integer
;
14203 neon_mixed_length (et
, et
.size
/ 2);
/* VQDMULL/VQDMLAL-style saturating long multiply: only signed 16/32-bit
   elements are legal for both the scalar and register variants.  */
14207 do_neon_mul_sat_scalar_long (void)
14209 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
/* Encode VMULL.  With a scalar second operand it is handled like the other
   long MACs; otherwise it additionally permits the polynomial type (P8),
   which selects the POLY encoding variant.  */
14213 do_neon_vmull (void)
14215 if (inst
.operands
[2].isscalar
)
14216 do_neon_mac_maybe_scalar_long ();
/* Register form (else branch -- keyword lost in this extraction).  */
14219 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14220 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
14221 if (et
.type
== NT_poly
)
14222 NEON_ENCODE (POLY
, inst
);
14224 NEON_ENCODE (INTEGER
, inst
);
14225 /* For polynomial encoding, size field must be 0b00 and the U bit must be
14226 zero. Should be OK as-is. */
14227 neon_mixed_length (et
, et
.size
);
14234 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
14235 struct neon_type_el et
= neon_check_type (3, rs
,
14236 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14237 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
14239 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
14240 _("shift out of range"));
14241 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14242 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14243 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14244 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14245 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14246 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14247 inst
.instruction
|= neon_quad (rs
) << 6;
14248 inst
.instruction
|= imm
<< 8;
14250 neon_dp_fixup (&inst
);
14256 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14257 struct neon_type_el et
= neon_check_type (2, rs
,
14258 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14259 unsigned op
= (inst
.instruction
>> 7) & 3;
14260 /* N (width of reversed regions) is encoded as part of the bitmask. We
14261 extract it here to check the elements to be reversed are smaller.
14262 Otherwise we'd get a reserved instruction. */
14263 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
14264 gas_assert (elsize
!= 0);
14265 constraint (et
.size
>= elsize
,
14266 _("elements must be smaller than reversal region"));
14267 neon_two_same (neon_quad (rs
), 1, et
.size
);
14273 if (inst
.operands
[1].isscalar
)
14275 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
14276 struct neon_type_el et
= neon_check_type (2, rs
,
14277 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14278 unsigned sizebits
= et
.size
>> 3;
14279 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
14280 int logsize
= neon_logbits (et
.size
);
14281 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
14283 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
14286 NEON_ENCODE (SCALAR
, inst
);
14287 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14288 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14289 inst
.instruction
|= LOW4 (dm
);
14290 inst
.instruction
|= HI1 (dm
) << 5;
14291 inst
.instruction
|= neon_quad (rs
) << 6;
14292 inst
.instruction
|= x
<< 17;
14293 inst
.instruction
|= sizebits
<< 16;
14295 neon_dp_fixup (&inst
);
14299 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
14300 struct neon_type_el et
= neon_check_type (2, rs
,
14301 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
14302 /* Duplicate ARM register to lanes of vector. */
14303 NEON_ENCODE (ARMREG
, inst
);
14306 case 8: inst
.instruction
|= 0x400000; break;
14307 case 16: inst
.instruction
|= 0x000020; break;
14308 case 32: inst
.instruction
|= 0x000000; break;
14311 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
14312 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
14313 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
14314 inst
.instruction
|= neon_quad (rs
) << 21;
14315 /* The encoding for this instruction is identical for the ARM and Thumb
14316 variants, except for the condition field. */
14317 do_vfp_cond_or_thumb ();
14321 /* VMOV has particularly many variations. It can be one of:
14322 0. VMOV<c><q> <Qd>, <Qm>
14323 1. VMOV<c><q> <Dd>, <Dm>
14324 (Register operations, which are VORR with Rm = Rn.)
14325 2. VMOV<c><q>.<dt> <Qd>, #<imm>
14326 3. VMOV<c><q>.<dt> <Dd>, #<imm>
14328 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14329 (ARM register to scalar.)
14330 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14331 (Two ARM registers to vector.)
14332 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14333 (Scalar to ARM register.)
14334 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14335 (Vector to two ARM registers.)
14336 8. VMOV.F32 <Sd>, <Sm>
14337 9. VMOV.F64 <Dd>, <Dm>
14338 (VFP register moves.)
14339 10. VMOV.F32 <Sd>, #imm
14340 11. VMOV.F64 <Dd>, #imm
14341 (VFP float immediate load.)
14342 12. VMOV <Rd>, <Sm>
14343 (VFP single to ARM reg.)
14344 13. VMOV <Sd>, <Rm>
14345 (ARM reg to VFP single.)
14346 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14347 (Two ARM regs to two VFP singles.)
14348 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14349 (Two VFP singles to two ARM regs.)
14351 These cases can be disambiguated using neon_select_shape, except cases 1/9
14352 and 3/11 which depend on the operand type too.
14354 All the encoded bits are hardcoded by this function.
14356 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14357 Cases 5, 7 may be used with VFPv2 and above.
14359 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14360 can specify a type where it doesn't make sense to, and is ignored). */
14365 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
14366 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
14368 struct neon_type_el et
;
14369 const char *ldconst
= 0;
14373 case NS_DD
: /* case 1/9. */
14374 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
14375 /* It is not an error here if no type is given. */
14377 if (et
.type
== NT_float
&& et
.size
== 64)
14379 do_vfp_nsyn_opcode ("fcpyd");
14382 /* fall through. */
14384 case NS_QQ
: /* case 0/1. */
14386 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14388 /* The architecture manual I have doesn't explicitly state which
14389 value the U bit should have for register->register moves, but
14390 the equivalent VORR instruction has U = 0, so do that. */
14391 inst
.instruction
= 0x0200110;
14392 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14393 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14394 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14395 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14396 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14397 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14398 inst
.instruction
|= neon_quad (rs
) << 6;
14400 neon_dp_fixup (&inst
);
14404 case NS_DI
: /* case 3/11. */
14405 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
14407 if (et
.type
== NT_float
&& et
.size
== 64)
14409 /* case 11 (fconstd). */
14410 ldconst
= "fconstd";
14411 goto encode_fconstd
;
14413 /* fall through. */
14415 case NS_QI
: /* case 2/3. */
14416 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14418 inst
.instruction
= 0x0800010;
14419 neon_move_immediate ();
14420 neon_dp_fixup (&inst
);
14423 case NS_SR
: /* case 4. */
14425 unsigned bcdebits
= 0;
14427 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
14428 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
14430 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
14431 logsize
= neon_logbits (et
.size
);
14433 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
14435 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
14436 && et
.size
!= 32, _(BAD_FPU
));
14437 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
14438 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
14442 case 8: bcdebits
= 0x8; break;
14443 case 16: bcdebits
= 0x1; break;
14444 case 32: bcdebits
= 0x0; break;
14448 bcdebits
|= x
<< logsize
;
14450 inst
.instruction
= 0xe000b10;
14451 do_vfp_cond_or_thumb ();
14452 inst
.instruction
|= LOW4 (dn
) << 16;
14453 inst
.instruction
|= HI1 (dn
) << 7;
14454 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14455 inst
.instruction
|= (bcdebits
& 3) << 5;
14456 inst
.instruction
|= (bcdebits
>> 2) << 21;
14460 case NS_DRR
: /* case 5 (fmdrr). */
14461 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
14464 inst
.instruction
= 0xc400b10;
14465 do_vfp_cond_or_thumb ();
14466 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
14467 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
14468 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14469 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
14472 case NS_RS
: /* case 6. */
14475 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
14476 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
14477 unsigned abcdebits
= 0;
14479 et
= neon_check_type (2, NS_NULL
,
14480 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
14481 logsize
= neon_logbits (et
.size
);
14483 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
14485 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
14486 && et
.size
!= 32, _(BAD_FPU
));
14487 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
14488 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
14492 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
14493 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
14494 case 32: abcdebits
= 0x00; break;
14498 abcdebits
|= x
<< logsize
;
14499 inst
.instruction
= 0xe100b10;
14500 do_vfp_cond_or_thumb ();
14501 inst
.instruction
|= LOW4 (dn
) << 16;
14502 inst
.instruction
|= HI1 (dn
) << 7;
14503 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
14504 inst
.instruction
|= (abcdebits
& 3) << 5;
14505 inst
.instruction
|= (abcdebits
>> 2) << 21;
14509 case NS_RRD
: /* case 7 (fmrrd). */
14510 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
14513 inst
.instruction
= 0xc500b10;
14514 do_vfp_cond_or_thumb ();
14515 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
14516 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
14517 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14518 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14521 case NS_FF
: /* case 8 (fcpys). */
14522 do_vfp_nsyn_opcode ("fcpys");
14525 case NS_FI
: /* case 10 (fconsts). */
14526 ldconst
= "fconsts";
14528 if (is_quarter_float (inst
.operands
[1].imm
))
14530 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
14531 do_vfp_nsyn_opcode (ldconst
);
14534 first_error (_("immediate out of range"));
14537 case NS_RF
: /* case 12 (fmrs). */
14538 do_vfp_nsyn_opcode ("fmrs");
14541 case NS_FR
: /* case 13 (fmsr). */
14542 do_vfp_nsyn_opcode ("fmsr");
14545 /* The encoders for the fmrrs and fmsrr instructions expect three operands
14546 (one of which is a list), but we have parsed four. Do some fiddling to
14547 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
14549 case NS_RRFF
: /* case 14 (fmrrs). */
14550 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
14551 _("VFP registers must be adjacent"));
14552 inst
.operands
[2].imm
= 2;
14553 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
14554 do_vfp_nsyn_opcode ("fmrrs");
14557 case NS_FFRR
: /* case 15 (fmsrr). */
14558 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
14559 _("VFP registers must be adjacent"));
14560 inst
.operands
[1] = inst
.operands
[2];
14561 inst
.operands
[2] = inst
.operands
[3];
14562 inst
.operands
[0].imm
= 2;
14563 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
14564 do_vfp_nsyn_opcode ("fmsrr");
14573 do_neon_rshift_round_imm (void)
14575 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14576 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14577 int imm
= inst
.operands
[2].imm
;
14579 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
14582 inst
.operands
[2].present
= 0;
14587 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14588 _("immediate out of range for shift"));
14589 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
/* Encode VMOVL (vector move long, Qd = widened Dm).  The element size in
   bytes goes into bits 21:19; signedness is passed to neon_two_same for the
   U bit; -1 means no size field is encoded by the callee.  */
14594 do_neon_movl (void)
14596 struct neon_type_el et
= neon_check_type (2, NS_QD
,
14597 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
14598 unsigned sizebits
= et
.size
>> 3;
14599 inst
.instruction
|= sizebits
<< 19;
14600 neon_two_same (0, et
.type
== NT_unsigned
, -1);
14606 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14607 struct neon_type_el et
= neon_check_type (2, rs
,
14608 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14609 NEON_ENCODE (INTEGER
, inst
);
14610 neon_two_same (neon_quad (rs
), 1, et
.size
);
/* Encode VZIP/VUZP.  The D-register 32-bit case degenerates to VTRN.32
   (interleaving two 32-bit elements of a D register is a transpose), so the
   mnemonic is rewritten and re-dispatched in that case.  */
14614 do_neon_zip_uzp (void)
14616 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14617 struct neon_type_el et
= neon_check_type (2, rs
,
14618 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14619 if (rs
== NS_DD
&& et
.size
== 32)
14621 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
14622 inst
.instruction
= N_MNEM_vtrn
;
/* NOTE(review): the re-dispatch call and return for the VTRN case were
   dropped by the extraction that produced this chunk.  */
14626 neon_two_same (neon_quad (rs
), 1, et
.size
);
/* Encode VQABS/VQNEG (saturating absolute value / negate): two-operand,
   same-shape, signed 8/16/32 elements only.  */
14630 do_neon_sat_abs_neg (void)
14632 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14633 struct neon_type_el et
= neon_check_type (2, rs
,
14634 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
14635 neon_two_same (neon_quad (rs
), 1, et
.size
);
/* Encode VPADDL/VPADAL (pairwise add (and accumulate) long): signedness is
   carried in the OP field (bit 7) rather than the usual U bit.  */
14639 do_neon_pair_long (void)
14641 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14642 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
14643 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
14644 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
14645 neon_two_same (neon_quad (rs
), 1, et
.size
);
/* Encode VRECPE/VRSQRTE (reciprocal (square-root) estimate): F32 or U32
   elements; the float/integer choice is encoded in bit 8.  */
14649 do_neon_recip_est (void)
14651 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14652 struct neon_type_el et
= neon_check_type (2, rs
,
14653 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
14654 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14655 neon_two_same (neon_quad (rs
), 1, et
.size
);
14661 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14662 struct neon_type_el et
= neon_check_type (2, rs
,
14663 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
14664 neon_two_same (neon_quad (rs
), 1, et
.size
);
14670 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14671 struct neon_type_el et
= neon_check_type (2, rs
,
14672 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
14673 neon_two_same (neon_quad (rs
), 1, et
.size
);
14679 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14680 struct neon_type_el et
= neon_check_type (2, rs
,
14681 N_EQK
| N_INT
, N_8
| N_KEY
);
14682 neon_two_same (neon_quad (rs
), 1, et
.size
);
14688 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14689 neon_two_same (neon_quad (rs
), 1, -1);
14693 do_neon_tbl_tbx (void)
14695 unsigned listlenbits
;
14696 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
14698 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
14700 first_error (_("bad list length for table lookup"));
14704 listlenbits
= inst
.operands
[1].imm
- 1;
14705 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14706 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14707 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14708 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14709 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14710 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14711 inst
.instruction
|= listlenbits
<< 8;
14713 neon_dp_fixup (&inst
);
14717 do_neon_ldm_stm (void)
14719 /* P, U and L bits are part of bitmask. */
14720 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
14721 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
14723 if (inst
.operands
[1].issingle
)
14725 do_vfp_nsyn_ldm_stm (is_dbmode
);
14729 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
14730 _("writeback (!) must be used for VLDMDB and VSTMDB"));
14732 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14733 _("register list must contain at least 1 and at most 16 "
14736 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
14737 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
14738 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
14739 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
14741 inst
.instruction
|= offsetbits
;
14743 do_vfp_cond_or_thumb ();
/* Encode VLDR/VSTR by delegating to the classic-syntax single/double
   load-store encoders (flds/fsts for S registers, fldd/fstd for D).
   NOTE(review): the is_ldr branch structure and the double-precision else
   arm were dropped by the extraction; the surviving opcode-name calls below
   pair as load/store per precision -- confirm against the pristine file.  */
14747 do_neon_ldr_str (void)
/* Bit 20 is the L (load) bit in the bitmask-derived opcode.  */
14749 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
14751 if (inst
.operands
[0].issingle
)
14754 do_vfp_nsyn_opcode ("flds");
14756 do_vfp_nsyn_opcode ("fsts");
14761 do_vfp_nsyn_opcode ("fldd");
14763 do_vfp_nsyn_opcode ("fstd");
14767 /* "interleave" version also handles non-interleaving register VLD1/VST1
14771 do_neon_ld_st_interleave (void)
14773 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
14774 N_8
| N_16
| N_32
| N_64
);
14775 unsigned alignbits
= 0;
14777 /* The bits in this table go:
14778 0: register stride of one (0) or two (1)
14779 1,2: register list length, minus one (1, 2, 3, 4).
14780 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
14781 We use -1 for invalid entries. */
14782 const int typetable
[] =
14784 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
14785 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
14786 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
14787 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
14791 if (et
.type
== NT_invtype
)
14794 if (inst
.operands
[1].immisalign
)
14795 switch (inst
.operands
[1].imm
>> 8)
14797 case 64: alignbits
= 1; break;
14799 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
14800 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
14801 goto bad_alignment
;
14805 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
14806 goto bad_alignment
;
14811 first_error (_("bad alignment"));
14815 inst
.instruction
|= alignbits
<< 4;
14816 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14818 /* Bits [4:6] of the immediate in a list specifier encode register stride
14819 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
14820 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
14821 up the right value for "type" in a table based on this value and the given
14822 list style, then stick it back. */
14823 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
14824 | (((inst
.instruction
>> 8) & 3) << 3);
14826 typebits
= typetable
[idx
];
14828 constraint (typebits
== -1, _("bad list type for instruction"));
14830 inst
.instruction
&= ~0xf00;
14831 inst
.instruction
|= typebits
<< 8;
14834 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14835 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14836 otherwise. The variable arguments are a list of pairs of legal (size, align)
14837 values, terminated with -1. */
14840 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
14843 int result
= FAIL
, thissize
, thisalign
;
14845 if (!inst
.operands
[1].immisalign
)
14851 va_start (ap
, do_align
);
14855 thissize
= va_arg (ap
, int);
14856 if (thissize
== -1)
14858 thisalign
= va_arg (ap
, int);
14860 if (size
== thissize
&& align
== thisalign
)
14863 while (result
!= SUCCESS
);
14867 if (result
== SUCCESS
)
14870 first_error (_("unsupported alignment for instruction"));
14876 do_neon_ld_st_lane (void)
14878 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
14879 int align_good
, do_align
= 0;
14880 int logsize
= neon_logbits (et
.size
);
14881 int align
= inst
.operands
[1].imm
>> 8;
14882 int n
= (inst
.instruction
>> 8) & 3;
14883 int max_el
= 64 / et
.size
;
14885 if (et
.type
== NT_invtype
)
14888 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
14889 _("bad list length"));
14890 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
14891 _("scalar index out of range"));
14892 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
14894 _("stride of 2 unavailable when element size is 8"));
14898 case 0: /* VLD1 / VST1. */
14899 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
14901 if (align_good
== FAIL
)
14905 unsigned alignbits
= 0;
14908 case 16: alignbits
= 0x1; break;
14909 case 32: alignbits
= 0x3; break;
14912 inst
.instruction
|= alignbits
<< 4;
14916 case 1: /* VLD2 / VST2. */
14917 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
14919 if (align_good
== FAIL
)
14922 inst
.instruction
|= 1 << 4;
14925 case 2: /* VLD3 / VST3. */
14926 constraint (inst
.operands
[1].immisalign
,
14927 _("can't use alignment with this instruction"));
14930 case 3: /* VLD4 / VST4. */
14931 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
14932 16, 64, 32, 64, 32, 128, -1);
14933 if (align_good
== FAIL
)
14937 unsigned alignbits
= 0;
14940 case 8: alignbits
= 0x1; break;
14941 case 16: alignbits
= 0x1; break;
14942 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
14945 inst
.instruction
|= alignbits
<< 4;
14952 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
14953 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14954 inst
.instruction
|= 1 << (4 + logsize
);
14956 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
14957 inst
.instruction
|= logsize
<< 10;
14960 /* Encode single n-element structure to all lanes VLD<n> instructions. */
14963 do_neon_ld_dup (void)
14965 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
14966 int align_good
, do_align
= 0;
14968 if (et
.type
== NT_invtype
)
14971 switch ((inst
.instruction
>> 8) & 3)
14973 case 0: /* VLD1. */
14974 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
14975 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
14976 &do_align
, 16, 16, 32, 32, -1);
14977 if (align_good
== FAIL
)
14979 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
14982 case 2: inst
.instruction
|= 1 << 5; break;
14983 default: first_error (_("bad list length")); return;
14985 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14988 case 1: /* VLD2. */
14989 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
14990 &do_align
, 8, 16, 16, 32, 32, 64, -1);
14991 if (align_good
== FAIL
)
14993 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
14994 _("bad list length"));
14995 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14996 inst
.instruction
|= 1 << 5;
14997 inst
.instruction
|= neon_logbits (et
.size
) << 6;
15000 case 2: /* VLD3. */
15001 constraint (inst
.operands
[1].immisalign
,
15002 _("can't use alignment with this instruction"));
15003 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
15004 _("bad list length"));
15005 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
15006 inst
.instruction
|= 1 << 5;
15007 inst
.instruction
|= neon_logbits (et
.size
) << 6;
15010 case 3: /* VLD4. */
15012 int align
= inst
.operands
[1].imm
>> 8;
15013 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
15014 16, 64, 32, 64, 32, 128, -1);
15015 if (align_good
== FAIL
)
15017 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
15018 _("bad list length"));
15019 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
15020 inst
.instruction
|= 1 << 5;
15021 if (et
.size
== 32 && align
== 128)
15022 inst
.instruction
|= 0x3 << 6;
15024 inst
.instruction
|= neon_logbits (et
.size
) << 6;
15031 inst
.instruction
|= do_align
<< 4;
15034 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15035 apart from bits [11:4]. */
15038 do_neon_ldx_stx (void)
15040 if (inst
.operands
[1].isreg
)
15041 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
15043 switch (NEON_LANE (inst
.operands
[0].imm
))
15045 case NEON_INTERLEAVE_LANES
:
15046 NEON_ENCODE (INTERLV
, inst
);
15047 do_neon_ld_st_interleave ();
15050 case NEON_ALL_LANES
:
15051 NEON_ENCODE (DUP
, inst
);
15056 NEON_ENCODE (LANE
, inst
);
15057 do_neon_ld_st_lane ();
15060 /* L bit comes from bit mask. */
15061 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15062 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15063 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
15065 if (inst
.operands
[1].postind
)
15067 int postreg
= inst
.operands
[1].imm
& 0xf;
15068 constraint (!inst
.operands
[1].immisreg
,
15069 _("post-index must be a register"));
15070 constraint (postreg
== 0xd || postreg
== 0xf,
15071 _("bad register for post-index"));
15072 inst
.instruction
|= postreg
;
15074 else if (inst
.operands
[1].writeback
)
15076 inst
.instruction
|= 0xd;
15079 inst
.instruction
|= 0xf;
15082 inst
.instruction
|= 0xf9000000;
15084 inst
.instruction
|= 0xf4000000;
15087 /* Overall per-instruction processing. */
15089 /* We need to be able to fix up arbitrary expressions in some statements.
15090 This is so that we can handle symbols that are an arbitrary distance from
15091 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
15092 which returns part of an address in a form which will be valid for
15093 a data instruction. We do this by pushing the expression into a symbol
15094 in the expr_section, and creating a fix for that. */
15097 fix_new_arm (fragS
* frag
,
15112 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
15113 (enum bfd_reloc_code_real
) reloc
);
15117 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
15118 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
15122 /* Mark whether the fix is to a THUMB instruction, or an ARM
15124 new_fix
->tc_fix_data
= thumb_mode
;
15127 /* Create a frg for an instruction requiring relaxation. */
15129 output_relax_insn (void)
15135 /* The size of the instruction is unknown, so tie the debug info to the
15136 start of the instruction. */
15137 dwarf2_emit_insn (0);
15139 switch (inst
.reloc
.exp
.X_op
)
15142 sym
= inst
.reloc
.exp
.X_add_symbol
;
15143 offset
= inst
.reloc
.exp
.X_add_number
;
15147 offset
= inst
.reloc
.exp
.X_add_number
;
15150 sym
= make_expr_symbol (&inst
.reloc
.exp
);
15154 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
15155 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
15156 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
15159 /* Write a 32-bit thumb instruction to buf. */
15161 put_thumb32_insn (char * buf
, unsigned long insn
)
15163 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
15164 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
15168 output_inst (const char * str
)
15174 as_bad ("%s -- `%s'", inst
.error
, str
);
15179 output_relax_insn ();
15182 if (inst
.size
== 0)
15185 to
= frag_more (inst
.size
);
15186 /* PR 9814: Record the thumb mode into the current frag so that we know
15187 what type of NOP padding to use, if necessary. We override any previous
15188 setting so that if the mode has changed then the NOPS that we use will
15189 match the encoding of the last instruction in the frag. */
15190 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
15192 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
15194 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
15195 put_thumb32_insn (to
, inst
.instruction
);
15197 else if (inst
.size
> INSN_SIZE
)
15199 gas_assert (inst
.size
== (2 * INSN_SIZE
));
15200 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
15201 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
15204 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
15206 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
15207 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
15208 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
15211 dwarf2_emit_insn (inst
.size
);
15215 output_it_inst (int cond
, int mask
, char * to
)
15217 unsigned long instruction
= 0xbf00;
15220 instruction
|= mask
;
15221 instruction
|= cond
<< 4;
15225 to
= frag_more (2);
15227 dwarf2_emit_insn (2);
15231 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,      /* Instruction cannot be conditionalized.
                            The ARM condition field is still 0xE.  */
  OT_unconditionalF,     /* Instruction cannot be conditionalized
                            and carries 0xF in its ARM condition field.  */
  OT_csuffix,            /* Instruction takes a conditional suffix.  */
  OT_csuffixF,           /* Some forms of the instruction take a conditional
                            suffix, others place 0xF where the condition field
                            would be.  */
  OT_cinfix3,            /* Instruction takes a conditional infix,
                            beginning at character index 3.  (In
                            unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
                            tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,     /* Legacy instruction takes a conditional infix at
                            character index 3, even in unified mode.  Used for
                            legacy instructions where suffix and infix forms
                            may be ambiguous.  */
  OT_csuf_or_in3,        /* Instruction takes either a conditional
                            suffix or an infix at character index 3.  */
  OT_odd_infix_unc,      /* This is the unconditional variant of an
                            instruction that takes a conditional infix
                            at an unusual position.  In unified mode,
                            this variant will accept a suffix.  */
  OT_odd_infix_0         /* Values greater than or equal to OT_odd_infix_0
                            are the conditional variants of instructions that
                            take conditional infixes in unusual positions.
                            The infix appears at character index
                            (tag - OT_odd_infix_0).  These are not accepted
                            in unified mode.  */
};
15270 /* Subroutine of md_assemble, responsible for looking up the primary
15271 opcode from the mnemonic the user wrote. STR points to the
15272 beginning of the mnemonic.
15274 This is not simply a hash table lookup, because of conditional
15275 variants. Most instructions have conditional variants, which are
15276 expressed with a _conditional affix_ to the mnemonic. If we were
15277 to encode each conditional variant as a literal string in the opcode
15278 table, it would have approximately 20,000 entries.
15280 Most mnemonics take this affix as a suffix, and in unified syntax,
15281 'most' is upgraded to 'all'. However, in the divided syntax, some
15282 instructions take the affix as an infix, notably the s-variants of
15283 the arithmetic instructions. Of those instructions, all but six
15284 have the infix appear after the third character of the mnemonic.
15286 Accordingly, the algorithm for looking up primary opcodes given
15289 1. Look up the identifier in the opcode table.
15290 If we find a match, go to step U.
15292 2. Look up the last two characters of the identifier in the
15293 conditions table. If we find a match, look up the first N-2
15294 characters of the identifier in the opcode table. If we
15295 find a match, go to step CE.
15297 3. Look up the fourth and fifth characters of the identifier in
15298 the conditions table. If we find a match, extract those
15299 characters from the identifier, and look up the remaining
15300 characters in the opcode table. If we find a match, go
15305 U. Examine the tag field of the opcode structure, in case this is
15306 one of the six instructions with its conditional infix in an
15307 unusual place. If it is, the tag tells us where to find the
15308 infix; look it up in the conditions table and set inst.cond
15309 accordingly. Otherwise, this is an unconditional instruction.
15310 Again set inst.cond accordingly. Return the opcode structure.
15312 CE. Examine the tag field to make sure this is an instruction that
15313 should receive a conditional suffix. If it is not, fail.
15314 Otherwise, set inst.cond from the suffix we already looked up,
15315 and return the opcode structure.
15317 CM. Examine the tag field to make sure this is an instruction that
15318 should receive a conditional infix after the third character.
15319 If it is not, fail. Otherwise, undo the edits to the current
15320 line of input and proceed as for case CE. */
15322 static const struct asm_opcode
*
15323 opcode_lookup (char **str
)
15327 const struct asm_opcode
*opcode
;
15328 const struct asm_cond
*cond
;
15331 /* Scan up to the end of the mnemonic, which must end in white space,
15332 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
15333 for (base
= end
= *str
; *end
!= '\0'; end
++)
15334 if (*end
== ' ' || *end
== '.')
15340 /* Handle a possible width suffix and/or Neon type suffix. */
15345 /* The .w and .n suffixes are only valid if the unified syntax is in
15347 if (unified_syntax
&& end
[1] == 'w')
15349 else if (unified_syntax
&& end
[1] == 'n')
15354 inst
.vectype
.elems
= 0;
15356 *str
= end
+ offset
;
15358 if (end
[offset
] == '.')
15360 /* See if we have a Neon type suffix (possible in either unified or
15361 non-unified ARM syntax mode). */
15362 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
15365 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
15371 /* Look for unaffixed or special-case affixed mnemonic. */
15372 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
15377 if (opcode
->tag
< OT_odd_infix_0
)
15379 inst
.cond
= COND_ALWAYS
;
15383 if (warn_on_deprecated
&& unified_syntax
)
15384 as_warn (_("conditional infixes are deprecated in unified syntax"));
15385 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
15386 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
15389 inst
.cond
= cond
->value
;
15393 /* Cannot have a conditional suffix on a mnemonic of less than two
15395 if (end
- base
< 3)
15398 /* Look for suffixed mnemonic. */
15400 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
15401 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
15403 if (opcode
&& cond
)
15406 switch (opcode
->tag
)
15408 case OT_cinfix3_legacy
:
15409 /* Ignore conditional suffixes matched on infix only mnemonics. */
15413 case OT_cinfix3_deprecated
:
15414 case OT_odd_infix_unc
:
15415 if (!unified_syntax
)
15417 /* else fall through */
15421 case OT_csuf_or_in3
:
15422 inst
.cond
= cond
->value
;
15425 case OT_unconditional
:
15426 case OT_unconditionalF
:
15428 inst
.cond
= cond
->value
;
15431 /* Delayed diagnostic. */
15432 inst
.error
= BAD_COND
;
15433 inst
.cond
= COND_ALWAYS
;
15442 /* Cannot have a usual-position infix on a mnemonic of less than
15443 six characters (five would be a suffix). */
15444 if (end
- base
< 6)
15447 /* Look for infixed mnemonic in the usual position. */
15449 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
15453 memcpy (save
, affix
, 2);
15454 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
15455 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
15457 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
15458 memcpy (affix
, save
, 2);
15461 && (opcode
->tag
== OT_cinfix3
15462 || opcode
->tag
== OT_cinfix3_deprecated
15463 || opcode
->tag
== OT_csuf_or_in3
15464 || opcode
->tag
== OT_cinfix3_legacy
))
15467 if (warn_on_deprecated
&& unified_syntax
15468 && (opcode
->tag
== OT_cinfix3
15469 || opcode
->tag
== OT_cinfix3_deprecated
))
15470 as_warn (_("conditional infixes are deprecated in unified syntax"));
15472 inst
.cond
= cond
->value
;
15479 /* This function generates an initial IT instruction, leaving its block
15480 virtually open for the new instructions. Eventually,
15481 the mask will be updated by now_it_add_mask () each time
15482 a new instruction needs to be included in the IT block.
15483 Finally, the block is closed with close_automatic_it_block ().
15484 The block closure can be requested either from md_assemble (),
15485 a tencode (), or due to a label hook. */
15488 new_automatic_it_block (int cond
)
15490 now_it
.state
= AUTOMATIC_IT_BLOCK
;
15491 now_it
.mask
= 0x18;
15493 now_it
.block_length
= 1;
15494 mapping_state (MAP_THUMB
);
15495 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
15498 /* Close an automatic IT block.
15499 See comments in new_automatic_it_block (). */
15502 close_automatic_it_block (void)
15504 now_it
.mask
= 0x10;
15505 now_it
.block_length
= 0;
15508 /* Update the mask of the current automatically-generated IT
15509 instruction. See comments in new_automatic_it_block (). */
15512 now_it_add_mask (int cond
)
15514 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
15515 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
15516 | ((bitvalue) << (nbit)))
15517 const int resulting_bit
= (cond
& 1);
15519 now_it
.mask
&= 0xf;
15520 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
15522 (5 - now_it
.block_length
));
15523 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
15525 ((5 - now_it
.block_length
) - 1) );
15526 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
15529 #undef SET_BIT_VALUE
15532 /* The IT blocks handling machinery is accessed through the these functions:
15533 it_fsm_pre_encode () from md_assemble ()
15534 set_it_insn_type () optional, from the tencode functions
15535 set_it_insn_type_last () ditto
15536 in_it_block () ditto
15537 it_fsm_post_encode () from md_assemble ()
15538 force_automatic_it_block_close () from label handling functions
15541 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
15542 initializing the IT insn type with a generic initial value depending
15543 on the inst.condition.
15544 2) During the tencode function, two things may happen:
15545 a) The tencode function overrides the IT insn type by
15546 calling either set_it_insn_type (type) or set_it_insn_type_last ().
15547 b) The tencode function queries the IT block state by
15548 calling in_it_block () (i.e. to determine narrow/not narrow mode).
15550 Both set_it_insn_type and in_it_block run the internal FSM state
15551 handling function (handle_it_state), because: a) setting the IT insn
15552 type may incur in an invalid state (exiting the function),
15553 and b) querying the state requires the FSM to be updated.
15554 Specifically we want to avoid creating an IT block for conditional
15555 branches, so it_fsm_pre_encode is actually a guess and we can't
15556 determine whether an IT block is required until the tencode () routine
15557 has decided what type of instruction this actually is.
15558 Because of this, if set_it_insn_type and in_it_block have to be used,
15559 set_it_insn_type has to be called first.
15561 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
15562 determines the insn IT type depending on the inst.cond code.
15563 When a tencode () routine encodes an instruction that can be
15564 either outside an IT block, or, in the case of being inside, has to be
15565 the last one, set_it_insn_type_last () will determine the proper
15566 IT instruction type based on the inst.cond code. Otherwise,
15567 set_it_insn_type can be called for overriding that logic or
15568 for covering other cases.
15570 Calling handle_it_state () may not transition the IT block state to
15571 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
15572 still queried. Instead, if the FSM determines that the state should
15573 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
15574 after the tencode () function: that's what it_fsm_post_encode () does.
15576 Since in_it_block () calls the state handling function to get an
15577 updated state, an error may occur (due to invalid insns combination).
15578 In that case, inst.error is set.
15579 Therefore, inst.error has to be checked after the execution of
15580 the tencode () routine.
15582 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
15583 any pending state change (if any) that didn't take place in
15584 handle_it_state () as explained above. */
15587 it_fsm_pre_encode (void)
15589 if (inst
.cond
!= COND_ALWAYS
)
15590 inst
.it_insn_type
= INSIDE_IT_INSN
;
15592 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
15594 now_it
.state_handled
= 0;
15597 /* IT state FSM handling function. */
15600 handle_it_state (void)
15602 now_it
.state_handled
= 1;
15604 switch (now_it
.state
)
15606 case OUTSIDE_IT_BLOCK
:
15607 switch (inst
.it_insn_type
)
15609 case OUTSIDE_IT_INSN
:
15612 case INSIDE_IT_INSN
:
15613 case INSIDE_IT_LAST_INSN
:
15614 if (thumb_mode
== 0)
15617 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
15618 as_tsktsk (_("Warning: conditional outside an IT block"\
15623 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
15624 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_arch_t2
))
15626 /* Automatically generate the IT instruction. */
15627 new_automatic_it_block (inst
.cond
);
15628 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
15629 close_automatic_it_block ();
15633 inst
.error
= BAD_OUT_IT
;
15639 case IF_INSIDE_IT_LAST_INSN
:
15640 case NEUTRAL_IT_INSN
:
15644 now_it
.state
= MANUAL_IT_BLOCK
;
15645 now_it
.block_length
= 0;
15650 case AUTOMATIC_IT_BLOCK
:
15651 /* Three things may happen now:
15652 a) We should increment current it block size;
15653 b) We should close current it block (closing insn or 4 insns);
15654 c) We should close current it block and start a new one (due
15655 to incompatible conditions or
15656 4 insns-length block reached). */
15658 switch (inst
.it_insn_type
)
15660 case OUTSIDE_IT_INSN
:
15661 /* The closure of the block shall happen immediatelly,
15662 so any in_it_block () call reports the block as closed. */
15663 force_automatic_it_block_close ();
15666 case INSIDE_IT_INSN
:
15667 case INSIDE_IT_LAST_INSN
:
15668 case IF_INSIDE_IT_LAST_INSN
:
15669 now_it
.block_length
++;
15671 if (now_it
.block_length
> 4
15672 || !now_it_compatible (inst
.cond
))
15674 force_automatic_it_block_close ();
15675 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
15676 new_automatic_it_block (inst
.cond
);
15680 now_it_add_mask (inst
.cond
);
15683 if (now_it
.state
== AUTOMATIC_IT_BLOCK
15684 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
15685 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
15686 close_automatic_it_block ();
15689 case NEUTRAL_IT_INSN
:
15690 now_it
.block_length
++;
15692 if (now_it
.block_length
> 4)
15693 force_automatic_it_block_close ();
15695 now_it_add_mask (now_it
.cc
& 1);
15699 close_automatic_it_block ();
15700 now_it
.state
= MANUAL_IT_BLOCK
;
15705 case MANUAL_IT_BLOCK
:
15707 /* Check conditional suffixes. */
15708 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
15711 now_it
.mask
&= 0x1f;
15712 is_last
= (now_it
.mask
== 0x10);
15714 switch (inst
.it_insn_type
)
15716 case OUTSIDE_IT_INSN
:
15717 inst
.error
= BAD_NOT_IT
;
15720 case INSIDE_IT_INSN
:
15721 if (cond
!= inst
.cond
)
15723 inst
.error
= BAD_IT_COND
;
15728 case INSIDE_IT_LAST_INSN
:
15729 case IF_INSIDE_IT_LAST_INSN
:
15730 if (cond
!= inst
.cond
)
15732 inst
.error
= BAD_IT_COND
;
15737 inst
.error
= BAD_BRANCH
;
15742 case NEUTRAL_IT_INSN
:
15743 /* The BKPT instruction is unconditional even in an IT block. */
15747 inst
.error
= BAD_IT_IT
;
15758 it_fsm_post_encode (void)
15762 if (!now_it
.state_handled
)
15763 handle_it_state ();
15765 is_last
= (now_it
.mask
== 0x10);
15768 now_it
.state
= OUTSIDE_IT_BLOCK
;
15774 force_automatic_it_block_close (void)
15776 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
15778 close_automatic_it_block ();
15779 now_it
.state
= OUTSIDE_IT_BLOCK
;
15787 if (!now_it
.state_handled
)
15788 handle_it_state ();
15790 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
15794 md_assemble (char *str
)
15797 const struct asm_opcode
* opcode
;
15799 /* Align the previous label if needed. */
15800 if (last_label_seen
!= NULL
)
15802 symbol_set_frag (last_label_seen
, frag_now
);
15803 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
15804 S_SET_SEGMENT (last_label_seen
, now_seg
);
15807 memset (&inst
, '\0', sizeof (inst
));
15808 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
15810 opcode
= opcode_lookup (&p
);
15813 /* It wasn't an instruction, but it might be a register alias of
15814 the form alias .req reg, or a Neon .dn/.qn directive. */
15815 if (! create_register_alias (str
, p
)
15816 && ! create_neon_reg_alias (str
, p
))
15817 as_bad (_("bad instruction `%s'"), str
);
15822 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
15823 as_warn (_("s suffix on comparison instruction is deprecated"));
15825 /* The value which unconditional instructions should have in place of the
15826 condition field. */
15827 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
15831 arm_feature_set variant
;
15833 variant
= cpu_variant
;
15834 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
15835 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
15836 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
15837 /* Check that this instruction is supported for this CPU. */
15838 if (!opcode
->tvariant
15839 || (thumb_mode
== 1
15840 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
15842 as_bad (_("selected processor does not support Thumb mode `%s'"), str
);
15845 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
15846 && opcode
->tencode
!= do_t_branch
)
15848 as_bad (_("Thumb does not support conditional execution"));
15852 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
))
15854 if (opcode
->tencode
!= do_t_blx
&& opcode
->tencode
!= do_t_branch23
15855 && !(ARM_CPU_HAS_FEATURE(*opcode
->tvariant
, arm_ext_msr
)
15856 || ARM_CPU_HAS_FEATURE(*opcode
->tvariant
, arm_ext_barrier
)))
15858 /* Two things are addressed here.
15859 1) Implicit require narrow instructions on Thumb-1.
15860 This avoids relaxation accidentally introducing Thumb-2
15862 2) Reject wide instructions in non Thumb-2 cores. */
15863 if (inst
.size_req
== 0)
15865 else if (inst
.size_req
== 4)
15867 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str
);
15873 inst
.instruction
= opcode
->tvalue
;
15875 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
15877 /* Prepare the it_insn_type for those encodings that don't set
15879 it_fsm_pre_encode ();
15881 opcode
->tencode ();
15883 it_fsm_post_encode ();
15886 if (!(inst
.error
|| inst
.relax
))
15888 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
15889 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
15890 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
15892 as_bad (_("cannot honor width suffix -- `%s'"), str
);
15897 /* Something has gone badly wrong if we try to relax a fixed size
15899 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
15901 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
15902 *opcode
->tvariant
);
15903 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
15904 set those bits when Thumb-2 32-bit instructions are seen. ie.
15905 anything other than bl/blx and v6-M instructions.
15906 This is overly pessimistic for relaxable instructions. */
15907 if (((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
15909 && !(ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
15910 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
)))
15911 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
15914 check_neon_suffixes
;
15918 mapping_state (MAP_THUMB
);
15921 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
15925 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
15926 is_bx
= (opcode
->aencode
== do_bx
);
15928 /* Check that this instruction is supported for this CPU. */
15929 if (!(is_bx
&& fix_v4bx
)
15930 && !(opcode
->avariant
&&
15931 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
15933 as_bad (_("selected processor does not support ARM mode `%s'"), str
);
15938 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
15942 inst
.instruction
= opcode
->avalue
;
15943 if (opcode
->tag
== OT_unconditionalF
)
15944 inst
.instruction
|= 0xF << 28;
15946 inst
.instruction
|= inst
.cond
<< 28;
15947 inst
.size
= INSN_SIZE
;
15948 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
15950 it_fsm_pre_encode ();
15951 opcode
->aencode ();
15952 it_fsm_post_encode ();
15954 /* Arm mode bx is marked as both v4T and v5 because it's still required
15955 on a hypothetical non-thumb v5 core. */
15957 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
15959 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
15960 *opcode
->avariant
);
15962 check_neon_suffixes
;
15966 mapping_state (MAP_ARM
);
15971 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
15979 check_it_blocks_finished (void)
15984 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
15985 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
15986 == MANUAL_IT_BLOCK
)
15988 as_warn (_("section '%s' finished with an open IT block."),
15992 if (now_it
.state
== MANUAL_IT_BLOCK
)
15993 as_warn (_("file finished with an open IT block."));
15997 /* Various frobbings of labels and their addresses. */
16000 arm_start_line_hook (void)
16002 last_label_seen
= NULL
;
16006 arm_frob_label (symbolS
* sym
)
16008 last_label_seen
= sym
;
16010 ARM_SET_THUMB (sym
, thumb_mode
);
16012 #if defined OBJ_COFF || defined OBJ_ELF
16013 ARM_SET_INTERWORK (sym
, support_interwork
);
16016 force_automatic_it_block_close ();
16018 /* Note - do not allow local symbols (.Lxxx) to be labelled
16019 as Thumb functions. This is because these labels, whilst
16020 they exist inside Thumb code, are not the entry points for
16021 possible ARM->Thumb calls. Also, these labels can be used
16022 as part of a computed goto or switch statement. eg gcc
16023 can generate code that looks like this:
16025 ldr r2, [pc, .Laaa]
16035 The first instruction loads the address of the jump table.
16036 The second instruction converts a table index into a byte offset.
16037 The third instruction gets the jump address out of the table.
16038 The fourth instruction performs the jump.
16040 If the address stored at .Laaa is that of a symbol which has the
16041 Thumb_Func bit set, then the linker will arrange for this address
16042 to have the bottom bit set, which in turn would mean that the
16043 address computation performed by the third instruction would end
16044 up with the bottom bit set. Since the ARM is capable of unaligned
16045 word loads, the instruction would then load the incorrect address
16046 out of the jump table, and chaos would ensue. */
16047 if (label_is_thumb_function_name
16048 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
16049 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
16051 /* When the address of a Thumb function is taken the bottom
16052 bit of that address should be set. This will allow
16053 interworking between Arm and Thumb functions to work
16056 THUMB_SET_FUNC (sym
, 1);
16058 label_is_thumb_function_name
= FALSE
;
16061 dwarf2_emit_label (sym
);
16065 arm_data_in_code (void)
16067 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
16069 *input_line_pointer
= '/';
16070 input_line_pointer
+= 5;
16071 *input_line_pointer
= 0;
16079 arm_canonicalize_symbol_name (char * name
)
16083 if (thumb_mode
&& (len
= strlen (name
)) > 5
16084 && streq (name
+ len
- 5, "/data"))
16085 *(name
+ len
- 5) = 0;
16090 /* Table of all register names defined by default. The user can
16091 define additional names with .req. Note that all register names
16092 should appear in both upper and lowercase variants. Some registers
16093 also have mixed-case names. */
16095 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
16096 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
16097 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
16098 #define REGSET(p,t) \
16099 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
16100 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
16101 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
16102 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
16103 #define REGSETH(p,t) \
16104 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
16105 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
16106 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
16107 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
16108 #define REGSET2(p,t) \
16109 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
16110 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
16111 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
16112 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
16114 static const struct reg_entry reg_names
[] =
16116 /* ARM integer registers. */
16117 REGSET(r
, RN
), REGSET(R
, RN
),
16119 /* ATPCS synonyms. */
16120 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
16121 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
16122 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
16124 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
16125 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
16126 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
16128 /* Well-known aliases. */
16129 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
16130 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
16132 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
16133 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
16135 /* Coprocessor numbers. */
16136 REGSET(p
, CP
), REGSET(P
, CP
),
16138 /* Coprocessor register numbers. The "cr" variants are for backward
16140 REGSET(c
, CN
), REGSET(C
, CN
),
16141 REGSET(cr
, CN
), REGSET(CR
, CN
),
16143 /* FPA registers. */
16144 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
16145 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
16147 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
16148 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
16150 /* VFP SP registers. */
16151 REGSET(s
,VFS
), REGSET(S
,VFS
),
16152 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
16154 /* VFP DP Registers. */
16155 REGSET(d
,VFD
), REGSET(D
,VFD
),
16156 /* Extra Neon DP registers. */
16157 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
16159 /* Neon QP registers. */
16160 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
16162 /* VFP control registers. */
16163 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
16164 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
16165 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
16166 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
16167 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
16168 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
16170 /* Maverick DSP coprocessor registers. */
16171 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
16172 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
16174 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
16175 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
16176 REGDEF(dspsc
,0,DSPSC
),
16178 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
16179 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
16180 REGDEF(DSPSC
,0,DSPSC
),
16182 /* iWMMXt data registers - p0, c0-15. */
16183 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
16185 /* iWMMXt control registers - p1, c0-3. */
16186 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
16187 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
16188 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
16189 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
16191 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
16192 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
16193 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
16194 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
16195 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
16197 /* XScale accumulator registers. */
16198 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
16204 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
16205 within psr_required_here. */
16206 static const struct asm_psr psrs
[] =
16208 /* Backward compatibility notation. Note that "all" is no longer
16209 truly all possible PSR bits. */
16210 {"all", PSR_c
| PSR_f
},
16214 /* Individual flags. */
16221 /* Combinations of flags. */
16222 {"fs", PSR_f
| PSR_s
},
16223 {"fx", PSR_f
| PSR_x
},
16224 {"fc", PSR_f
| PSR_c
},
16225 {"sf", PSR_s
| PSR_f
},
16226 {"sx", PSR_s
| PSR_x
},
16227 {"sc", PSR_s
| PSR_c
},
16228 {"xf", PSR_x
| PSR_f
},
16229 {"xs", PSR_x
| PSR_s
},
16230 {"xc", PSR_x
| PSR_c
},
16231 {"cf", PSR_c
| PSR_f
},
16232 {"cs", PSR_c
| PSR_s
},
16233 {"cx", PSR_c
| PSR_x
},
16234 {"fsx", PSR_f
| PSR_s
| PSR_x
},
16235 {"fsc", PSR_f
| PSR_s
| PSR_c
},
16236 {"fxs", PSR_f
| PSR_x
| PSR_s
},
16237 {"fxc", PSR_f
| PSR_x
| PSR_c
},
16238 {"fcs", PSR_f
| PSR_c
| PSR_s
},
16239 {"fcx", PSR_f
| PSR_c
| PSR_x
},
16240 {"sfx", PSR_s
| PSR_f
| PSR_x
},
16241 {"sfc", PSR_s
| PSR_f
| PSR_c
},
16242 {"sxf", PSR_s
| PSR_x
| PSR_f
},
16243 {"sxc", PSR_s
| PSR_x
| PSR_c
},
16244 {"scf", PSR_s
| PSR_c
| PSR_f
},
16245 {"scx", PSR_s
| PSR_c
| PSR_x
},
16246 {"xfs", PSR_x
| PSR_f
| PSR_s
},
16247 {"xfc", PSR_x
| PSR_f
| PSR_c
},
16248 {"xsf", PSR_x
| PSR_s
| PSR_f
},
16249 {"xsc", PSR_x
| PSR_s
| PSR_c
},
16250 {"xcf", PSR_x
| PSR_c
| PSR_f
},
16251 {"xcs", PSR_x
| PSR_c
| PSR_s
},
16252 {"cfs", PSR_c
| PSR_f
| PSR_s
},
16253 {"cfx", PSR_c
| PSR_f
| PSR_x
},
16254 {"csf", PSR_c
| PSR_s
| PSR_f
},
16255 {"csx", PSR_c
| PSR_s
| PSR_x
},
16256 {"cxf", PSR_c
| PSR_x
| PSR_f
},
16257 {"cxs", PSR_c
| PSR_x
| PSR_s
},
16258 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
16259 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
16260 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
16261 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
16262 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
16263 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
16264 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
16265 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
16266 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
16267 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
16268 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
16269 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
16270 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
16271 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
16272 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
16273 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
16274 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
16275 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
16276 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
16277 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
16278 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
16279 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
16280 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
16281 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
16285 {"nzcvqg", PSR_s
| PSR_f
}
16288 /* Table of V7M psr names. */
16289 static const struct asm_psr v7m_psrs
[] =
16291 {"apsr", 0 }, {"APSR", 0 },
16292 {"iapsr", 1 }, {"IAPSR", 1 },
16293 {"eapsr", 2 }, {"EAPSR", 2 },
16294 {"psr", 3 }, {"PSR", 3 },
16295 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
16296 {"ipsr", 5 }, {"IPSR", 5 },
16297 {"epsr", 6 }, {"EPSR", 6 },
16298 {"iepsr", 7 }, {"IEPSR", 7 },
16299 {"msp", 8 }, {"MSP", 8 },
16300 {"psp", 9 }, {"PSP", 9 },
16301 {"primask", 16}, {"PRIMASK", 16},
16302 {"basepri", 17}, {"BASEPRI", 17},
16303 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
16304 {"faultmask", 19}, {"FAULTMASK", 19},
16305 {"control", 20}, {"CONTROL", 20}
16308 /* Table of all shift-in-operand names. */
16309 static const struct asm_shift_name shift_names
[] =
16311 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
16312 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
16313 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
16314 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
16315 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
16316 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
/* Table of all explicit relocation names, as used in operand
   syntax such as ldr r0, =sym(GOT).  NOTE(review): the brace
   delimiters and the OBJ_ELF preprocessor guard were lost in
   extraction and are restored here per the upstream binutils
   source — confirm against the canonical tree.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",      BFD_RELOC_ARM_GOT32    },  { "GOT",      BFD_RELOC_ARM_GOT32    },
  { "gotoff",   BFD_RELOC_ARM_GOTOFF   },  { "GOTOFF",   BFD_RELOC_ARM_GOTOFF   },
  { "plt",      BFD_RELOC_ARM_PLT32    },  { "PLT",      BFD_RELOC_ARM_PLT32    },
  { "target1",  BFD_RELOC_ARM_TARGET1  },  { "TARGET1",  BFD_RELOC_ARM_TARGET1  },
  { "target2",  BFD_RELOC_ARM_TARGET2  },  { "TARGET2",  BFD_RELOC_ARM_TARGET2  },
  { "sbrel",    BFD_RELOC_ARM_SBREL32  },  { "SBREL",    BFD_RELOC_ARM_SBREL32  },
  { "tlsgd",    BFD_RELOC_ARM_TLS_GD32 },  { "TLSGD",    BFD_RELOC_ARM_TLS_GD32 },
  { "tlsldm",   BFD_RELOC_ARM_TLS_LDM32},  { "TLSLDM",   BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",   BFD_RELOC_ARM_TLS_LDO32},  { "TLSLDO",   BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff", BFD_RELOC_ARM_TLS_IE32 },  { "GOTTPOFF", BFD_RELOC_ARM_TLS_IE32 },
  { "tpoff",    BFD_RELOC_ARM_TLS_LE32 },  { "TPOFF",    BFD_RELOC_ARM_TLS_LE32 },
  { "got_prel", BFD_RELOC_ARM_GOT_PREL },  { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL }
};
#endif
16338 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
16339 static const struct asm_cond conds
[] =
16343 {"cs", 0x2}, {"hs", 0x2},
16344 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
16358 static struct asm_barrier_opt barrier_opt_names
[] =
16360 { "sy", 0xf }, { "SY", 0xf },
16361 { "un", 0x7 }, { "UN", 0x7 },
16362 { "st", 0xe }, { "ST", 0xe },
16363 { "unst", 0x6 }, { "UNST", 0x6 },
16364 { "ish", 0xb }, { "ISH", 0xb },
16365 { "sh", 0xb }, { "SH", 0xb },
16366 { "ishst", 0xa }, { "ISHST", 0xa },
16367 { "shst", 0xa }, { "SHST", 0xa },
16368 { "nsh", 0x7 }, { "NSH", 0x7 },
16369 { "nshst", 0x6 }, { "NSHST", 0x6 },
16370 { "osh", 0x3 }, { "OSH", 0x3 },
16371 { "oshst", 0x2 }, { "OSHST", 0x2 }
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()            { OP_stop, }
#define OPS1(a)           { OP_##a, }
#define OPS2(a,b)         { OP_##a,OP_##b, }
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   (e.g. OPS_3).  */
#define OPS_1(a)           { a, }
#define OPS_2(a,b)         { a,b, }
#define OPS_3(a,b,c)       { a,b,c, }
#define OPS_4(a,b,c,d)     { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Naming scheme: a leading 'T' means
   the entry has a Thumb encoding; lower-case 't' means the Thumb
   field is a T_MNEM_xyz enumerator rather than a raw opcode; 'C'
   means a conditional suffix/infix is taken; 'U' means the mnemonic
   cannot be conditionalized; a leading 'c' marks coprocessor forms;
   'N'/'n' mark Neon forms.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expands to one table entry per condition code (plus the bare,
   unconditional form as the first entry).  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* ARM-only odd-position conditional infix; see TxCM_ above.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* ARM-only counterpart of TxCM: one entry per condition code.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
16576 static const struct asm_opcode insns
[] =
16578 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16579 #define THUMB_VARIANT &arm_ext_v4t
16580 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16581 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16582 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16583 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16584 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16585 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16586 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16587 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16588 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16589 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16590 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16591 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16592 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16593 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16594 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16595 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16597 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16598 for setting PSR flag bits. They are obsolete in V6 and do not
16599 have Thumb equivalents. */
16600 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16601 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16602 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
16603 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
16604 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
16605 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
16606 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16607 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16608 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
16610 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16611 tC3("movs", 1b00000
, _movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16612 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16613 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16615 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16616 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
16617 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
16619 OP_ADDRGLDR
),ldst
, t_ldst
),
16620 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
16622 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16623 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16624 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16625 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16626 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16627 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16629 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16630 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16631 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
16632 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
16635 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
16636 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
16637 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
16639 /* Thumb-compatibility pseudo ops. */
16640 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16641 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16642 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16643 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16644 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16645 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16646 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16647 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16648 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
16649 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
16650 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
16651 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
16653 /* These may simplify to neg. */
16654 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
16655 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
16657 #undef THUMB_VARIANT
16658 #define THUMB_VARIANT & arm_ext_v6
16660 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
16662 /* V1 instructions with no Thumb analogue prior to V6T2. */
16663 #undef THUMB_VARIANT
16664 #define THUMB_VARIANT & arm_ext_v6t2
16666 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16667 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16668 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
16670 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
16671 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
16672 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
16673 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
16675 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16676 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16678 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16679 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16681 /* V1 instructions with no Thumb analogue at all. */
16682 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
16683 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
16685 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
16686 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
16687 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
16688 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
16689 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
16690 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
16691 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
16692 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
16695 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
16696 #undef THUMB_VARIANT
16697 #define THUMB_VARIANT & arm_ext_v4t
16699 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
16700 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
16702 #undef THUMB_VARIANT
16703 #define THUMB_VARIANT & arm_ext_v6t2
16705 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
16706 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
16708 /* Generic coprocessor instructions. */
16709 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
16710 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16711 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16712 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16713 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16714 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
16715 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
16718 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
16720 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
16721 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
16724 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
16725 #undef THUMB_VARIANT
16726 #define THUMB_VARIANT & arm_ext_msr
16728 TCE("mrs", 10f0000
, f3ef8000
, 2, (APSR_RR
, RVC_PSR
), mrs
, t_mrs
),
16729 TCE("msr", 120f000
, f3808000
, 2, (RVC_PSR
, RR_EXi
), msr
, t_msr
),
16732 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
16733 #undef THUMB_VARIANT
16734 #define THUMB_VARIANT & arm_ext_v6t2
16736 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
16737 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
16738 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
16739 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
16740 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
16741 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
16742 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
16743 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
16746 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
16747 #undef THUMB_VARIANT
16748 #define THUMB_VARIANT & arm_ext_v4t
16750 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16751 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16752 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16753 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16754 tCM("ld","sh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16755 tCM("ld","sb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
16758 #define ARM_VARIANT & arm_ext_v4t_5
16760 /* ARM Architecture 4T. */
16761 /* Note: bx (and blx) are required on V5, even if the processor does
16762 not support Thumb. */
16763 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
16766 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
16767 #undef THUMB_VARIANT
16768 #define THUMB_VARIANT & arm_ext_v5t
16770 /* Note: blx has 2 variants; the .value coded here is for
16771 BLX(2). Only this variant has conditional execution. */
16772 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
16773 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
16775 #undef THUMB_VARIANT
16776 #define THUMB_VARIANT & arm_ext_v6t2
16778 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
16779 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16780 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16781 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16782 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
16783 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
16784 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
16785 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
16788 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
16789 #undef THUMB_VARIANT
16790 #define THUMB_VARIANT &arm_ext_v5exp
16792 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16793 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16794 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16795 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16797 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16798 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
16800 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
16801 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
16802 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
16803 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
16805 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16806 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16807 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16808 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16810 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16811 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16813 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
16814 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
16815 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
16816 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
16819 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
16820 #undef THUMB_VARIANT
16821 #define THUMB_VARIANT &arm_ext_v6t2
16823 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
16824 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
16826 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
16827 ADDRGLDRS
), ldrd
, t_ldstd
),
16829 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
16830 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
16833 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
16835 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
16838 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
16839 #undef THUMB_VARIANT
16840 #define THUMB_VARIANT & arm_ext_v6
16842 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
16843 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
16844 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
16845 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
16846 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
16847 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16848 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16849 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16850 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16851 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
16853 #undef THUMB_VARIANT
16854 #define THUMB_VARIANT & arm_ext_v6t2
16856 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
16857 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
16859 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
16860 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
16862 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
16863 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
16865 /* ARM V6 not included in V7M. */
16866 #undef THUMB_VARIANT
16867 #define THUMB_VARIANT & arm_ext_v6_notm
16868 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
16869 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
16870 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
16871 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
16872 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
16873 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
16874 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
16875 TUF("rfeed", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
16876 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
16877 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
16878 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
16879 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
16881 /* ARM V6 not included in V7M (eg. integer SIMD). */
16882 #undef THUMB_VARIANT
16883 #define THUMB_VARIANT & arm_ext_v6_dsp
16884 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
16885 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
16886 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
16887 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16888 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16889 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16890 /* Old name for QASX. */
16891 TCE("qaddsubx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16892 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16893 /* Old name for QSAX. */
16894 TCE("qsubaddx", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16895 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16896 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16897 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16898 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16899 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16900 /* Old name for SASX. */
16901 TCE("saddsubx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16902 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16903 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16904 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16905 /* Old name for SHASX. */
16906 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16907 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16908 /* Old name for SHSAX. */
16909 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16910 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16911 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16912 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16913 /* Old name for SSAX. */
16914 TCE("ssubaddx", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16915 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16916 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16917 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16918 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16919 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16920 /* Old name for UASX. */
16921 TCE("uaddsubx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16922 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16923 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16924 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16925 /* Old name for UHASX. */
16926 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16927 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16928 /* Old name for UHSAX. */
16929 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16930 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16931 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16932 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16933 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16934 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16935 /* Old name for UQASX. */
16936 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16937 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16938 /* Old name for UQSAX. */
16939 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16940 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16941 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16942 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16943 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16944 /* Old name for USAX. */
16945 TCE("usubaddx", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16946 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16947 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
16948 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
16949 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
16950 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16951 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
16952 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
16953 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
16954 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
16955 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
16956 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16957 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16958 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
16959 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
16960 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16961 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16962 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
16963 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
16964 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16965 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16966 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16967 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16968 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16969 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16970 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16971 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16972 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16973 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16974 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
16975 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
16976 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
16977 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
16978 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
16981 #define ARM_VARIANT & arm_ext_v6k
16982 #undef THUMB_VARIANT
16983 #define THUMB_VARIANT & arm_ext_v6k
16985 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
16986 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
16987 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
16988 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
16990 #undef THUMB_VARIANT
16991 #define THUMB_VARIANT & arm_ext_v6_notm
16992 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
16994 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
16995 RRnpcb
), strexd
, t_strexd
),
16997 #undef THUMB_VARIANT
16998 #define THUMB_VARIANT & arm_ext_v6t2
16999 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
17001 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
17003 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
17005 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
17007 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
17010 #define ARM_VARIANT & arm_ext_v6z
17012 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
17015 #define ARM_VARIANT & arm_ext_v6t2
17017 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
17018 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
17019 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
17020 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
17022 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
17023 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
17024 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
17025 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
17027 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
17028 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
17029 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
17030 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
17032 /* Thumb-only instructions. */
17034 #define ARM_VARIANT NULL
17035 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
17036 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
17038 /* ARM does not really have an IT instruction, so always allow it.
17039 The opcode is copied from Thumb in order to allow warnings in
17040 -mimplicit-it=[never | arm] modes. */
17042 #define ARM_VARIANT & arm_ext_v1
17044 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
17045 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
17046 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
17047 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
17048 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
17049 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
17050 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
17051 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
17052 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
17053 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
17054 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
17055 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
17056 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
17057 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
17058 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
17059 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
17060 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
17061 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
17063 /* Thumb2 only instructions. */
17065 #define ARM_VARIANT NULL
17067 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
17068 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
17069 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
17070 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
17071 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
17072 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
17074 /* Thumb-2 hardware division instructions (R and M profiles only). */
17075 #undef THUMB_VARIANT
17076 #define THUMB_VARIANT & arm_ext_div
17078 TCE("sdiv", 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
17079 TCE("udiv", 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
17081 /* ARM V6M/V7 instructions. */
17083 #define ARM_VARIANT & arm_ext_barrier
17084 #undef THUMB_VARIANT
17085 #define THUMB_VARIANT & arm_ext_barrier
17087 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, t_barrier
),
17088 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, t_barrier
),
17089 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, t_barrier
),
17091 /* ARM V7 instructions. */
17093 #define ARM_VARIANT & arm_ext_v7
17094 #undef THUMB_VARIANT
17095 #define THUMB_VARIANT & arm_ext_v7
17097 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
17098 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
17101 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
17103 cCE("wfs", e200110
, 1, (RR
), rd
),
17104 cCE("rfs", e300110
, 1, (RR
), rd
),
17105 cCE("wfc", e400110
, 1, (RR
), rd
),
17106 cCE("rfc", e500110
, 1, (RR
), rd
),
17108 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17109 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17110 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17111 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17113 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17114 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17115 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17116 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
17118 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
17119 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
17120 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
17121 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
17122 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
17123 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
17124 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
17125 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
17126 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
17127 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
17128 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
17129 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
17131 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
17132 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
17133 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
17134 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
17135 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
17136 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
17137 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
17138 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
17139 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
17140 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
17141 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
17142 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
17144 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
17145 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
17146 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
17147 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
17148 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
17149 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
17150 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
17151 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
17152 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
17153 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
17154 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
17155 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
17157 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
17158 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
17159 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
17160 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
17161 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
17162 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
17163 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
17164 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
17165 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
17166 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
17167 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
17168 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
17170 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
17171 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
17172 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
17173 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
17174 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
17175 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
17176 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
17177 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
17178 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
17179 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
17180 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
17181 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
17183 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
17184 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
17185 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
17186 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
17187 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
17188 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
17189 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
17190 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
17191 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
17192 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
17193 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
17194 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
17196 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
17197 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
17198 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
17199 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
17200 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
17201 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
17202 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
17203 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
17204 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
17205 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
17206 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
17207 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
17209 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
17210 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
17211 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
17212 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
17213 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
17214 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
17215 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
17216 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
17217 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
17218 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
17219 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
17220 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
17222 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
17223 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
17224 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
17225 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
17226 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
17227 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
17228 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
17229 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
17230 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
17231 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
17232 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
17233 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
17235 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
17236 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
17237 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
17238 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
17239 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
17240 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
17241 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
17242 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
17243 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
17244 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
17245 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
17246 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
17248 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
17249 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
17250 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
17251 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
17252 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
17253 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
17254 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
17255 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
17256 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
17257 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
17258 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
17259 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
17261 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
17262 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
17263 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
17264 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
17265 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
17266 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
17267 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
17268 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
17269 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
17270 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
17271 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
17272 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
17274 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
17275 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
17276 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
17277 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
17278 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
17279 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
17280 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
17281 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
17282 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
17283 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
17284 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
17285 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
17287 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
17288 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
17289 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
17290 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
17291 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
17292 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
17293 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
17294 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
17295 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
17296 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
17297 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
17298 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
17300 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
17301 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
17302 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
17303 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
17304 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
17305 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
17306 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
17307 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
17308 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
17309 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
17310 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
17311 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
17313 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
17314 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
17315 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
17316 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
17317 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
17318 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
17319 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
17320 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
17321 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
17322 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
17323 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
17324 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
17326 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17327 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17328 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17329 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17330 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17331 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17332 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17333 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17334 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17335 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17336 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17337 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17339 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17340 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17341 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17342 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17343 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17344 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17345 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17346 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17347 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17348 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17349 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17350 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17352 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17353 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17354 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17355 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17356 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17357 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17358 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17359 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17360 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17361 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17362 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17363 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17365 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17366 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17367 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17368 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17369 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17370 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17371 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17372 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17373 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17374 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17375 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17376 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17378 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17379 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17380 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17381 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17382 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17383 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17384 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17385 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17386 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17387 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17388 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17389 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17391 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17392 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17393 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17394 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17395 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17396 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17397 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17398 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17399 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17400 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17401 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17402 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17404 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17405 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17406 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17407 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17408 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17409 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17410 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17411 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17412 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17413 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17414 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17415 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17417 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17418 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17419 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17420 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17421 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17422 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17423 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17424 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17425 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17426 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17427 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17428 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17430 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17431 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17432 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17433 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17434 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17435 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17436 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17437 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17438 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17439 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17440 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17441 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17443 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17444 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17445 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17446 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17447 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17448 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17449 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17450 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17451 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17452 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17453 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17454 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17456 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17457 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17458 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17459 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17460 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17461 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17462 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17463 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17464 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17465 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17466 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17467 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17469 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17470 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17471 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17472 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17473 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17474 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17475 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17476 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17477 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17478 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17479 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17480 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17482 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17483 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17484 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17485 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17486 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17487 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17488 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17489 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17490 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17491 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17492 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17493 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17495 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17496 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17497 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17498 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17500 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
17501 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
17502 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
17503 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
17504 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
17505 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
17506 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
17507 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
17508 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
17509 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
17510 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
17511 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
17513 /* The implementation of the FIX instruction is broken on some
17514 assemblers, in that it accepts a precision specifier as well as a
17515 rounding specifier, despite the fact that this is meaningless.
17516 To be more compatible, we accept it as well, though of course it
17517 does not set any bits. */
17518 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
17519 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
17520 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
17521 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
17522 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
17523 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
17524 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
17525 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
17526 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
17527 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
17528 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
17529 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
17530 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
17532 /* Instructions that were new with the real FPA, call them V2. */
17534 #define ARM_VARIANT & fpu_fpa_ext_v2
17536 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17537 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17538 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17539 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17540 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17541 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
17544 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
17546 /* Moves and type conversions. */
17547 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17548 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
17549 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
17550 cCE("fmstat", ef1fa10
, 0, (), noargs
),
17551 cCE("vmrs", ef10a10
, 2, (APSR_RR
, RVC
), vmrs
),
17552 cCE("vmsr", ee10a10
, 2, (RVC
, RR
), vmsr
),
17553 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17554 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17555 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17556 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17557 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17558 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17559 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
17560 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
17562 /* Memory operations. */
17563 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
17564 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
17565 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
17566 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
17567 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
17568 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
17569 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
17570 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
17571 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
17572 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
17573 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
17574 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
17575 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
17576 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
17577 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
17578 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
17579 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
17580 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
17582 /* Monadic operations. */
17583 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17584 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17585 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17587 /* Dyadic operations. */
17588 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17589 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17590 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17591 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17592 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17593 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17594 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17595 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17596 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17599 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17600 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
17601 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17602 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
17604 /* Double precision load/store are still present on single precision
17605 implementations. */
17606 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
17607 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
17608 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
17609 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
17610 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
17611 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
17612 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
17613 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
17614 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
17615 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
17618 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
17620 /* Moves and type conversions. */
17621 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17622 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17623 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17624 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
17625 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
17626 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
17627 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
17628 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17629 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17630 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17631 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17632 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17633 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17635 /* Monadic operations. */
17636 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17637 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17638 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17640 /* Dyadic operations. */
17641 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17642 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17643 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17644 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17645 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17646 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17647 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17648 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17649 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17652 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17653 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
17654 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17655 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
17658 #define ARM_VARIANT & fpu_vfp_ext_v2
17660 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
17661 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
17662 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
17663 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
17665 /* Instructions which may belong to either the Neon or VFP instruction sets.
17666 Individual encoder functions perform additional architecture checks. */
17668 #define ARM_VARIANT & fpu_vfp_ext_v1xd
17669 #undef THUMB_VARIANT
17670 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
17672 /* These mnemonics are unique to VFP. */
17673 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
17674 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
17675 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
17676 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
17677 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
17678 nCE(vcmp
, _vcmp
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
17679 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
17680 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
17681 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
17682 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
17684 /* Mnemonics shared by Neon and VFP. */
17685 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
17686 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
17687 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
17689 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
17690 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
17692 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
17693 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
17695 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17696 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17697 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17698 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17699 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17700 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
17701 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
17702 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
17704 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32b
), neon_cvt
),
17705 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
17706 nCEF(vcvtb
, _vcvt
, 2, (RVS
, RVS
), neon_cvtb
),
17707 nCEF(vcvtt
, _vcvt
, 2, (RVS
, RVS
), neon_cvtt
),
17710 /* NOTE: All VMOV encoding is special-cased! */
17711 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
17712 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
17714 #undef THUMB_VARIANT
17715 #define THUMB_VARIANT & fpu_neon_ext_v1
17717 #define ARM_VARIANT & fpu_neon_ext_v1
17719 /* Data processing with three registers of the same length. */
17720 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
17721 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
17722 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
17723 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17724 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17725 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17726 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17727 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17728 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17729 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
17730 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17731 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17732 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17733 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17734 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17735 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17736 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17737 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17738 /* If not immediate, fall back to neon_dyadic_i64_su.
17739 shl_imm should accept I8 I16 I32 I64,
17740 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17741 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
17742 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
17743 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
17744 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
17745 /* Logic ops, types optional & ignored. */
17746 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17747 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17748 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17749 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17750 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17751 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17752 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17753 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17754 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
17755 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
17756 /* Bitfield ops, untyped. */
17757 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17758 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17759 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17760 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17761 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17762 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17763 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17764 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17765 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17766 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17767 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17768 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17769 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17770 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17771 back to neon_dyadic_if_su. */
17772 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
17773 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
17774 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
17775 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
17776 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
17777 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
17778 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
17779 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
17780 /* Comparison. Type I8 I16 I32 F32. */
17781 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
17782 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
17783 /* As above, D registers only. */
17784 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
17785 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
17786 /* Int and float variants, signedness unimportant. */
17787 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
17788 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
17789 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
17790 /* Add/sub take types I8 I16 I32 I64 F32. */
17791 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
17792 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
17793 /* vtst takes sizes 8, 16, 32. */
17794 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
17795 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
17796 /* VMUL takes I8 I16 I32 F32 P8. */
17797 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
17798 /* VQD{R}MULH takes S16 S32. */
17799 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
17800 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
17801 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
17802 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
17803 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
17804 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
17805 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
17806 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
17807 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
17808 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
17809 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
17810 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
17811 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
17812 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
17813 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
17814 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
17816 /* Two address, int/float. Types S8 S16 S32 F32. */
17817 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
17818 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
17820 /* Data processing with two registers and a shift amount. */
17821 /* Right shifts, and variants with rounding.
17822 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
17823 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
17824 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
17825 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
17826 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
17827 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
17828 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
17829 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
17830 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
17831 /* Shift and insert. Sizes accepted 8 16 32 64. */
17832 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
17833 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
17834 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
17835 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
17836 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
17837 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
17838 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
17839 /* Right shift immediate, saturating & narrowing, with rounding variants.
17840 Types accepted S16 S32 S64 U16 U32 U64. */
17841 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
17842 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
17843 /* As above, unsigned. Types accepted S16 S32 S64. */
17844 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
17845 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
17846 /* Right shift narrowing. Types accepted I16 I32 I64. */
17847 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
17848 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
17849 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
17850 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
17851 /* CVT with optional immediate for fixed-point variant. */
17852 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
17854 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
17855 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
17857 /* Data processing, three registers of different lengths. */
17858 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
17859 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
17860 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17861 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17862 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17863 /* If not scalar, fall back to neon_dyadic_long.
17864 Vector types as above, scalar types S16 S32 U16 U32. */
17865 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
17866 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
17867 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
17868 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
17869 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
17870 /* Dyadic, narrowing insns. Types I16 I32 I64. */
17871 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17872 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17873 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17874 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17875 /* Saturating doubling multiplies. Types S16 S32. */
17876 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17877 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17878 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17879 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
17880 S16 S32 U16 U32. */
17881 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
17883 /* Extract. Size 8. */
17884 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
17885 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
17887 /* Two registers, miscellaneous. */
17888 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
17889 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
17890 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
17891 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
17892 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
17893 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
17894 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
17895 /* Vector replicate. Sizes 8 16 32. */
17896 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
17897 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
17898 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
17899 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
17900 /* VMOVN. Types I16 I32 I64. */
17901 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
17902 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
17903 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
17904 /* VQMOVUN. Types S16 S32 S64. */
17905 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
17906 /* VZIP / VUZP. Sizes 8 16 32. */
17907 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
17908 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
17909 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
17910 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
17911 /* VQABS / VQNEG. Types S8 S16 S32. */
17912 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
17913 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
17914 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
17915 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
17916 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
17917 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
17918 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
17919 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
17920 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
17921 /* Reciprocal estimates. Types U32 F32. */
17922 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
17923 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
17924 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
17925 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
17926 /* VCLS. Types S8 S16 S32. */
17927 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
17928 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
17929 /* VCLZ. Types I8 I16 I32. */
17930 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
17931 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
17932 /* VCNT. Size 8. */
17933 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
17934 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
17935 /* Two address, untyped. */
17936 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
17937 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
17938 /* VTRN. Sizes 8 16 32. */
17939 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
17940 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
17942 /* Table lookup. Size 8. */
17943 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
17944 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
17946 #undef THUMB_VARIANT
17947 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
17949 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
17951 /* Neon element/structure load/store. */
17952 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
17953 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
17954 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
17955 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
17956 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
17957 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
17958 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
17959 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
17961 #undef THUMB_VARIANT
17962 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
17964 #define ARM_VARIANT &fpu_vfp_ext_v3xd
17965 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
17966 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
17967 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
17968 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
17969 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
17970 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
17971 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
17972 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
17973 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
17975 #undef THUMB_VARIANT
17976 #define THUMB_VARIANT & fpu_vfp_ext_v3
17978 #define ARM_VARIANT & fpu_vfp_ext_v3
17980 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
17981 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
17982 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
17983 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
17984 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
17985 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
17986 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
17987 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
17988 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
17991 #define ARM_VARIANT &fpu_vfp_ext_fma
17992 #undef THUMB_VARIANT
17993 #define THUMB_VARIANT &fpu_vfp_ext_fma
17994 /* Mnemonics shared by Neon and VFP. These are included in the
17995 VFP FMA variant; NEON and VFP FMA always includes the NEON
17996 FMA instructions. */
17997 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
17998 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
17999 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
18000 the v form should always be used. */
18001 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
18002 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
18003 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
18004 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
18005 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
18006 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
18008 #undef THUMB_VARIANT
18010 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
18012 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18013 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18014 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18015 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18016 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18017 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
18018 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
18019 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
18022 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
18024 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
18025 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
18026 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
18027 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
18028 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
18029 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
18030 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
18031 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
18032 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
18033 cCE("textrmub", e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18034 cCE("textrmuh", e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18035 cCE("textrmuw", e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18036 cCE("textrmsb", e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18037 cCE("textrmsh", e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18038 cCE("textrmsw", e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
18039 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
18040 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
18041 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
18042 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
18043 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
18044 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18045 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18046 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18047 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18048 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18049 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
18050 cCE("tmovmskb", e100030
, 2, (RR
, RIWR
), rd_rn
),
18051 cCE("tmovmskh", e500030
, 2, (RR
, RIWR
), rd_rn
),
18052 cCE("tmovmskw", e900030
, 2, (RR
, RIWR
), rd_rn
),
18053 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
18054 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
18055 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
18056 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
18057 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
18058 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18059 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18060 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18061 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18062 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18063 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18064 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18065 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18066 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18067 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18068 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18069 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18070 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
18071 cCE("walignr0", e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18072 cCE("walignr1", e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18073 cCE("walignr2", ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18074 cCE("walignr3", eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18075 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18076 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18077 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18078 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18079 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18080 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18081 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18082 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18083 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18084 cCE("wcmpgtub", e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18085 cCE("wcmpgtuh", e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18086 cCE("wcmpgtuw", e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18087 cCE("wcmpgtsb", e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18088 cCE("wcmpgtsh", e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18089 cCE("wcmpgtsw", eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18090 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18091 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18092 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
18093 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
18094 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18095 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18096 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18097 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18098 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18099 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18100 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18101 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18102 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18103 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18104 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18105 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18106 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18107 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18108 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18109 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18110 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18111 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18112 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
18113 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18114 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18115 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18116 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18117 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18118 cCE("wpackhss", e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18119 cCE("wpackhus", e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18120 cCE("wpackwss", eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18121 cCE("wpackwus", e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18122 cCE("wpackdss", ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18123 cCE("wpackdus", ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18124 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18125 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18126 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18127 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18128 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18129 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18130 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18131 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18132 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18133 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18134 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
18135 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18136 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18137 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18138 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18139 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18140 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18141 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18142 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18143 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18144 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18145 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18146 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18147 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18148 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18149 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18150 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18151 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18152 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18153 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18154 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18155 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
18156 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
18157 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18158 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18159 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18160 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18161 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18162 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18163 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18164 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18165 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18166 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18167 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18168 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18169 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18170 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18171 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18172 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18173 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18174 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18175 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18176 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18177 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18178 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18179 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18180 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18181 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18182 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18183 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18184 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18185 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
18188 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
18190 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
18191 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
18192 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
18193 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18194 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18195 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18196 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18197 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18198 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18199 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18200 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18201 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18202 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18203 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18204 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18205 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18206 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18207 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18208 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18209 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18210 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
18211 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18212 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18213 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18214 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18215 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18216 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18217 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18218 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18219 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18220 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18221 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18222 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18223 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18224 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18225 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18226 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18227 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18228 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18229 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18230 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18231 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18232 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18233 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18234 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18235 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18236 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18237 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18238 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18239 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18240 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18241 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18242 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18243 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18244 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18245 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18246 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18249 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
18251 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
18252 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
18253 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
18254 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
18255 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
18256 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
18257 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
18258 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
18259 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
18260 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
18261 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
18262 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
18263 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
18264 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
18265 cCE("cfmv64lr", e000510
, 2, (RMDX
, RR
), rn_rd
),
18266 cCE("cfmvr64l", e100510
, 2, (RR
, RMDX
), rd_rn
),
18267 cCE("cfmv64hr", e000530
, 2, (RMDX
, RR
), rn_rd
),
18268 cCE("cfmvr64h", e100530
, 2, (RR
, RMDX
), rd_rn
),
18269 cCE("cfmval32", e200440
, 2, (RMAX
, RMFX
), rd_rn
),
18270 cCE("cfmv32al", e100440
, 2, (RMFX
, RMAX
), rd_rn
),
18271 cCE("cfmvam32", e200460
, 2, (RMAX
, RMFX
), rd_rn
),
18272 cCE("cfmv32am", e100460
, 2, (RMFX
, RMAX
), rd_rn
),
18273 cCE("cfmvah32", e200480
, 2, (RMAX
, RMFX
), rd_rn
),
18274 cCE("cfmv32ah", e100480
, 2, (RMFX
, RMAX
), rd_rn
),
18275 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
18276 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
18277 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
18278 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
18279 cCE("cfmvsc32", e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
18280 cCE("cfmv32sc", e1004e0
, 2, (RMDX
, RMDS
), rd
),
18281 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
18282 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
18283 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
18284 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
18285 cCE("cfcvt32s", e000480
, 2, (RMF
, RMFX
), rd_rn
),
18286 cCE("cfcvt32d", e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
18287 cCE("cfcvt64s", e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
18288 cCE("cfcvt64d", e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
18289 cCE("cfcvts32", e100580
, 2, (RMFX
, RMF
), rd_rn
),
18290 cCE("cfcvtd32", e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
18291 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
18292 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
18293 cCE("cfrshl32", e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
18294 cCE("cfrshl64", e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
18295 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
18296 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
18297 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
18298 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
18299 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
18300 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
18301 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
18302 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
18303 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
18304 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
18305 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18306 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18307 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18308 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18309 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18310 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18311 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
18312 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
18313 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
18314 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
18315 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18316 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18317 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18318 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18319 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18320 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18321 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18322 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18323 cCE("cfmadd32", e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
18324 cCE("cfmsub32", e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
18325 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
18326 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
18329 #undef THUMB_VARIANT
18356 /* MD interface: bits in the object file. */
18358 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
18359 for use in the a.out file, and stores them in the array pointed to by buf.
18360 This knows about the endian-ness of the target machine and does
18361 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
18362 2 (short) and 4 (long) Floating numbers are put out as a series of
18363 LITTLENUMS (shorts, here at least). */
18366 md_number_to_chars (char * buf
, valueT val
, int n
)
18368 if (target_big_endian
)
18369 number_to_chars_bigendian (buf
, val
, n
);
18371 number_to_chars_littleendian (buf
, val
, n
);
18375 md_chars_to_number (char * buf
, int n
)
18378 unsigned char * where
= (unsigned char *) buf
;
18380 if (target_big_endian
)
18385 result
|= (*where
++ & 255);
18393 result
|= (where
[n
] & 255);
18400 /* MD interface: Sections. */
18402 /* Estimate the size of a frag before relaxing. Assume everything fits in
18406 md_estimate_size_before_relax (fragS
* fragp
,
18407 segT segtype ATTRIBUTE_UNUSED
)
18413 /* Convert a machine dependent frag. */
18416 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
18418 unsigned long insn
;
18419 unsigned long old_op
;
18427 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
18429 old_op
= bfd_get_16(abfd
, buf
);
18430 if (fragp
->fr_symbol
)
18432 exp
.X_op
= O_symbol
;
18433 exp
.X_add_symbol
= fragp
->fr_symbol
;
18437 exp
.X_op
= O_constant
;
18439 exp
.X_add_number
= fragp
->fr_offset
;
18440 opcode
= fragp
->fr_subtype
;
18443 case T_MNEM_ldr_pc
:
18444 case T_MNEM_ldr_pc2
:
18445 case T_MNEM_ldr_sp
:
18446 case T_MNEM_str_sp
:
18453 if (fragp
->fr_var
== 4)
18455 insn
= THUMB_OP32 (opcode
);
18456 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
18458 insn
|= (old_op
& 0x700) << 4;
18462 insn
|= (old_op
& 7) << 12;
18463 insn
|= (old_op
& 0x38) << 13;
18465 insn
|= 0x00000c00;
18466 put_thumb32_insn (buf
, insn
);
18467 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
18471 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
18473 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
18476 if (fragp
->fr_var
== 4)
18478 insn
= THUMB_OP32 (opcode
);
18479 insn
|= (old_op
& 0xf0) << 4;
18480 put_thumb32_insn (buf
, insn
);
18481 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
18485 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18486 exp
.X_add_number
-= 4;
18494 if (fragp
->fr_var
== 4)
18496 int r0off
= (opcode
== T_MNEM_mov
18497 || opcode
== T_MNEM_movs
) ? 0 : 8;
18498 insn
= THUMB_OP32 (opcode
);
18499 insn
= (insn
& 0xe1ffffff) | 0x10000000;
18500 insn
|= (old_op
& 0x700) << r0off
;
18501 put_thumb32_insn (buf
, insn
);
18502 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
18506 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
18511 if (fragp
->fr_var
== 4)
18513 insn
= THUMB_OP32(opcode
);
18514 put_thumb32_insn (buf
, insn
);
18515 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
18518 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
18522 if (fragp
->fr_var
== 4)
18524 insn
= THUMB_OP32(opcode
);
18525 insn
|= (old_op
& 0xf00) << 14;
18526 put_thumb32_insn (buf
, insn
);
18527 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
18530 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
18533 case T_MNEM_add_sp
:
18534 case T_MNEM_add_pc
:
18535 case T_MNEM_inc_sp
:
18536 case T_MNEM_dec_sp
:
18537 if (fragp
->fr_var
== 4)
18539 /* ??? Choose between add and addw. */
18540 insn
= THUMB_OP32 (opcode
);
18541 insn
|= (old_op
& 0xf0) << 4;
18542 put_thumb32_insn (buf
, insn
);
18543 if (opcode
== T_MNEM_add_pc
)
18544 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
18546 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
18549 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18557 if (fragp
->fr_var
== 4)
18559 insn
= THUMB_OP32 (opcode
);
18560 insn
|= (old_op
& 0xf0) << 4;
18561 insn
|= (old_op
& 0xf) << 16;
18562 put_thumb32_insn (buf
, insn
);
18563 if (insn
& (1 << 20))
18564 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
18566 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
18569 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18575 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
18576 (enum bfd_reloc_code_real
) reloc_type
);
18577 fixp
->fx_file
= fragp
->fr_file
;
18578 fixp
->fx_line
= fragp
->fr_line
;
18579 fragp
->fr_fix
+= fragp
->fr_var
;
18582 /* Return the size of a relaxable immediate operand instruction.
18583 SHIFT and SIZE specify the form of the allowable immediate. */
18585 relax_immediate (fragS
*fragp
, int size
, int shift
)
18591 /* ??? Should be able to do better than this. */
18592 if (fragp
->fr_symbol
)
18595 low
= (1 << shift
) - 1;
18596 mask
= (1 << (shift
+ size
)) - (1 << shift
);
18597 offset
= fragp
->fr_offset
;
18598 /* Force misaligned offsets to 32-bit variant. */
18601 if (offset
& ~mask
)
18606 /* Get the address of a symbol during relaxation. */
18608 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
18614 sym
= fragp
->fr_symbol
;
18615 sym_frag
= symbol_get_frag (sym
);
18616 know (S_GET_SEGMENT (sym
) != absolute_section
18617 || sym_frag
== &zero_address_frag
);
18618 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
18620 /* If frag has yet to be reached on this pass, assume it will
18621 move by STRETCH just as we did. If this is not so, it will
18622 be because some frag between grows, and that will force
18626 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
18630 /* Adjust stretch for any alignment frag. Note that if have
18631 been expanding the earlier code, the symbol may be
18632 defined in what appears to be an earlier frag. FIXME:
18633 This doesn't handle the fr_subtype field, which specifies
18634 a maximum number of bytes to skip when doing an
18636 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
18638 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
18641 stretch
= - ((- stretch
)
18642 & ~ ((1 << (int) f
->fr_offset
) - 1));
18644 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
18656 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
18659 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
18664 /* Assume worst case for symbols not known to be in the same section. */
18665 if (fragp
->fr_symbol
== NULL
18666 || !S_IS_DEFINED (fragp
->fr_symbol
)
18667 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
18668 || S_IS_WEAK (fragp
->fr_symbol
))
18671 val
= relaxed_symbol_addr (fragp
, stretch
);
18672 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
18673 addr
= (addr
+ 4) & ~3;
18674 /* Force misaligned targets to 32-bit variant. */
18678 if (val
< 0 || val
> 1020)
18683 /* Return the size of a relaxable add/sub immediate instruction. */
18685 relax_addsub (fragS
*fragp
, asection
*sec
)
18690 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
18691 op
= bfd_get_16(sec
->owner
, buf
);
18692 if ((op
& 0xf) == ((op
>> 4) & 0xf))
18693 return relax_immediate (fragp
, 8, 0);
18695 return relax_immediate (fragp
, 3, 0);
18699 /* Return the size of a relaxable branch instruction. BITS is the
18700 size of the offset field in the narrow instruction. */
18703 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
18709 /* Assume worst case for symbols not known to be in the same section. */
18710 if (!S_IS_DEFINED (fragp
->fr_symbol
)
18711 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
18712 || S_IS_WEAK (fragp
->fr_symbol
))
18716 if (S_IS_DEFINED (fragp
->fr_symbol
)
18717 && ARM_IS_FUNC (fragp
->fr_symbol
))
18721 val
= relaxed_symbol_addr (fragp
, stretch
);
18722 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
18725 /* Offset is a signed value *2 */
18727 if (val
>= limit
|| val
< -limit
)
18733 /* Relax a machine dependent frag. This returns the amount by which
18734 the current size of the frag should change. */
18737 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
18742 oldsize
= fragp
->fr_var
;
18743 switch (fragp
->fr_subtype
)
18745 case T_MNEM_ldr_pc2
:
18746 newsize
= relax_adr (fragp
, sec
, stretch
);
18748 case T_MNEM_ldr_pc
:
18749 case T_MNEM_ldr_sp
:
18750 case T_MNEM_str_sp
:
18751 newsize
= relax_immediate (fragp
, 8, 2);
18755 newsize
= relax_immediate (fragp
, 5, 2);
18759 newsize
= relax_immediate (fragp
, 5, 1);
18763 newsize
= relax_immediate (fragp
, 5, 0);
18766 newsize
= relax_adr (fragp
, sec
, stretch
);
18772 newsize
= relax_immediate (fragp
, 8, 0);
18775 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
18778 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
18780 case T_MNEM_add_sp
:
18781 case T_MNEM_add_pc
:
18782 newsize
= relax_immediate (fragp
, 8, 2);
18784 case T_MNEM_inc_sp
:
18785 case T_MNEM_dec_sp
:
18786 newsize
= relax_immediate (fragp
, 7, 2);
18792 newsize
= relax_addsub (fragp
, sec
);
18798 fragp
->fr_var
= newsize
;
18799 /* Freeze wide instructions that are at or before the same location as
18800 in the previous pass. This avoids infinite loops.
18801 Don't freeze them unconditionally because targets may be artificially
18802 misaligned by the expansion of preceding frags. */
18803 if (stretch
<= 0 && newsize
> 2)
18805 md_convert_frag (sec
->owner
, sec
, fragp
);
18809 return newsize
- oldsize
;
18812 /* Round up a section size to the appropriate boundary. */
18815 md_section_align (segT segment ATTRIBUTE_UNUSED
,
18818 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
18819 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
18821 /* For a.out, force the section size to be aligned. If we don't do
18822 this, BFD will align it for us, but it will not write out the
18823 final bytes of the section. This may be a bug in BFD, but it is
18824 easier to fix it here since that is how the other a.out targets
18828 align
= bfd_get_section_alignment (stdoutput
, segment
);
18829 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
18836 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
18837 of an rs_align_code fragment. */
18840 arm_handle_align (fragS
* fragP
)
18842 static char const arm_noop
[2][2][4] =
18845 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
18846 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
18849 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
18850 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
18853 static char const thumb_noop
[2][2][2] =
18856 {0xc0, 0x46}, /* LE */
18857 {0x46, 0xc0}, /* BE */
18860 {0x00, 0xbf}, /* LE */
18861 {0xbf, 0x00} /* BE */
18864 static char const wide_thumb_noop
[2][4] =
18865 { /* Wide Thumb-2 */
18866 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
18867 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
18870 unsigned bytes
, fix
, noop_size
;
18873 const char *narrow_noop
= NULL
;
18878 if (fragP
->fr_type
!= rs_align_code
)
18881 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
18882 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
18885 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
18886 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
18888 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
18890 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
18892 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
18894 narrow_noop
= thumb_noop
[1][target_big_endian
];
18895 noop
= wide_thumb_noop
[target_big_endian
];
18898 noop
= thumb_noop
[0][target_big_endian
];
18906 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
) != 0]
18907 [target_big_endian
];
18914 fragP
->fr_var
= noop_size
;
18916 if (bytes
& (noop_size
- 1))
18918 fix
= bytes
& (noop_size
- 1);
18920 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
18922 memset (p
, 0, fix
);
18929 if (bytes
& noop_size
)
18931 /* Insert a narrow noop. */
18932 memcpy (p
, narrow_noop
, noop_size
);
18934 bytes
-= noop_size
;
18938 /* Use wide noops for the remainder */
18942 while (bytes
>= noop_size
)
18944 memcpy (p
, noop
, noop_size
);
18946 bytes
-= noop_size
;
18950 fragP
->fr_fix
+= fix
;
18953 /* Called from md_do_align. Used to create an alignment
18954 frag in a code section. */
18957 arm_frag_align_code (int n
, int max
)
18961 /* We assume that there will never be a requirement
18962 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
18963 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
18968 _("alignments greater than %d bytes not supported in .text sections."),
18969 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
18970 as_fatal ("%s", err_msg
);
18973 p
= frag_var (rs_align_code
,
18974 MAX_MEM_FOR_RS_ALIGN_CODE
,
18976 (relax_substateT
) max
,
18983 /* Perform target specific initialisation of a frag.
18984 Note - despite the name this initialisation is not done when the frag
18985 is created, but only when its type is assigned. A frag can be created
18986 and used a long time before its type is set, so beware of assuming that
18987 this initialisationis performed first. */
18991 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
18993 /* Record whether this frag is in an ARM or a THUMB area. */
18994 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
18997 #else /* OBJ_ELF is defined. */
18999 arm_init_frag (fragS
* fragP
, int max_chars
)
19001 /* If the current ARM vs THUMB mode has not already
19002 been recorded into this frag then do so now. */
19003 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
19005 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
19007 /* Record a mapping symbol for alignment frags. We will delete this
19008 later if the alignment ends up empty. */
19009 switch (fragP
->fr_type
)
19012 case rs_align_test
:
19014 mapping_state_2 (MAP_DATA
, max_chars
);
19016 case rs_align_code
:
19017 mapping_state_2 (thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
19025 /* When we change sections we need to issue a new mapping symbol. */
19028 arm_elf_change_section (void)
19030 /* Link an unlinked unwind index table section to the .text section. */
19031 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
19032 && elf_linked_to_section (now_seg
) == NULL
)
19033 elf_linked_to_section (now_seg
) = text_section
;
19037 arm_elf_section_type (const char * str
, size_t len
)
19039 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
19040 return SHT_ARM_EXIDX
;
19045 /* Code to deal with unwinding tables. */
19047 static void add_unwind_adjustsp (offsetT
);
19049 /* Generate any deferred unwind frame offset. */
19052 flush_pending_unwind (void)
19056 offset
= unwind
.pending_offset
;
19057 unwind
.pending_offset
= 0;
19059 add_unwind_adjustsp (offset
);
19062 /* Add an opcode to this list for this function. Two-byte opcodes should
19063 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
19067 add_unwind_opcode (valueT op
, int length
)
19069 /* Add any deferred stack adjustment. */
19070 if (unwind
.pending_offset
)
19071 flush_pending_unwind ();
19073 unwind
.sp_restored
= 0;
19075 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
19077 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
19078 if (unwind
.opcodes
)
19079 unwind
.opcodes
= (unsigned char *) xrealloc (unwind
.opcodes
,
19080 unwind
.opcode_alloc
);
19082 unwind
.opcodes
= (unsigned char *) xmalloc (unwind
.opcode_alloc
);
19087 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
19089 unwind
.opcode_count
++;
19093 /* Add unwind opcodes to adjust the stack pointer. */
19096 add_unwind_adjustsp (offsetT offset
)
19100 if (offset
> 0x200)
19102 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
19107 /* Long form: 0xb2, uleb128. */
19108 /* This might not fit in a word so add the individual bytes,
19109 remembering the list is built in reverse order. */
19110 o
= (valueT
) ((offset
- 0x204) >> 2);
19112 add_unwind_opcode (0, 1);
19114 /* Calculate the uleb128 encoding of the offset. */
19118 bytes
[n
] = o
& 0x7f;
19124 /* Add the insn. */
19126 add_unwind_opcode (bytes
[n
- 1], 1);
19127 add_unwind_opcode (0xb2, 1);
19129 else if (offset
> 0x100)
19131 /* Two short opcodes. */
19132 add_unwind_opcode (0x3f, 1);
19133 op
= (offset
- 0x104) >> 2;
19134 add_unwind_opcode (op
, 1);
19136 else if (offset
> 0)
19138 /* Short opcode. */
19139 op
= (offset
- 4) >> 2;
19140 add_unwind_opcode (op
, 1);
19142 else if (offset
< 0)
19145 while (offset
> 0x100)
19147 add_unwind_opcode (0x7f, 1);
19150 op
= ((offset
- 4) >> 2) | 0x40;
19151 add_unwind_opcode (op
, 1);
19155 /* Finish the list of unwind opcodes for this function. */
19157 finish_unwind_opcodes (void)
19161 if (unwind
.fp_used
)
19163 /* Adjust sp as necessary. */
19164 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
19165 flush_pending_unwind ();
19167 /* After restoring sp from the frame pointer. */
19168 op
= 0x90 | unwind
.fp_reg
;
19169 add_unwind_opcode (op
, 1);
19172 flush_pending_unwind ();
19176 /* Start an exception table entry. If idx is nonzero this is an index table
19180 start_unwind_section (const segT text_seg
, int idx
)
19182 const char * text_name
;
19183 const char * prefix
;
19184 const char * prefix_once
;
19185 const char * group_name
;
19189 size_t sec_name_len
;
19196 prefix
= ELF_STRING_ARM_unwind
;
19197 prefix_once
= ELF_STRING_ARM_unwind_once
;
19198 type
= SHT_ARM_EXIDX
;
19202 prefix
= ELF_STRING_ARM_unwind_info
;
19203 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
19204 type
= SHT_PROGBITS
;
19207 text_name
= segment_name (text_seg
);
19208 if (streq (text_name
, ".text"))
19211 if (strncmp (text_name
, ".gnu.linkonce.t.",
19212 strlen (".gnu.linkonce.t.")) == 0)
19214 prefix
= prefix_once
;
19215 text_name
+= strlen (".gnu.linkonce.t.");
19218 prefix_len
= strlen (prefix
);
19219 text_len
= strlen (text_name
);
19220 sec_name_len
= prefix_len
+ text_len
;
19221 sec_name
= (char *) xmalloc (sec_name_len
+ 1);
19222 memcpy (sec_name
, prefix
, prefix_len
);
19223 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
19224 sec_name
[prefix_len
+ text_len
] = '\0';
19230 /* Handle COMDAT group. */
19231 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
19233 group_name
= elf_group_name (text_seg
);
19234 if (group_name
== NULL
)
19236 as_bad (_("Group section `%s' has no group signature"),
19237 segment_name (text_seg
));
19238 ignore_rest_of_line ();
19241 flags
|= SHF_GROUP
;
19245 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
19247 /* Set the section link for index tables. */
19249 elf_linked_to_section (now_seg
) = text_seg
;
19253 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
19254 personality routine data. Returns zero, or the index table value for
19255 and inline entry. */
19258 create_unwind_entry (int have_data
)
19263 /* The current word of data. */
19265 /* The number of bytes left in this word. */
19268 finish_unwind_opcodes ();
19270 /* Remember the current text section. */
19271 unwind
.saved_seg
= now_seg
;
19272 unwind
.saved_subseg
= now_subseg
;
19274 start_unwind_section (now_seg
, 0);
19276 if (unwind
.personality_routine
== NULL
)
19278 if (unwind
.personality_index
== -2)
19281 as_bad (_("handlerdata in cantunwind frame"));
19282 return 1; /* EXIDX_CANTUNWIND. */
19285 /* Use a default personality routine if none is specified. */
19286 if (unwind
.personality_index
== -1)
19288 if (unwind
.opcode_count
> 3)
19289 unwind
.personality_index
= 1;
19291 unwind
.personality_index
= 0;
19294 /* Space for the personality routine entry. */
19295 if (unwind
.personality_index
== 0)
19297 if (unwind
.opcode_count
> 3)
19298 as_bad (_("too many unwind opcodes for personality routine 0"));
19302 /* All the data is inline in the index table. */
19305 while (unwind
.opcode_count
> 0)
19307 unwind
.opcode_count
--;
19308 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
19312 /* Pad with "finish" opcodes. */
19314 data
= (data
<< 8) | 0xb0;
19321 /* We get two opcodes "free" in the first word. */
19322 size
= unwind
.opcode_count
- 2;
19325 /* An extra byte is required for the opcode count. */
19326 size
= unwind
.opcode_count
+ 1;
19328 size
= (size
+ 3) >> 2;
19330 as_bad (_("too many unwind opcodes"));
19332 frag_align (2, 0, 0);
19333 record_alignment (now_seg
, 2);
19334 unwind
.table_entry
= expr_build_dot ();
19336 /* Allocate the table entry. */
19337 ptr
= frag_more ((size
<< 2) + 4);
19338 where
= frag_now_fix () - ((size
<< 2) + 4);
19340 switch (unwind
.personality_index
)
19343 /* ??? Should this be a PLT generating relocation? */
19344 /* Custom personality routine. */
19345 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
19346 BFD_RELOC_ARM_PREL31
);
19351 /* Set the first byte to the number of additional words. */
19356 /* ABI defined personality routines. */
19358 /* Three opcodes bytes are packed into the first word. */
19365 /* The size and first two opcode bytes go in the first word. */
19366 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
19371 /* Should never happen. */
19375 /* Pack the opcodes into words (MSB first), reversing the list at the same
19377 while (unwind
.opcode_count
> 0)
19381 md_number_to_chars (ptr
, data
, 4);
19386 unwind
.opcode_count
--;
19388 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
19391 /* Finish off the last word. */
19394 /* Pad with "finish" opcodes. */
19396 data
= (data
<< 8) | 0xb0;
19398 md_number_to_chars (ptr
, data
, 4);
19403 /* Add an empty descriptor if there is no user-specified data. */
19404 ptr
= frag_more (4);
19405 md_number_to_chars (ptr
, 0, 4);
19412 /* Initialize the DWARF-2 unwind information for this procedure. */
19415 tc_arm_frame_initial_instructions (void)
19417 cfi_add_CFA_def_cfa (REG_SP
, 0);
19419 #endif /* OBJ_ELF */
19421 /* Convert REGNAME to a DWARF-2 register number. */
19424 tc_arm_regname_to_dw2regnum (char *regname
)
19426 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
#ifdef TE_PE
/* Emit a SIZE-byte section-relative (secrel32) reference to SYMBOL,
   as used by PE DWARF-2 debug info.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif
19447 /* MD interface: Symbol and relocation handling. */
19449 /* Return the address within the segment that a PC-relative fixup is
19450 relative to. For ARM, PC-relative fixups applied to instructions
19451 are generally relative to the location of the fixup plus 8 bytes.
19452 Thumb branches are offset by 4, and Thumb loads relative to PC
19453 require special handling. */
19456 md_pcrel_from_section (fixS
* fixP
, segT seg
)
19458 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19460 /* If this is pc-relative and we are going to emit a relocation
19461 then we just want to put out any pipeline compensation that the linker
19462 will need. Otherwise we want to use the calculated base.
19463 For WinCE we skip the bias for externals as well, since this
19464 is how the MS ARM-CE assembler behaves and we want to be compatible. */
19466 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19467 || (arm_force_relocation (fixP
)
19469 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19475 switch (fixP
->fx_r_type
)
19477 /* PC relative addressing on the Thumb is slightly odd as the
19478 bottom two bits of the PC are forced to zero for the
19479 calculation. This happens *after* application of the
19480 pipeline offset. However, Thumb adrl already adjusts for
19481 this, so we need not do it again. */
19482 case BFD_RELOC_ARM_THUMB_ADD
:
19485 case BFD_RELOC_ARM_THUMB_OFFSET
:
19486 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
19487 case BFD_RELOC_ARM_T32_ADD_PC12
:
19488 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
19489 return (base
+ 4) & ~3;
19491 /* Thumb branches are simply offset by +4. */
19492 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
19493 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
19494 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
19495 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
19496 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
19499 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
19501 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19502 && (!S_IS_EXTERNAL (fixP
->fx_addsy
))
19503 && ARM_IS_FUNC (fixP
->fx_addsy
)
19504 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19505 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19508 /* BLX is like branches above, but forces the low two bits of PC to
19510 case BFD_RELOC_THUMB_PCREL_BLX
:
19512 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19513 && (!S_IS_EXTERNAL (fixP
->fx_addsy
))
19514 && THUMB_IS_FUNC (fixP
->fx_addsy
)
19515 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19516 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19517 return (base
+ 4) & ~3;
19519 /* ARM mode branches are offset by +8. However, the Windows CE
19520 loader expects the relocation not to take this into account. */
19521 case BFD_RELOC_ARM_PCREL_BLX
:
19523 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19524 && (!S_IS_EXTERNAL (fixP
->fx_addsy
))
19525 && ARM_IS_FUNC (fixP
->fx_addsy
)
19526 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19527 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19530 case BFD_RELOC_ARM_PCREL_CALL
:
19532 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19533 && (!S_IS_EXTERNAL (fixP
->fx_addsy
))
19534 && THUMB_IS_FUNC (fixP
->fx_addsy
)
19535 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19536 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19539 case BFD_RELOC_ARM_PCREL_BRANCH
:
19540 case BFD_RELOC_ARM_PCREL_JUMP
:
19541 case BFD_RELOC_ARM_PLT32
:
19543 /* When handling fixups immediately, because we have already
19544 discovered the value of a symbol, or the address of the frag involved
19545 we must account for the offset by +8, as the OS loader will never see the reloc.
19546 see fixup_segment() in write.c
19547 The S_IS_EXTERNAL test handles the case of global symbols.
19548 Those need the calculated base, not just the pipe compensation the linker will need. */
19550 && fixP
->fx_addsy
!= NULL
19551 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19552 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
19560 /* ARM mode loads relative to PC are also offset by +8. Unlike
19561 branches, the Windows CE loader *does* expect the relocation
19562 to take this into account. */
19563 case BFD_RELOC_ARM_OFFSET_IMM
:
19564 case BFD_RELOC_ARM_OFFSET_IMM8
:
19565 case BFD_RELOC_ARM_HWLITERAL
:
19566 case BFD_RELOC_ARM_LITERAL
:
19567 case BFD_RELOC_ARM_CP_OFF_IMM
:
19571 /* Other PC-relative relocations are un-offset. */
19577 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
19578 Otherwise we have no need to default values of symbols. */
19581 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
19584 if (name
[0] == '_' && name
[1] == 'G'
19585 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
19589 if (symbol_find (name
))
19590 as_bad (_("GOT already in the symbol table"));
19592 GOT_symbol
= symbol_new (name
, undefined_section
,
19593 (valueT
) 0, & zero_address_frag
);
19603 /* Subroutine of md_apply_fix. Check to see if an immediate can be
19604 computed as two separate immediate values, added together. We
19605 already know that this value cannot be computed by just one ARM
19608 static unsigned int
19609 validate_immediate_twopart (unsigned int val
,
19610 unsigned int * highpart
)
19615 for (i
= 0; i
< 32; i
+= 2)
19616 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
19622 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
19624 else if (a
& 0xff0000)
19626 if (a
& 0xff000000)
19628 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
19632 gas_assert (a
& 0xff000000);
19633 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
19636 return (a
& 0xff) | (i
<< 7);
19643 validate_offset_imm (unsigned int val
, int hwse
)
19645 if ((hwse
&& val
> 255) || val
> 4095)
19650 /* Subroutine of md_apply_fix. Do those data_ops which can take a
19651 negative immediate constant by altering the instruction. A bit of
19656 by inverting the second operand, and
19659 by negating the second operand. */
19662 negate_data_op (unsigned long * instruction
,
19663 unsigned long value
)
19666 unsigned long negated
, inverted
;
19668 negated
= encode_arm_immediate (-value
);
19669 inverted
= encode_arm_immediate (~value
);
19671 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
19674 /* First negates. */
19675 case OPCODE_SUB
: /* ADD <-> SUB */
19676 new_inst
= OPCODE_ADD
;
19681 new_inst
= OPCODE_SUB
;
19685 case OPCODE_CMP
: /* CMP <-> CMN */
19686 new_inst
= OPCODE_CMN
;
19691 new_inst
= OPCODE_CMP
;
19695 /* Now Inverted ops. */
19696 case OPCODE_MOV
: /* MOV <-> MVN */
19697 new_inst
= OPCODE_MVN
;
19702 new_inst
= OPCODE_MOV
;
19706 case OPCODE_AND
: /* AND <-> BIC */
19707 new_inst
= OPCODE_BIC
;
19712 new_inst
= OPCODE_AND
;
19716 case OPCODE_ADC
: /* ADC <-> SBC */
19717 new_inst
= OPCODE_SBC
;
19722 new_inst
= OPCODE_ADC
;
19726 /* We cannot do anything. */
19731 if (value
== (unsigned) FAIL
)
19734 *instruction
&= OPCODE_MASK
;
19735 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
19739 /* Like negate_data_op, but for Thumb-2. */
19741 static unsigned int
19742 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
19746 unsigned int negated
, inverted
;
19748 negated
= encode_thumb32_immediate (-value
);
19749 inverted
= encode_thumb32_immediate (~value
);
19751 rd
= (*instruction
>> 8) & 0xf;
19752 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
19755 /* ADD <-> SUB. Includes CMP <-> CMN. */
19756 case T2_OPCODE_SUB
:
19757 new_inst
= T2_OPCODE_ADD
;
19761 case T2_OPCODE_ADD
:
19762 new_inst
= T2_OPCODE_SUB
;
19766 /* ORR <-> ORN. Includes MOV <-> MVN. */
19767 case T2_OPCODE_ORR
:
19768 new_inst
= T2_OPCODE_ORN
;
19772 case T2_OPCODE_ORN
:
19773 new_inst
= T2_OPCODE_ORR
;
19777 /* AND <-> BIC. TST has no inverted equivalent. */
19778 case T2_OPCODE_AND
:
19779 new_inst
= T2_OPCODE_BIC
;
19786 case T2_OPCODE_BIC
:
19787 new_inst
= T2_OPCODE_AND
;
19792 case T2_OPCODE_ADC
:
19793 new_inst
= T2_OPCODE_SBC
;
19797 case T2_OPCODE_SBC
:
19798 new_inst
= T2_OPCODE_ADC
;
19802 /* We cannot do anything. */
19807 if (value
== (unsigned int)FAIL
)
19810 *instruction
&= T2_OPCODE_MASK
;
19811 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
19815 /* Read a 32-bit thumb instruction from buf. */
19816 static unsigned long
19817 get_thumb32_insn (char * buf
)
19819 unsigned long insn
;
19820 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
19821 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
19827 /* We usually want to set the low bit on the address of thumb function
19828 symbols. In particular .word foo - . should have the low bit set.
19829 Generic code tries to fold the difference of two symbols to
19830 a constant. Prevent this and force a relocation when the first symbols
19831 is a thumb function. */
19834 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
19836 if (op
== O_subtract
19837 && l
->X_op
== O_symbol
19838 && r
->X_op
== O_symbol
19839 && THUMB_IS_FUNC (l
->X_add_symbol
))
19841 l
->X_op
= O_subtract
;
19842 l
->X_op_symbol
= r
->X_add_symbol
;
19843 l
->X_add_number
-= r
->X_add_number
;
19847 /* Process as normal. */
19851 /* Encode Thumb2 unconditional branches and calls. The encoding
19852 for the 2 are identical for the immediate values. */
19855 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
19857 #define T2I1I2MASK ((1 << 13) | (1 << 11))
19860 addressT S
, I1
, I2
, lo
, hi
;
19862 S
= (value
>> 24) & 0x01;
19863 I1
= (value
>> 23) & 0x01;
19864 I2
= (value
>> 22) & 0x01;
19865 hi
= (value
>> 12) & 0x3ff;
19866 lo
= (value
>> 1) & 0x7ff;
19867 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19868 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
19869 newval
|= (S
<< 10) | hi
;
19870 newval2
&= ~T2I1I2MASK
;
19871 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
19872 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19873 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
19877 md_apply_fix (fixS
* fixP
,
19881 offsetT value
= * valP
;
19883 unsigned int newimm
;
19884 unsigned long temp
;
19886 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
19888 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
19890 /* Note whether this will delete the relocation. */
19892 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
19895 /* On a 64-bit host, silently truncate 'value' to 32 bits for
19896 consistency with the behaviour on 32-bit hosts. Remember value
19898 value
&= 0xffffffff;
19899 value
^= 0x80000000;
19900 value
-= 0x80000000;
19903 fixP
->fx_addnumber
= value
;
19905 /* Same treatment for fixP->fx_offset. */
19906 fixP
->fx_offset
&= 0xffffffff;
19907 fixP
->fx_offset
^= 0x80000000;
19908 fixP
->fx_offset
-= 0x80000000;
19910 switch (fixP
->fx_r_type
)
19912 case BFD_RELOC_NONE
:
19913 /* This will need to go in the object file. */
19917 case BFD_RELOC_ARM_IMMEDIATE
:
19918 /* We claim that this fixup has been processed here,
19919 even if in fact we generate an error because we do
19920 not have a reloc for it, so tc_gen_reloc will reject it. */
19923 if (fixP
->fx_addsy
)
19925 const char *msg
= 0;
19927 if (! S_IS_DEFINED (fixP
->fx_addsy
))
19928 msg
= _("undefined symbol %s used as an immediate value");
19929 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19930 msg
= _("symbol %s is in a different section");
19931 else if (S_IS_WEAK (fixP
->fx_addsy
))
19932 msg
= _("symbol %s is weak and may be overridden later");
19936 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19937 msg
, S_GET_NAME (fixP
->fx_addsy
));
19942 newimm
= encode_arm_immediate (value
);
19943 temp
= md_chars_to_number (buf
, INSN_SIZE
);
19945 /* If the instruction will fail, see if we can fix things up by
19946 changing the opcode. */
19947 if (newimm
== (unsigned int) FAIL
19948 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
19950 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19951 _("invalid constant (%lx) after fixup"),
19952 (unsigned long) value
);
19956 newimm
|= (temp
& 0xfffff000);
19957 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
19960 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
19962 unsigned int highpart
= 0;
19963 unsigned int newinsn
= 0xe1a00000; /* nop. */
19965 if (fixP
->fx_addsy
)
19967 const char *msg
= 0;
19969 if (! S_IS_DEFINED (fixP
->fx_addsy
))
19970 msg
= _("undefined symbol %s used as an immediate value");
19971 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19972 msg
= _("symbol %s is in a different section");
19973 else if (S_IS_WEAK (fixP
->fx_addsy
))
19974 msg
= _("symbol %s is weak and may be overridden later");
19978 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19979 msg
, S_GET_NAME (fixP
->fx_addsy
));
19984 newimm
= encode_arm_immediate (value
);
19985 temp
= md_chars_to_number (buf
, INSN_SIZE
);
19987 /* If the instruction will fail, see if we can fix things up by
19988 changing the opcode. */
19989 if (newimm
== (unsigned int) FAIL
19990 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
19992 /* No ? OK - try using two ADD instructions to generate
19994 newimm
= validate_immediate_twopart (value
, & highpart
);
19996 /* Yes - then make sure that the second instruction is
19998 if (newimm
!= (unsigned int) FAIL
)
20000 /* Still No ? Try using a negated value. */
20001 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
20002 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
20003 /* Otherwise - give up. */
20006 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20007 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
20012 /* Replace the first operand in the 2nd instruction (which
20013 is the PC) with the destination register. We have
20014 already added in the PC in the first instruction and we
20015 do not want to do it again. */
20016 newinsn
&= ~ 0xf0000;
20017 newinsn
|= ((newinsn
& 0x0f000) << 4);
20020 newimm
|= (temp
& 0xfffff000);
20021 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
20023 highpart
|= (newinsn
& 0xfffff000);
20024 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
20028 case BFD_RELOC_ARM_OFFSET_IMM
:
20029 if (!fixP
->fx_done
&& seg
->use_rela_p
)
20032 case BFD_RELOC_ARM_LITERAL
:
20038 if (validate_offset_imm (value
, 0) == FAIL
)
20040 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
20041 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20042 _("invalid literal constant: pool needs to be closer"));
20044 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20045 _("bad immediate value for offset (%ld)"),
20050 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20051 newval
&= 0xff7ff000;
20052 newval
|= value
| (sign
? INDEX_UP
: 0);
20053 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20056 case BFD_RELOC_ARM_OFFSET_IMM8
:
20057 case BFD_RELOC_ARM_HWLITERAL
:
20063 if (validate_offset_imm (value
, 1) == FAIL
)
20065 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
20066 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20067 _("invalid literal constant: pool needs to be closer"));
20069 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
20074 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20075 newval
&= 0xff7ff0f0;
20076 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
20077 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20080 case BFD_RELOC_ARM_T32_OFFSET_U8
:
20081 if (value
< 0 || value
> 1020 || value
% 4 != 0)
20082 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20083 _("bad immediate value for offset (%ld)"), (long) value
);
20086 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
20088 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
20091 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
20092 /* This is a complicated relocation used for all varieties of Thumb32
20093 load/store instruction with immediate offset:
20095 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
20096 *4, optional writeback(W)
20097 (doubleword load/store)
20099 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
20100 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
20101 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
20102 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
20103 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
20105 Uppercase letters indicate bits that are already encoded at
20106 this point. Lowercase letters are our problem. For the
20107 second block of instructions, the secondary opcode nybble
20108 (bits 8..11) is present, and bit 23 is zero, even if this is
20109 a PC-relative operation. */
20110 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20112 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
20114 if ((newval
& 0xf0000000) == 0xe0000000)
20116 /* Doubleword load/store: 8-bit offset, scaled by 4. */
20118 newval
|= (1 << 23);
20121 if (value
% 4 != 0)
20123 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20124 _("offset not a multiple of 4"));
20130 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20131 _("offset out of range"));
20136 else if ((newval
& 0x000f0000) == 0x000f0000)
20138 /* PC-relative, 12-bit offset. */
20140 newval
|= (1 << 23);
20145 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20146 _("offset out of range"));
20151 else if ((newval
& 0x00000100) == 0x00000100)
20153 /* Writeback: 8-bit, +/- offset. */
20155 newval
|= (1 << 9);
20160 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20161 _("offset out of range"));
20166 else if ((newval
& 0x00000f00) == 0x00000e00)
20168 /* T-instruction: positive 8-bit offset. */
20169 if (value
< 0 || value
> 0xff)
20171 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20172 _("offset out of range"));
20180 /* Positive 12-bit or negative 8-bit offset. */
20184 newval
|= (1 << 23);
20194 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20195 _("offset out of range"));
20202 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
20203 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
20206 case BFD_RELOC_ARM_SHIFT_IMM
:
20207 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20208 if (((unsigned long) value
) > 32
20210 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
20212 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20213 _("shift expression is too large"));
20218 /* Shifts of zero must be done as lsl. */
20220 else if (value
== 32)
20222 newval
&= 0xfffff07f;
20223 newval
|= (value
& 0x1f) << 7;
20224 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20227 case BFD_RELOC_ARM_T32_IMMEDIATE
:
20228 case BFD_RELOC_ARM_T32_ADD_IMM
:
20229 case BFD_RELOC_ARM_T32_IMM12
:
20230 case BFD_RELOC_ARM_T32_ADD_PC12
:
20231 /* We claim that this fixup has been processed here,
20232 even if in fact we generate an error because we do
20233 not have a reloc for it, so tc_gen_reloc will reject it. */
20237 && ! S_IS_DEFINED (fixP
->fx_addsy
))
20239 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20240 _("undefined symbol %s used as an immediate value"),
20241 S_GET_NAME (fixP
->fx_addsy
));
20245 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20247 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
20250 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
20251 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
20253 newimm
= encode_thumb32_immediate (value
);
20254 if (newimm
== (unsigned int) FAIL
)
20255 newimm
= thumb32_negate_data_op (&newval
, value
);
20257 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
20258 && newimm
== (unsigned int) FAIL
)
20260 /* Turn add/sum into addw/subw. */
20261 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
20262 newval
= (newval
& 0xfeffffff) | 0x02000000;
20263 /* No flat 12-bit imm encoding for addsw/subsw. */
20264 if ((newval
& 0x00100000) == 0)
20266 /* 12 bit immediate for addw/subw. */
20270 newval
^= 0x00a00000;
20273 newimm
= (unsigned int) FAIL
;
20279 if (newimm
== (unsigned int)FAIL
)
20281 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20282 _("invalid constant (%lx) after fixup"),
20283 (unsigned long) value
);
20287 newval
|= (newimm
& 0x800) << 15;
20288 newval
|= (newimm
& 0x700) << 4;
20289 newval
|= (newimm
& 0x0ff);
20291 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
20292 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
20295 case BFD_RELOC_ARM_SMC
:
20296 if (((unsigned long) value
) > 0xffff)
20297 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20298 _("invalid smc expression"));
20299 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20300 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
20301 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20304 case BFD_RELOC_ARM_SWI
:
20305 if (fixP
->tc_fix_data
!= 0)
20307 if (((unsigned long) value
) > 0xff)
20308 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20309 _("invalid swi expression"));
20310 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20312 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20316 if (((unsigned long) value
) > 0x00ffffff)
20317 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20318 _("invalid swi expression"));
20319 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20321 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20325 case BFD_RELOC_ARM_MULTI
:
20326 if (((unsigned long) value
) > 0xffff)
20327 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20328 _("invalid expression in load/store multiple"));
20329 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
20330 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20334 case BFD_RELOC_ARM_PCREL_CALL
:
20336 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
20338 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20339 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20340 && THUMB_IS_FUNC (fixP
->fx_addsy
))
20341 /* Flip the bl to blx. This is a simple flip
20342 bit here because we generate PCREL_CALL for
20343 unconditional bls. */
20345 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20346 newval
= newval
| 0x10000000;
20347 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20353 goto arm_branch_common
;
20355 case BFD_RELOC_ARM_PCREL_JUMP
:
20356 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
20358 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20359 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20360 && THUMB_IS_FUNC (fixP
->fx_addsy
))
20362 /* This would map to a bl<cond>, b<cond>,
20363 b<always> to a Thumb function. We
20364 need to force a relocation for this particular
20366 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20370 case BFD_RELOC_ARM_PLT32
:
20372 case BFD_RELOC_ARM_PCREL_BRANCH
:
20374 goto arm_branch_common
;
20376 case BFD_RELOC_ARM_PCREL_BLX
:
20379 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
20381 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20382 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20383 && ARM_IS_FUNC (fixP
->fx_addsy
))
20385 /* Flip the blx to a bl and warn. */
20386 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
20387 newval
= 0xeb000000;
20388 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
20389 _("blx to '%s' an ARM ISA state function changed to bl"),
20391 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20397 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
20398 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
20402 /* We are going to store value (shifted right by two) in the
20403 instruction, in a 24 bit, signed field. Bits 26 through 32 either
20404 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
20405 also be be clear. */
20407 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20408 _("misaligned branch destination"));
20409 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
20410 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
20411 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20412 _("branch out of range"));
20414 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20416 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20417 newval
|= (value
>> 2) & 0x00ffffff;
20418 /* Set the H bit on BLX instructions. */
20422 newval
|= 0x01000000;
20424 newval
&= ~0x01000000;
20426 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20430 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
20431 /* CBZ can only branch forward. */
20433 /* Attempts to use CBZ to branch to the next instruction
20434 (which, strictly speaking, are prohibited) will be turned into
20437 FIXME: It may be better to remove the instruction completely and
20438 perform relaxation. */
20441 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20442 newval
= 0xbf00; /* NOP encoding T1 */
20443 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20448 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20449 _("branch out of range"));
20451 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20453 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20454 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
20455 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20460 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
20461 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
20462 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20463 _("branch out of range"));
20465 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20467 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20468 newval
|= (value
& 0x1ff) >> 1;
20469 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20473 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
20474 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
20475 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20476 _("branch out of range"));
20478 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20480 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20481 newval
|= (value
& 0xfff) >> 1;
20482 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20486 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
20488 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20489 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20490 && S_IS_DEFINED (fixP
->fx_addsy
)
20491 && ARM_IS_FUNC (fixP
->fx_addsy
)
20492 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
20494 /* Force a relocation for a branch 20 bits wide. */
20497 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
20498 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20499 _("conditional branch out of range"));
20501 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20504 addressT S
, J1
, J2
, lo
, hi
;
20506 S
= (value
& 0x00100000) >> 20;
20507 J2
= (value
& 0x00080000) >> 19;
20508 J1
= (value
& 0x00040000) >> 18;
20509 hi
= (value
& 0x0003f000) >> 12;
20510 lo
= (value
& 0x00000ffe) >> 1;
20512 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20513 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20514 newval
|= (S
<< 10) | hi
;
20515 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
20516 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20517 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
20521 case BFD_RELOC_THUMB_PCREL_BLX
:
20523 /* If there is a blx from a thumb state function to
20524 another thumb function flip this to a bl and warn
20528 && S_IS_DEFINED (fixP
->fx_addsy
)
20529 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20530 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20531 && THUMB_IS_FUNC (fixP
->fx_addsy
))
20533 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
20534 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
20535 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20537 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20538 newval
= newval
| 0x1000;
20539 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
20540 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20545 goto thumb_bl_common
;
20547 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
20549 /* A bl from Thumb state ISA to an internal ARM state function
20550 is converted to a blx. */
20552 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20553 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20554 && S_IS_DEFINED (fixP
->fx_addsy
)
20555 && ARM_IS_FUNC (fixP
->fx_addsy
)
20556 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
20558 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20559 newval
= newval
& ~0x1000;
20560 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
20561 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
20568 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
&&
20569 fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
20570 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20573 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
20574 /* For a BLX instruction, make sure that the relocation is rounded up
20575 to a word boundary. This follows the semantics of the instruction
20576 which specifies that bit 1 of the target address will come from bit
20577 1 of the base address. */
20578 value
= (value
+ 1) & ~ 1;
20581 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
20583 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_arch_t2
)))
20585 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20586 _("branch out of range"));
20588 else if ((value
& ~0x1ffffff)
20589 && ((value
& ~0x1ffffff) != ~0x1ffffff))
20591 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20592 _("Thumb2 branch out of range"));
20596 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20597 encode_thumb2_b_bl_offset (buf
, value
);
20601 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
20602 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
20603 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20604 _("branch out of range"));
20606 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20607 encode_thumb2_b_bl_offset (buf
, value
);
20612 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20613 md_number_to_chars (buf
, value
, 1);
20617 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20618 md_number_to_chars (buf
, value
, 2);
20622 case BFD_RELOC_ARM_TLS_GD32
:
20623 case BFD_RELOC_ARM_TLS_LE32
:
20624 case BFD_RELOC_ARM_TLS_IE32
:
20625 case BFD_RELOC_ARM_TLS_LDM32
:
20626 case BFD_RELOC_ARM_TLS_LDO32
:
20627 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
20630 case BFD_RELOC_ARM_GOT32
:
20631 case BFD_RELOC_ARM_GOTOFF
:
20632 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20633 md_number_to_chars (buf
, 0, 4);
20636 case BFD_RELOC_ARM_GOT_PREL
:
20637 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20638 md_number_to_chars (buf
, value
, 4);
20641 case BFD_RELOC_ARM_TARGET2
:
20642 /* TARGET2 is not partial-inplace, so we need to write the
20643 addend here for REL targets, because it won't be written out
20644 during reloc processing later. */
20645 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20646 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
20650 case BFD_RELOC_RVA
:
20652 case BFD_RELOC_ARM_TARGET1
:
20653 case BFD_RELOC_ARM_ROSEGREL32
:
20654 case BFD_RELOC_ARM_SBREL32
:
20655 case BFD_RELOC_32_PCREL
:
20657 case BFD_RELOC_32_SECREL
:
20659 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20661 /* For WinCE we only do this for pcrel fixups. */
20662 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
20664 md_number_to_chars (buf
, value
, 4);
20668 case BFD_RELOC_ARM_PREL31
:
20669 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20671 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
20672 if ((value
^ (value
>> 1)) & 0x40000000)
20674 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20675 _("rel31 relocation overflow"));
20677 newval
|= value
& 0x7fffffff;
20678 md_number_to_chars (buf
, newval
, 4);
20683 case BFD_RELOC_ARM_CP_OFF_IMM
:
20684 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
20685 if (value
< -1023 || value
> 1023 || (value
& 3))
20686 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20687 _("co-processor offset out of range"));
20692 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
20693 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
20694 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20696 newval
= get_thumb32_insn (buf
);
20697 newval
&= 0xff7fff00;
20698 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
20699 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
20700 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
20701 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20703 put_thumb32_insn (buf
, newval
);
20706 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
20707 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
20708 if (value
< -255 || value
> 255)
20709 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20710 _("co-processor offset out of range"));
20712 goto cp_off_common
;
20714 case BFD_RELOC_ARM_THUMB_OFFSET
:
20715 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20716 /* Exactly what ranges, and where the offset is inserted depends
20717 on the type of instruction, we can establish this from the
20719 switch (newval
>> 12)
20721 case 4: /* PC load. */
20722 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20723 forced to zero for these loads; md_pcrel_from has already
20724 compensated for this. */
20726 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20727 _("invalid offset, target not word aligned (0x%08lX)"),
20728 (((unsigned long) fixP
->fx_frag
->fr_address
20729 + (unsigned long) fixP
->fx_where
) & ~3)
20730 + (unsigned long) value
);
20732 if (value
& ~0x3fc)
20733 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20734 _("invalid offset, value too big (0x%08lX)"),
20737 newval
|= value
>> 2;
20740 case 9: /* SP load/store. */
20741 if (value
& ~0x3fc)
20742 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20743 _("invalid offset, value too big (0x%08lX)"),
20745 newval
|= value
>> 2;
20748 case 6: /* Word load/store. */
20750 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20751 _("invalid offset, value too big (0x%08lX)"),
20753 newval
|= value
<< 4; /* 6 - 2. */
20756 case 7: /* Byte load/store. */
20758 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20759 _("invalid offset, value too big (0x%08lX)"),
20761 newval
|= value
<< 6;
20764 case 8: /* Halfword load/store. */
20766 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20767 _("invalid offset, value too big (0x%08lX)"),
20769 newval
|= value
<< 5; /* 6 - 1. */
20773 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20774 "Unable to process relocation for thumb opcode: %lx",
20775 (unsigned long) newval
);
20778 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20781 case BFD_RELOC_ARM_THUMB_ADD
:
20782 /* This is a complicated relocation, since we use it for all of
20783 the following immediate relocations:
20787 9bit ADD/SUB SP word-aligned
20788 10bit ADD PC/SP word-aligned
20790 The type of instruction being processed is encoded in the
20797 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20799 int rd
= (newval
>> 4) & 0xf;
20800 int rs
= newval
& 0xf;
20801 int subtract
= !!(newval
& 0x8000);
20803 /* Check for HI regs, only very restricted cases allowed:
20804 Adjusting SP, and using PC or SP to get an address. */
20805 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
20806 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
20807 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20808 _("invalid Hi register with immediate"));
20810 /* If value is negative, choose the opposite instruction. */
20814 subtract
= !subtract
;
20816 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20817 _("immediate value out of range"));
20822 if (value
& ~0x1fc)
20823 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20824 _("invalid immediate for stack address calculation"));
20825 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
20826 newval
|= value
>> 2;
20828 else if (rs
== REG_PC
|| rs
== REG_SP
)
20830 if (subtract
|| value
& ~0x3fc)
20831 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20832 _("invalid immediate for address calculation (value = 0x%08lX)"),
20833 (unsigned long) value
);
20834 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
20836 newval
|= value
>> 2;
20841 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20842 _("immediate value out of range"));
20843 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
20844 newval
|= (rd
<< 8) | value
;
20849 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20850 _("immediate value out of range"));
20851 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
20852 newval
|= rd
| (rs
<< 3) | (value
<< 6);
20855 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20858 case BFD_RELOC_ARM_THUMB_IMM
:
20859 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20860 if (value
< 0 || value
> 255)
20861 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20862 _("invalid immediate: %ld is out of range"),
20865 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20868 case BFD_RELOC_ARM_THUMB_SHIFT
:
20869 /* 5bit shift value (0..32). LSL cannot take 32. */
20870 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
20871 temp
= newval
& 0xf800;
20872 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
20873 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20874 _("invalid shift value: %ld"), (long) value
);
20875 /* Shifts of zero must be encoded as LSL. */
20877 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
20878 /* Shifts of 32 are encoded as zero. */
20879 else if (value
== 32)
20881 newval
|= value
<< 6;
20882 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20885 case BFD_RELOC_VTABLE_INHERIT
:
20886 case BFD_RELOC_VTABLE_ENTRY
:
20890 case BFD_RELOC_ARM_MOVW
:
20891 case BFD_RELOC_ARM_MOVT
:
20892 case BFD_RELOC_ARM_THUMB_MOVW
:
20893 case BFD_RELOC_ARM_THUMB_MOVT
:
20894 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20896 /* REL format relocations are limited to a 16-bit addend. */
20897 if (!fixP
->fx_done
)
20899 if (value
< -0x8000 || value
> 0x7fff)
20900 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20901 _("offset out of range"));
20903 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
20904 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
20909 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
20910 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
20912 newval
= get_thumb32_insn (buf
);
20913 newval
&= 0xfbf08f00;
20914 newval
|= (value
& 0xf000) << 4;
20915 newval
|= (value
& 0x0800) << 15;
20916 newval
|= (value
& 0x0700) << 4;
20917 newval
|= (value
& 0x00ff);
20918 put_thumb32_insn (buf
, newval
);
20922 newval
= md_chars_to_number (buf
, 4);
20923 newval
&= 0xfff0f000;
20924 newval
|= value
& 0x0fff;
20925 newval
|= (value
& 0xf000) << 4;
20926 md_number_to_chars (buf
, newval
, 4);
20931 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
20932 case BFD_RELOC_ARM_ALU_PC_G0
:
20933 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
20934 case BFD_RELOC_ARM_ALU_PC_G1
:
20935 case BFD_RELOC_ARM_ALU_PC_G2
:
20936 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
20937 case BFD_RELOC_ARM_ALU_SB_G0
:
20938 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
20939 case BFD_RELOC_ARM_ALU_SB_G1
:
20940 case BFD_RELOC_ARM_ALU_SB_G2
:
20941 gas_assert (!fixP
->fx_done
);
20942 if (!seg
->use_rela_p
)
20945 bfd_vma encoded_addend
;
20946 bfd_vma addend_abs
= abs (value
);
20948 /* Check that the absolute value of the addend can be
20949 expressed as an 8-bit constant plus a rotation. */
20950 encoded_addend
= encode_arm_immediate (addend_abs
);
20951 if (encoded_addend
== (unsigned int) FAIL
)
20952 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20953 _("the offset 0x%08lX is not representable"),
20954 (unsigned long) addend_abs
);
20956 /* Extract the instruction. */
20957 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20959 /* If the addend is positive, use an ADD instruction.
20960 Otherwise use a SUB. Take care not to destroy the S bit. */
20961 insn
&= 0xff1fffff;
20967 /* Place the encoded addend into the first 12 bits of the
20969 insn
&= 0xfffff000;
20970 insn
|= encoded_addend
;
20972 /* Update the instruction. */
20973 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20977 case BFD_RELOC_ARM_LDR_PC_G0
:
20978 case BFD_RELOC_ARM_LDR_PC_G1
:
20979 case BFD_RELOC_ARM_LDR_PC_G2
:
20980 case BFD_RELOC_ARM_LDR_SB_G0
:
20981 case BFD_RELOC_ARM_LDR_SB_G1
:
20982 case BFD_RELOC_ARM_LDR_SB_G2
:
20983 gas_assert (!fixP
->fx_done
);
20984 if (!seg
->use_rela_p
)
20987 bfd_vma addend_abs
= abs (value
);
20989 /* Check that the absolute value of the addend can be
20990 encoded in 12 bits. */
20991 if (addend_abs
>= 0x1000)
20992 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20993 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
20994 (unsigned long) addend_abs
);
20996 /* Extract the instruction. */
20997 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20999 /* If the addend is negative, clear bit 23 of the instruction.
21000 Otherwise set it. */
21002 insn
&= ~(1 << 23);
21006 /* Place the absolute value of the addend into the first 12 bits
21007 of the instruction. */
21008 insn
&= 0xfffff000;
21009 insn
|= addend_abs
;
21011 /* Update the instruction. */
21012 md_number_to_chars (buf
, insn
, INSN_SIZE
);
21016 case BFD_RELOC_ARM_LDRS_PC_G0
:
21017 case BFD_RELOC_ARM_LDRS_PC_G1
:
21018 case BFD_RELOC_ARM_LDRS_PC_G2
:
21019 case BFD_RELOC_ARM_LDRS_SB_G0
:
21020 case BFD_RELOC_ARM_LDRS_SB_G1
:
21021 case BFD_RELOC_ARM_LDRS_SB_G2
:
21022 gas_assert (!fixP
->fx_done
);
21023 if (!seg
->use_rela_p
)
21026 bfd_vma addend_abs
= abs (value
);
21028 /* Check that the absolute value of the addend can be
21029 encoded in 8 bits. */
21030 if (addend_abs
>= 0x100)
21031 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21032 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
21033 (unsigned long) addend_abs
);
21035 /* Extract the instruction. */
21036 insn
= md_chars_to_number (buf
, INSN_SIZE
);
21038 /* If the addend is negative, clear bit 23 of the instruction.
21039 Otherwise set it. */
21041 insn
&= ~(1 << 23);
21045 /* Place the first four bits of the absolute value of the addend
21046 into the first 4 bits of the instruction, and the remaining
21047 four into bits 8 .. 11. */
21048 insn
&= 0xfffff0f0;
21049 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
21051 /* Update the instruction. */
21052 md_number_to_chars (buf
, insn
, INSN_SIZE
);
21056 case BFD_RELOC_ARM_LDC_PC_G0
:
21057 case BFD_RELOC_ARM_LDC_PC_G1
:
21058 case BFD_RELOC_ARM_LDC_PC_G2
:
21059 case BFD_RELOC_ARM_LDC_SB_G0
:
21060 case BFD_RELOC_ARM_LDC_SB_G1
:
21061 case BFD_RELOC_ARM_LDC_SB_G2
:
21062 gas_assert (!fixP
->fx_done
);
21063 if (!seg
->use_rela_p
)
21066 bfd_vma addend_abs
= abs (value
);
21068 /* Check that the absolute value of the addend is a multiple of
21069 four and, when divided by four, fits in 8 bits. */
21070 if (addend_abs
& 0x3)
21071 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21072 _("bad offset 0x%08lX (must be word-aligned)"),
21073 (unsigned long) addend_abs
);
21075 if ((addend_abs
>> 2) > 0xff)
21076 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21077 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
21078 (unsigned long) addend_abs
);
21080 /* Extract the instruction. */
21081 insn
= md_chars_to_number (buf
, INSN_SIZE
);
21083 /* If the addend is negative, clear bit 23 of the instruction.
21084 Otherwise set it. */
21086 insn
&= ~(1 << 23);
21090 /* Place the addend (divided by four) into the first eight
21091 bits of the instruction. */
21092 insn
&= 0xfffffff0;
21093 insn
|= addend_abs
>> 2;
21095 /* Update the instruction. */
21096 md_number_to_chars (buf
, insn
, INSN_SIZE
);
21100 case BFD_RELOC_ARM_V4BX
:
21101 /* This will need to go in the object file. */
21105 case BFD_RELOC_UNUSED
:
21107 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
21108 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
21112 /* Translate internal representation of relocation info to BFD target
21116 tc_gen_reloc (asection
*section
, fixS
*fixp
)
21119 bfd_reloc_code_real_type code
;
21121 reloc
= (arelent
*) xmalloc (sizeof (arelent
));
21123 reloc
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
21124 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
21125 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
21127 if (fixp
->fx_pcrel
)
21129 if (section
->use_rela_p
)
21130 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
21132 fixp
->fx_offset
= reloc
->address
;
21134 reloc
->addend
= fixp
->fx_offset
;
21136 switch (fixp
->fx_r_type
)
21139 if (fixp
->fx_pcrel
)
21141 code
= BFD_RELOC_8_PCREL
;
21146 if (fixp
->fx_pcrel
)
21148 code
= BFD_RELOC_16_PCREL
;
21153 if (fixp
->fx_pcrel
)
21155 code
= BFD_RELOC_32_PCREL
;
21159 case BFD_RELOC_ARM_MOVW
:
21160 if (fixp
->fx_pcrel
)
21162 code
= BFD_RELOC_ARM_MOVW_PCREL
;
21166 case BFD_RELOC_ARM_MOVT
:
21167 if (fixp
->fx_pcrel
)
21169 code
= BFD_RELOC_ARM_MOVT_PCREL
;
21173 case BFD_RELOC_ARM_THUMB_MOVW
:
21174 if (fixp
->fx_pcrel
)
21176 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
21180 case BFD_RELOC_ARM_THUMB_MOVT
:
21181 if (fixp
->fx_pcrel
)
21183 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
21187 case BFD_RELOC_NONE
:
21188 case BFD_RELOC_ARM_PCREL_BRANCH
:
21189 case BFD_RELOC_ARM_PCREL_BLX
:
21190 case BFD_RELOC_RVA
:
21191 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
21192 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
21193 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
21194 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21195 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21196 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21197 case BFD_RELOC_VTABLE_ENTRY
:
21198 case BFD_RELOC_VTABLE_INHERIT
:
21200 case BFD_RELOC_32_SECREL
:
21202 code
= fixp
->fx_r_type
;
21205 case BFD_RELOC_THUMB_PCREL_BLX
:
21207 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
21208 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
21211 code
= BFD_RELOC_THUMB_PCREL_BLX
;
21214 case BFD_RELOC_ARM_LITERAL
:
21215 case BFD_RELOC_ARM_HWLITERAL
:
21216 /* If this is called then the a literal has
21217 been referenced across a section boundary. */
21218 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21219 _("literal referenced across section boundary"));
21223 case BFD_RELOC_ARM_GOT32
:
21224 case BFD_RELOC_ARM_GOTOFF
:
21225 case BFD_RELOC_ARM_GOT_PREL
:
21226 case BFD_RELOC_ARM_PLT32
:
21227 case BFD_RELOC_ARM_TARGET1
:
21228 case BFD_RELOC_ARM_ROSEGREL32
:
21229 case BFD_RELOC_ARM_SBREL32
:
21230 case BFD_RELOC_ARM_PREL31
:
21231 case BFD_RELOC_ARM_TARGET2
:
21232 case BFD_RELOC_ARM_TLS_LE32
:
21233 case BFD_RELOC_ARM_TLS_LDO32
:
21234 case BFD_RELOC_ARM_PCREL_CALL
:
21235 case BFD_RELOC_ARM_PCREL_JUMP
:
21236 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
21237 case BFD_RELOC_ARM_ALU_PC_G0
:
21238 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
21239 case BFD_RELOC_ARM_ALU_PC_G1
:
21240 case BFD_RELOC_ARM_ALU_PC_G2
:
21241 case BFD_RELOC_ARM_LDR_PC_G0
:
21242 case BFD_RELOC_ARM_LDR_PC_G1
:
21243 case BFD_RELOC_ARM_LDR_PC_G2
:
21244 case BFD_RELOC_ARM_LDRS_PC_G0
:
21245 case BFD_RELOC_ARM_LDRS_PC_G1
:
21246 case BFD_RELOC_ARM_LDRS_PC_G2
:
21247 case BFD_RELOC_ARM_LDC_PC_G0
:
21248 case BFD_RELOC_ARM_LDC_PC_G1
:
21249 case BFD_RELOC_ARM_LDC_PC_G2
:
21250 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
21251 case BFD_RELOC_ARM_ALU_SB_G0
:
21252 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
21253 case BFD_RELOC_ARM_ALU_SB_G1
:
21254 case BFD_RELOC_ARM_ALU_SB_G2
:
21255 case BFD_RELOC_ARM_LDR_SB_G0
:
21256 case BFD_RELOC_ARM_LDR_SB_G1
:
21257 case BFD_RELOC_ARM_LDR_SB_G2
:
21258 case BFD_RELOC_ARM_LDRS_SB_G0
:
21259 case BFD_RELOC_ARM_LDRS_SB_G1
:
21260 case BFD_RELOC_ARM_LDRS_SB_G2
:
21261 case BFD_RELOC_ARM_LDC_SB_G0
:
21262 case BFD_RELOC_ARM_LDC_SB_G1
:
21263 case BFD_RELOC_ARM_LDC_SB_G2
:
21264 case BFD_RELOC_ARM_V4BX
:
21265 code
= fixp
->fx_r_type
;
21268 case BFD_RELOC_ARM_TLS_GD32
:
21269 case BFD_RELOC_ARM_TLS_IE32
:
21270 case BFD_RELOC_ARM_TLS_LDM32
:
21271 /* BFD will include the symbol's address in the addend.
21272 But we don't want that, so subtract it out again here. */
21273 if (!S_IS_COMMON (fixp
->fx_addsy
))
21274 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
21275 code
= fixp
->fx_r_type
;
21279 case BFD_RELOC_ARM_IMMEDIATE
:
21280 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21281 _("internal relocation (type: IMMEDIATE) not fixed up"));
21284 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
21285 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21286 _("ADRL used for a symbol not defined in the same file"));
21289 case BFD_RELOC_ARM_OFFSET_IMM
:
21290 if (section
->use_rela_p
)
21292 code
= fixp
->fx_r_type
;
21296 if (fixp
->fx_addsy
!= NULL
21297 && !S_IS_DEFINED (fixp
->fx_addsy
)
21298 && S_IS_LOCAL (fixp
->fx_addsy
))
21300 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21301 _("undefined local label `%s'"),
21302 S_GET_NAME (fixp
->fx_addsy
));
21306 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21307 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
21314 switch (fixp
->fx_r_type
)
21316 case BFD_RELOC_NONE
: type
= "NONE"; break;
21317 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
21318 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
21319 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
21320 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
21321 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
21322 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
21323 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
21324 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
21325 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
21326 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
21327 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
21328 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
21329 default: type
= _("<unknown>"); break;
21331 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21332 _("cannot represent %s relocation in this object file format"),
21339 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
21341 && fixp
->fx_addsy
== GOT_symbol
)
21343 code
= BFD_RELOC_ARM_GOTPC
;
21344 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
21348 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
21350 if (reloc
->howto
== NULL
)
21352 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
21353 _("cannot represent %s relocation in this object file format"),
21354 bfd_get_reloc_code_name (code
));
21358 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
21359 vtable entry to be used in the relocation's section offset. */
21360 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
21361 reloc
->address
= fixp
->fx_offset
;
21366 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
21369 cons_fix_new_arm (fragS
* frag
,
21374 bfd_reloc_code_real_type type
;
21378 FIXME: @@ Should look at CPU word size. */
21382 type
= BFD_RELOC_8
;
21385 type
= BFD_RELOC_16
;
21389 type
= BFD_RELOC_32
;
21392 type
= BFD_RELOC_64
;
21397 if (exp
->X_op
== O_secrel
)
21399 exp
->X_op
= O_symbol
;
21400 type
= BFD_RELOC_32_SECREL
;
21404 fix_new_exp (frag
, where
, (int) size
, exp
, pcrel
, type
);
21407 #if defined (OBJ_COFF)
21409 arm_validate_fix (fixS
* fixP
)
21411 /* If the destination of the branch is a defined symbol which does not have
21412 the THUMB_FUNC attribute, then we must be calling a function which has
21413 the (interfacearm) attribute. We look for the Thumb entry point to that
21414 function and change the branch to refer to that function instead. */
21415 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
21416 && fixP
->fx_addsy
!= NULL
21417 && S_IS_DEFINED (fixP
->fx_addsy
)
21418 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
21420 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
21427 arm_force_relocation (struct fix
* fixp
)
21429 #if defined (OBJ_COFF) && defined (TE_PE)
21430 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
21434 /* In case we have a call or a branch to a function in ARM ISA mode from
21435 a thumb function or vice-versa force the relocation. These relocations
21436 are cleared off for some cores that might have blx and simple transformations
21440 switch (fixp
->fx_r_type
)
21442 case BFD_RELOC_ARM_PCREL_JUMP
:
21443 case BFD_RELOC_ARM_PCREL_CALL
:
21444 case BFD_RELOC_THUMB_PCREL_BLX
:
21445 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
21449 case BFD_RELOC_ARM_PCREL_BLX
:
21450 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21451 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21452 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21453 if (ARM_IS_FUNC (fixp
->fx_addsy
))
21462 /* Resolve these relocations even if the symbol is extern or weak. */
21463 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
21464 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
21465 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
21466 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
21467 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
21468 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
21469 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
)
21472 /* Always leave these relocations for the linker. */
21473 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
21474 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
21475 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
21478 /* Always generate relocations against function symbols. */
21479 if (fixp
->fx_r_type
== BFD_RELOC_32
21481 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
21484 return generic_force_reloc (fixp
);
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

/* Return TRUE when the fix FIXP may be redirected at a section symbol,
   FALSE when the relocation must stay against the original symbol.  */
bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  /* A fix with no symbol has nothing to preserve.  */
  if (fixP->fx_addsy == NULL)
    return TRUE;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  /* Thumb functions must keep their symbol so the low bit of the
     address (the Thumb bit) survives into the relocation.  */
  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
21556 elf32_arm_target_format (void)
21559 return (target_big_endian
21560 ? "elf32-bigarm-symbian"
21561 : "elf32-littlearm-symbian");
21562 #elif defined (TE_VXWORKS)
21563 return (target_big_endian
21564 ? "elf32-bigarm-vxworks"
21565 : "elf32-littlearm-vxworks");
21567 if (target_big_endian
)
21568 return "elf32-bigarm";
21570 return "elf32-littlearm";
21575 armelf_frob_symbol (symbolS
* symp
,
21578 elf_frob_symbol (symp
, puntp
);
21582 /* MD interface: Finalization. */
21587 literal_pool
* pool
;
21589 /* Ensure that all the IT blocks are properly closed. */
21590 check_it_blocks_finished ();
21592 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
21594 /* Put it at the end of the relevant section. */
21595 subseg_set (pool
->section
, pool
->sub_section
);
21597 arm_elf_change_section ();
21604 /* Remove any excess mapping symbols generated for alignment frags in
21605 SEC. We may have created a mapping symbol before a zero byte
21606 alignment; remove it if there's a mapping symbol after the
21609 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
21610 void *dummy ATTRIBUTE_UNUSED
)
21612 segment_info_type
*seginfo
= seg_info (sec
);
21615 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
21618 for (fragp
= seginfo
->frchainP
->frch_root
;
21620 fragp
= fragp
->fr_next
)
21622 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
21623 fragS
*next
= fragp
->fr_next
;
21625 /* Variable-sized frags have been converted to fixed size by
21626 this point. But if this was variable-sized to start with,
21627 there will be a fixed-size frag after it. So don't handle
21629 if (sym
== NULL
|| next
== NULL
)
21632 if (S_GET_VALUE (sym
) < next
->fr_address
)
21633 /* Not at the end of this frag. */
21635 know (S_GET_VALUE (sym
) == next
->fr_address
);
21639 if (next
->tc_frag_data
.first_map
!= NULL
)
21641 /* Next frag starts with a mapping symbol. Discard this
21643 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
21647 if (next
->fr_next
== NULL
)
21649 /* This mapping symbol is at the end of the section. Discard
21651 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
21652 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
21656 /* As long as we have empty frags without any mapping symbols,
21658 /* If the next frag is non-empty and does not start with a
21659 mapping symbol, then this mapping symbol is required. */
21660 if (next
->fr_address
!= next
->fr_next
->fr_address
)
21663 next
= next
->fr_next
;
21665 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ones, so that interworking information survives into the
   object file (COFF storage classes / ELF st_info encodings).  */
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols for the COFF back end.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) must not be retyped.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
#endif
}
21750 /* MD interface: Initialization. */
21753 set_constant_flonums (void)
21757 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
21758 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
21762 /* Auto-select Thumb mode if it's the only available instruction set for the
21763 given architecture. */
21766 autoselect_thumb_from_cpu_variant (void)
21768 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
21769 opcode_select (16);
21778 if ( (arm_ops_hsh
= hash_new ()) == NULL
21779 || (arm_cond_hsh
= hash_new ()) == NULL
21780 || (arm_shift_hsh
= hash_new ()) == NULL
21781 || (arm_psr_hsh
= hash_new ()) == NULL
21782 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
21783 || (arm_reg_hsh
= hash_new ()) == NULL
21784 || (arm_reloc_hsh
= hash_new ()) == NULL
21785 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
21786 as_fatal (_("virtual memory exhausted"));
21788 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
21789 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
21790 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
21791 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
21792 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
21793 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
21794 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
21795 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
21796 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
21797 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
21798 (void *) (v7m_psrs
+ i
));
21799 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
21800 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
21802 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
21804 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
21805 (void *) (barrier_opt_names
+ i
));
21807 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
21808 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (void *) (reloc_names
+ i
));
21811 set_constant_flonums ();
21813 /* Set the cpu variant based on the command-line options. We prefer
21814 -mcpu= over -march= if both are set (as for GCC); and we prefer
21815 -mfpu= over any other way of setting the floating point unit.
21816 Use of legacy options with new options are faulted. */
21819 if (mcpu_cpu_opt
|| march_cpu_opt
)
21820 as_bad (_("use of old and new-style options to set CPU type"));
21822 mcpu_cpu_opt
= legacy_cpu
;
21824 else if (!mcpu_cpu_opt
)
21825 mcpu_cpu_opt
= march_cpu_opt
;
21830 as_bad (_("use of old and new-style options to set FPU type"));
21832 mfpu_opt
= legacy_fpu
;
21834 else if (!mfpu_opt
)
21836 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
21837 || defined (TE_NetBSD) || defined (TE_VXWORKS))
21838 /* Some environments specify a default FPU. If they don't, infer it
21839 from the processor. */
21841 mfpu_opt
= mcpu_fpu_opt
;
21843 mfpu_opt
= march_fpu_opt
;
21845 mfpu_opt
= &fpu_default
;
21851 if (mcpu_cpu_opt
!= NULL
)
21852 mfpu_opt
= &fpu_default
;
21853 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
21854 mfpu_opt
= &fpu_arch_vfp_v2
;
21856 mfpu_opt
= &fpu_arch_fpa
;
21862 mcpu_cpu_opt
= &cpu_default
;
21863 selected_cpu
= cpu_default
;
21867 selected_cpu
= *mcpu_cpu_opt
;
21869 mcpu_cpu_opt
= &arm_arch_any
;
21872 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
21874 autoselect_thumb_from_cpu_variant ();
21876 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
21878 #if defined OBJ_COFF || defined OBJ_ELF
21880 unsigned int flags
= 0;
21882 #if defined OBJ_ELF
21883 flags
= meabi_flags
;
21885 switch (meabi_flags
)
21887 case EF_ARM_EABI_UNKNOWN
:
21889 /* Set the flags in the private structure. */
21890 if (uses_apcs_26
) flags
|= F_APCS26
;
21891 if (support_interwork
) flags
|= F_INTERWORK
;
21892 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
21893 if (pic_code
) flags
|= F_PIC
;
21894 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
21895 flags
|= F_SOFT_FLOAT
;
21897 switch (mfloat_abi_opt
)
21899 case ARM_FLOAT_ABI_SOFT
:
21900 case ARM_FLOAT_ABI_SOFTFP
:
21901 flags
|= F_SOFT_FLOAT
;
21904 case ARM_FLOAT_ABI_HARD
:
21905 if (flags
& F_SOFT_FLOAT
)
21906 as_bad (_("hard-float conflicts with specified fpu"));
21910 /* Using pure-endian doubles (even if soft-float). */
21911 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
21912 flags
|= F_VFP_FLOAT
;
21914 #if defined OBJ_ELF
21915 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
21916 flags
|= EF_ARM_MAVERICK_FLOAT
;
21919 case EF_ARM_EABI_VER4
:
21920 case EF_ARM_EABI_VER5
:
21921 /* No additional flags to set. */
21928 bfd_set_private_flags (stdoutput
, flags
);
21930 /* We have run out flags in the COFF header to encode the
21931 status of ATPCS support, so instead we create a dummy,
21932 empty, debug section called .arm.atpcs. */
21937 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
21941 bfd_set_section_flags
21942 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
21943 bfd_set_section_size (stdoutput
, sec
, 0);
21944 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
21950 /* Record the CPU type as well. */
21951 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
21952 mach
= bfd_mach_arm_iWMMXt2
;
21953 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
21954 mach
= bfd_mach_arm_iWMMXt
;
21955 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
21956 mach
= bfd_mach_arm_XScale
;
21957 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
21958 mach
= bfd_mach_arm_ep9312
;
21959 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
21960 mach
= bfd_mach_arm_5TE
;
21961 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
21963 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
21964 mach
= bfd_mach_arm_5T
;
21966 mach
= bfd_mach_arm_5
;
21968 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
21970 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
21971 mach
= bfd_mach_arm_4T
;
21973 mach
= bfd_mach_arm_4
;
21975 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
21976 mach
= bfd_mach_arm_3M
;
21977 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
21978 mach
= bfd_mach_arm_3
;
21979 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
21980 mach
= bfd_mach_arm_2a
;
21981 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
21982 mach
= bfd_mach_arm_2
;
21984 mach
= bfd_mach_arm_unknown
;
21986 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
21989 /* Command line processing. */
21992 Invocation line includes a switch not recognized by the base assembler.
21993 See if it's a processor-specific option.
21995 This routine is somewhat complicated by the need for backwards
21996 compatibility (since older releases of gcc can't be changed).
21997 The new options try to make the interface as compatible as
22000 New options (supported) are:
22002 -mcpu=<cpu name> Assemble for selected processor
22003 -march=<architecture name> Assemble for selected architecture
22004 -mfpu=<fpu architecture> Assemble for selected FPU.
22005 -EB/-mbig-endian Big-endian
22006 -EL/-mlittle-endian Little-endian
22007 -k Generate PIC code
22008 -mthumb Start in Thumb mode
22009 -mthumb-interwork Code supports ARM/Thumb interworking
22011 -m[no-]warn-deprecated Warn about deprecated features
22013 For now we will also provide support for:
22015 -mapcs-32 32-bit Program counter
22016 -mapcs-26 26-bit Program counter
22018 -mapcs-float Floats passed in FP registers
22018 -mapcs-reentrant Reentrant code
22020 (sometime these will probably be replaced with -mapcs=<list of options>
22021 and -matpcs=<list of options>)
22023 The remaining options are only supported for backwards compatibility.
22024 Cpu variants, the arm part is optional:
22025 -m[arm]1 Currently not supported.
22026 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
22027 -m[arm]3 Arm 3 processor
22028 -m[arm]6[xx], Arm 6 processors
22029 -m[arm]7[xx][t][[d]m] Arm 7 processors
22030 -m[arm]8[10] Arm 8 processors
22031 -m[arm]9[20][tdmi] Arm 9 processors
22032 -mstrongarm[110[0]] StrongARM processors
22033 -mxscale XScale processors
22034 -m[arm]v[2345[t[e]]] Arm architectures
22035 -mall All (except the ARM1)
22037 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
22038 -mfpe-old (No float load/store multiples)
22039 -mvfpxd VFP Single precision
22041 -mno-fpu Disable all floating point instructions
22043 The following CPU names are recognized:
22044 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
22045 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
22046 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
22047 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
22048 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
22049 arm10t arm10e, arm1020t, arm1020e, arm10200e,
22050 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
22054 const char * md_shortopts
= "m:k";
22056 #ifdef ARM_BI_ENDIAN
22057 #define OPTION_EB (OPTION_MD_BASE + 0)
22058 #define OPTION_EL (OPTION_MD_BASE + 1)
22060 #if TARGET_BYTES_BIG_ENDIAN
22061 #define OPTION_EB (OPTION_MD_BASE + 0)
22063 #define OPTION_EL (OPTION_MD_BASE + 1)
22066 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
22068 struct option md_longopts
[] =
22071 {"EB", no_argument
, NULL
, OPTION_EB
},
22074 {"EL", no_argument
, NULL
, OPTION_EL
},
22076 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
22077 {NULL
, no_argument
, NULL
, 0}
22080 size_t md_longopts_size
= sizeof (md_longopts
);
22082 struct arm_option_table
22084 char *option
; /* Option name to match. */
22085 char *help
; /* Help information. */
22086 int *var
; /* Variable to change. */
22087 int value
; /* What to change it to. */
22088 char *deprecated
; /* If non-null, print this message. */
22091 struct arm_option_table arm_opts
[] =
22093 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
22094 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
22095 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
22096 &support_interwork
, 1, NULL
},
22097 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
22098 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
22099 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
22101 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
22102 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
22103 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
22104 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
22107 /* These are recognized by the assembler, but have no affect on code. */
22108 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
22109 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
22111 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
22112 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
22113 &warn_on_deprecated
, 0, NULL
},
22114 {NULL
, NULL
, NULL
, 0, NULL
}
22117 struct arm_legacy_option_table
22119 char *option
; /* Option name to match. */
22120 const arm_feature_set
**var
; /* Variable to change. */
22121 const arm_feature_set value
; /* What to change it to. */
22122 char *deprecated
; /* If non-null, print this message. */
22125 const struct arm_legacy_option_table arm_legacy_opts
[] =
22127 /* DON'T add any new processors to this list -- we want the whole list
22128 to go away... Add them to the processors table instead. */
22129 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
22130 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
22131 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
22132 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
22133 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
22134 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
22135 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
22136 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
22137 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
22138 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
22139 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
22140 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
22141 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
22142 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
22143 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
22144 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
22145 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
22146 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
22147 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
22148 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
22149 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
22150 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
22151 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
22152 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
22153 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
22154 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
22155 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
22156 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
22157 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
22158 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
22159 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
22160 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
22161 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
22162 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
22163 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
22164 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
22165 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
22166 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
22167 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
22168 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
22169 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
22170 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
22171 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
22172 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
22173 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
22174 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
22175 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
22176 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
22177 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
22178 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
22179 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
22180 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
22181 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
22182 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
22183 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
22184 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
22185 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
22186 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
22187 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
22188 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
22189 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
22190 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
22191 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
22192 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
22193 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
22194 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
22195 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
22196 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
22197 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
22198 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
22199 N_("use -mcpu=strongarm110")},
22200 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
22201 N_("use -mcpu=strongarm1100")},
22202 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
22203 N_("use -mcpu=strongarm1110")},
22204 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
22205 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
22206 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
22208 /* Architecture variants -- don't add any more to this list either. */
22209 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
22210 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
22211 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
22212 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
22213 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
22214 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
22215 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
22216 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
22217 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
22218 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
22219 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
22220 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
22221 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
22222 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
22223 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
22224 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
22225 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
22226 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
22228 /* Floating point variants -- don't add any more to this list either. */
22229 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
22230 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
22231 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
22232 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
22233 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
22235 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
22238 struct arm_cpu_option_table
22241 const arm_feature_set value
;
22242 /* For some CPUs we assume an FPU unless the user explicitly sets
22244 const arm_feature_set default_fpu
;
22245 /* The canonical name of the CPU, or NULL to use NAME converted to upper
22247 const char *canonical_name
;
22250 /* This list should, at a minimum, contain all the cpu names
22251 recognized by GCC. */
22252 static const struct arm_cpu_option_table arm_cpus
[] =
22254 {"all", ARM_ANY
, FPU_ARCH_FPA
, NULL
},
22255 {"arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
},
22256 {"arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
},
22257 {"arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
22258 {"arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
22259 {"arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22260 {"arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22261 {"arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22262 {"arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22263 {"arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22264 {"arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22265 {"arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
22266 {"arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22267 {"arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
22268 {"arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22269 {"arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
22270 {"arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22271 {"arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22272 {"arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22273 {"arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22274 {"arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22275 {"arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22276 {"arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22277 {"arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22278 {"arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22279 {"arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22280 {"arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22281 {"arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
22282 {"arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22283 {"arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22284 {"arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22285 {"arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22286 {"arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22287 {"strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22288 {"strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22289 {"strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22290 {"strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22291 {"strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22292 {"arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22293 {"arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"},
22294 {"arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22295 {"arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22296 {"arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22297 {"arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
22298 {"fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22299 {"fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
22300 /* For V5 or later processors we default to using VFP; but the user
22301 should really set the FPU type explicitly. */
22302 {"arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
22303 {"arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22304 {"arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
22305 {"arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
22306 {"arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
22307 {"arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
22308 {"arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"},
22309 {"arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22310 {"arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
22311 {"arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"},
22312 {"arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22313 {"arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22314 {"arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
22315 {"arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
22316 {"arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22317 {"arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"},
22318 {"arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
22319 {"arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22320 {"arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22321 {"arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM1026EJ-S"},
22322 {"arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
22323 {"fa626te", ARM_ARCH_V5TE
, FPU_NONE
, NULL
},
22324 {"fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
22325 {"arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"},
22326 {"arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
},
22327 {"arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, "ARM1136JF-S"},
22328 {"arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
},
22329 {"mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, NULL
},
22330 {"mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, NULL
},
22331 {"arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
},
22332 {"arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
},
22333 {"arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
},
22334 {"arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
},
22335 {"cortex-a5", ARM_ARCH_V7A
, FPU_NONE
, NULL
},
22336 {"cortex-a8", ARM_ARCH_V7A
, ARM_FEATURE (0, FPU_VFP_V3
22337 | FPU_NEON_EXT_V1
),
22339 {"cortex-a9", ARM_ARCH_V7A
, ARM_FEATURE (0, FPU_VFP_V3
22340 | FPU_NEON_EXT_V1
),
22342 {"cortex-a15", ARM_ARCH_V7A
, FPU_ARCH_NEON_VFP_V4
,
22344 {"cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, NULL
},
22345 {"cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
, NULL
},
22346 {"cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, NULL
},
22347 {"cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, NULL
},
22348 {"cortex-m1", ARM_ARCH_V6M
, FPU_NONE
, NULL
},
22349 {"cortex-m0", ARM_ARCH_V6M
, FPU_NONE
, NULL
},
22350 /* ??? XSCALE is really an architecture. */
22351 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
22352 /* ??? iwmmxt is not a processor. */
22353 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
},
22354 {"iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
},
22355 {"i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
22357 {"ep9312", ARM_FEATURE (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
), FPU_ARCH_MAVERICK
, "ARM920T"},
22358 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
22361 struct arm_arch_option_table
22364 const arm_feature_set value
;
22365 const arm_feature_set default_fpu
;
22368 /* This list should, at a minimum, contain all the architecture names
22369 recognized by GCC. */
22370 static const struct arm_arch_option_table arm_archs
[] =
22372 {"all", ARM_ANY
, FPU_ARCH_FPA
},
22373 {"armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
},
22374 {"armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
},
22375 {"armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
22376 {"armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
22377 {"armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
},
22378 {"armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
},
22379 {"armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
},
22380 {"armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
},
22381 {"armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
},
22382 {"armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
},
22383 {"armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
},
22384 {"armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
},
22385 {"armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
},
22386 {"armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
},
22387 {"armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
},
22388 {"armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
},
22389 {"armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
},
22390 {"armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
},
22391 {"armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
},
22392 {"armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
},
22393 {"armv6zk", ARM_ARCH_V6ZK
, FPU_ARCH_VFP
},
22394 {"armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
},
22395 {"armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
},
22396 {"armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
},
22397 {"armv6zkt2", ARM_ARCH_V6ZKT2
, FPU_ARCH_VFP
},
22398 {"armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
},
22399 {"armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
},
22400 /* The official spelling of the ARMv7 profile variants is the dashed form.
22401 Accept the non-dashed form for compatibility with old toolchains. */
22402 {"armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
22403 {"armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
22404 {"armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
22405 {"armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
22406 {"armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
22407 {"armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
22408 {"armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
},
22409 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
},
22410 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
},
22411 {"iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
},
22412 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
22415 /* ISA extensions in the co-processor space. */
22416 struct arm_option_cpu_value_table
22419 const arm_feature_set value
;
22422 static const struct arm_option_cpu_value_table arm_extensions
[] =
22424 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK
)},
22425 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE
)},
22426 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT
)},
22427 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2
)},
22428 {NULL
, ARM_ARCH_NONE
}
22431 /* This list should, at a minimum, contain all the fpu names
22432 recognized by GCC. */
22433 static const struct arm_option_cpu_value_table arm_fpus
[] =
22435 {"softfpa", FPU_NONE
},
22436 {"fpe", FPU_ARCH_FPE
},
22437 {"fpe2", FPU_ARCH_FPE
},
22438 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
22439 {"fpa", FPU_ARCH_FPA
},
22440 {"fpa10", FPU_ARCH_FPA
},
22441 {"fpa11", FPU_ARCH_FPA
},
22442 {"arm7500fe", FPU_ARCH_FPA
},
22443 {"softvfp", FPU_ARCH_VFP
},
22444 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
22445 {"vfp", FPU_ARCH_VFP_V2
},
22446 {"vfp9", FPU_ARCH_VFP_V2
},
22447 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
22448 {"vfp10", FPU_ARCH_VFP_V2
},
22449 {"vfp10-r0", FPU_ARCH_VFP_V1
},
22450 {"vfpxd", FPU_ARCH_VFP_V1xD
},
22451 {"vfpv2", FPU_ARCH_VFP_V2
},
22452 {"vfpv3", FPU_ARCH_VFP_V3
},
22453 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
22454 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
22455 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
22456 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
22457 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
22458 {"arm1020t", FPU_ARCH_VFP_V1
},
22459 {"arm1020e", FPU_ARCH_VFP_V2
},
22460 {"arm1136jfs", FPU_ARCH_VFP_V2
},
22461 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
22462 {"maverick", FPU_ARCH_MAVERICK
},
22463 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
22464 {"neon-fp16", FPU_ARCH_NEON_FP16
},
22465 {"vfpv4", FPU_ARCH_VFP_V4
},
22466 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
22467 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
22468 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
22469 {NULL
, ARM_ARCH_NONE
}
/* Simple name -> integer value option entry.  */
struct arm_option_value_table
{
  char *name;	/* Name accepted on the command line.  */
  int value;	/* Value it selects.  */
};
22478 static const struct arm_option_value_table arm_float_abis
[] =
22480 {"hard", ARM_FLOAT_ABI_HARD
},
22481 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
22482 {"soft", ARM_FLOAT_ABI_SOFT
},
22487 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
22488 static const struct arm_option_value_table arm_eabis
[] =
22490 {"gnu", EF_ARM_EABI_UNKNOWN
},
22491 {"4", EF_ARM_EABI_VER4
},
22492 {"5", EF_ARM_EABI_VER5
},
/* Entry for a long option that takes a sub-argument, e.g. -mcpu=NAME.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
22506 arm_parse_extension (char * str
, const arm_feature_set
**opt_p
)
22508 arm_feature_set
*ext_set
= (arm_feature_set
*)
22509 xmalloc (sizeof (arm_feature_set
));
22511 /* Copy the feature set, so that we can modify it. */
22512 *ext_set
= **opt_p
;
22515 while (str
!= NULL
&& *str
!= 0)
22517 const struct arm_option_cpu_value_table
* opt
;
22523 as_bad (_("invalid architectural extension"));
22528 ext
= strchr (str
, '+');
22531 optlen
= ext
- str
;
22533 optlen
= strlen (str
);
22537 as_bad (_("missing architectural extension"));
22541 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
22542 if (strncmp (opt
->name
, str
, optlen
) == 0)
22544 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->value
);
22548 if (opt
->name
== NULL
)
22550 as_bad (_("unknown architectural extension `%s'"), str
);
22561 arm_parse_cpu (char * str
)
22563 const struct arm_cpu_option_table
* opt
;
22564 char * ext
= strchr (str
, '+');
22568 optlen
= ext
- str
;
22570 optlen
= strlen (str
);
22574 as_bad (_("missing cpu name `%s'"), str
);
22578 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
22579 if (strncmp (opt
->name
, str
, optlen
) == 0)
22581 mcpu_cpu_opt
= &opt
->value
;
22582 mcpu_fpu_opt
= &opt
->default_fpu
;
22583 if (opt
->canonical_name
)
22584 strcpy (selected_cpu_name
, opt
->canonical_name
);
22589 for (i
= 0; i
< optlen
; i
++)
22590 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
22591 selected_cpu_name
[i
] = 0;
22595 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
22600 as_bad (_("unknown cpu `%s'"), str
);
22605 arm_parse_arch (char * str
)
22607 const struct arm_arch_option_table
*opt
;
22608 char *ext
= strchr (str
, '+');
22612 optlen
= ext
- str
;
22614 optlen
= strlen (str
);
22618 as_bad (_("missing architecture name `%s'"), str
);
22622 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
22623 if (streq (opt
->name
, str
))
22625 march_cpu_opt
= &opt
->value
;
22626 march_fpu_opt
= &opt
->default_fpu
;
22627 strcpy (selected_cpu_name
, opt
->name
);
22630 return arm_parse_extension (ext
, &march_cpu_opt
);
22635 as_bad (_("unknown architecture `%s'\n"), str
);
22640 arm_parse_fpu (char * str
)
22642 const struct arm_option_cpu_value_table
* opt
;
22644 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
22645 if (streq (opt
->name
, str
))
22647 mfpu_opt
= &opt
->value
;
22651 as_bad (_("unknown floating point format `%s'\n"), str
);
22656 arm_parse_float_abi (char * str
)
22658 const struct arm_option_value_table
* opt
;
22660 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
22661 if (streq (opt
->name
, str
))
22663 mfloat_abi_opt
= opt
->value
;
22667 as_bad (_("unknown floating point abi `%s'\n"), str
);
22673 arm_parse_eabi (char * str
)
22675 const struct arm_option_value_table
*opt
;
22677 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
22678 if (streq (opt
->name
, str
))
22680 meabi_flags
= opt
->value
;
22683 as_bad (_("unknown EABI `%s'\n"), str
);
22689 arm_parse_it_mode (char * str
)
22691 bfd_boolean ret
= TRUE
;
22693 if (streq ("arm", str
))
22694 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
22695 else if (streq ("thumb", str
))
22696 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
22697 else if (streq ("always", str
))
22698 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
22699 else if (streq ("never", str
))
22700 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
22703 as_bad (_("unknown implicit IT mode `%s', should be "\
22704 "arm, thumb, always, or never."), str
);
22711 struct arm_long_option_table arm_long_opts
[] =
22713 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
22714 arm_parse_cpu
, NULL
},
22715 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
22716 arm_parse_arch
, NULL
},
22717 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
22718 arm_parse_fpu
, NULL
},
22719 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
22720 arm_parse_float_abi
, NULL
},
22722 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
22723 arm_parse_eabi
, NULL
},
22725 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
22726 arm_parse_it_mode
, NULL
},
22727 {NULL
, NULL
, 0, NULL
}
22731 md_parse_option (int c
, char * arg
)
22733 struct arm_option_table
*opt
;
22734 const struct arm_legacy_option_table
*fopt
;
22735 struct arm_long_option_table
*lopt
;
22741 target_big_endian
= 1;
22747 target_big_endian
= 0;
22751 case OPTION_FIX_V4BX
:
22756 /* Listing option. Just ignore these, we don't support additional
22761 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
22763 if (c
== opt
->option
[0]
22764 && ((arg
== NULL
&& opt
->option
[1] == 0)
22765 || streq (arg
, opt
->option
+ 1)))
22767 /* If the option is deprecated, tell the user. */
22768 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
22769 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
22770 arg
? arg
: "", _(opt
->deprecated
));
22772 if (opt
->var
!= NULL
)
22773 *opt
->var
= opt
->value
;
22779 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
22781 if (c
== fopt
->option
[0]
22782 && ((arg
== NULL
&& fopt
->option
[1] == 0)
22783 || streq (arg
, fopt
->option
+ 1)))
22785 /* If the option is deprecated, tell the user. */
22786 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
22787 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
22788 arg
? arg
: "", _(fopt
->deprecated
));
22790 if (fopt
->var
!= NULL
)
22791 *fopt
->var
= &fopt
->value
;
22797 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
22799 /* These options are expected to have an argument. */
22800 if (c
== lopt
->option
[0]
22802 && strncmp (arg
, lopt
->option
+ 1,
22803 strlen (lopt
->option
+ 1)) == 0)
22805 /* If the option is deprecated, tell the user. */
22806 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
22807 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
22808 _(lopt
->deprecated
));
22810 /* Call the sup-option parser. */
22811 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
22822 md_show_usage (FILE * fp
)
22824 struct arm_option_table
*opt
;
22825 struct arm_long_option_table
*lopt
;
22827 fprintf (fp
, _(" ARM-specific assembler options:\n"));
22829 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
22830 if (opt
->help
!= NULL
)
22831 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
22833 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
22834 if (lopt
->help
!= NULL
)
22835 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
22839 -EB assemble code for a big-endian cpu\n"));
22844 -EL assemble code for a little-endian cpu\n"));
22848 --fix-v4bx Allow BX in ARMv4 code\n"));
22856 arm_feature_set flags
;
22857 } cpu_arch_ver_table
;
22859 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
22860 least features first. */
22861 static const cpu_arch_ver_table cpu_arch_ver
[] =
22867 {4, ARM_ARCH_V5TE
},
22868 {5, ARM_ARCH_V5TEJ
},
22872 {11, ARM_ARCH_V6M
},
22873 {8, ARM_ARCH_V6T2
},
22874 {10, ARM_ARCH_V7A
},
22875 {10, ARM_ARCH_V7R
},
22876 {10, ARM_ARCH_V7M
},
22880 /* Set an attribute if it has not already been set by the user. */
22882 aeabi_set_attribute_int (int tag
, int value
)
22885 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
22886 || !attributes_set_explicitly
[tag
])
22887 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
22891 aeabi_set_attribute_string (int tag
, const char *value
)
22894 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
22895 || !attributes_set_explicitly
[tag
])
22896 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
22899 /* Set the public EABI object attributes. */
22901 aeabi_set_public_attributes (void)
22904 arm_feature_set flags
;
22905 arm_feature_set tmp
;
22906 const cpu_arch_ver_table
*p
;
22908 /* Choose the architecture based on the capabilities of the requested cpu
22909 (if any) and/or the instructions actually used. */
22910 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
22911 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
22912 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
22913 /*Allow the user to override the reported architecture. */
22916 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
22917 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
22922 for (p
= cpu_arch_ver
; p
->val
; p
++)
22924 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
22927 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
22931 /* The table lookup above finds the last architecture to contribute
22932 a new feature. Unfortunately, Tag13 is a subset of the union of
22933 v6T2 and v7-M, so it is never seen as contributing a new feature.
22934 We can not search for the last entry which is entirely used,
22935 because if no CPU is specified we build up only those flags
22936 actually used. Perhaps we should separate out the specified
22937 and implicit cases. Avoid taking this path for -march=all by
22938 checking for contradictory v7-A / v7-M features. */
22940 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
22941 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
22942 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
22945 /* Tag_CPU_name. */
22946 if (selected_cpu_name
[0])
22950 q
= selected_cpu_name
;
22951 if (strncmp (q
, "armv", 4) == 0)
22956 for (i
= 0; q
[i
]; i
++)
22957 q
[i
] = TOUPPER (q
[i
]);
22959 aeabi_set_attribute_string (Tag_CPU_name
, q
);
22962 /* Tag_CPU_arch. */
22963 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
22965 /* Tag_CPU_arch_profile. */
22966 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
))
22967 aeabi_set_attribute_int (Tag_CPU_arch_profile
, 'A');
22968 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
22969 aeabi_set_attribute_int (Tag_CPU_arch_profile
, 'R');
22970 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
22971 aeabi_set_attribute_int (Tag_CPU_arch_profile
, 'M');
22973 /* Tag_ARM_ISA_use. */
22974 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
22976 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
22978 /* Tag_THUMB_ISA_use. */
22979 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
22981 aeabi_set_attribute_int (Tag_THUMB_ISA_use
,
22982 ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
) ? 2 : 1);
22984 /* Tag_VFP_arch. */
22985 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
22986 aeabi_set_attribute_int (Tag_VFP_arch
,
22987 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
22989 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
22990 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
22991 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
22992 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
22993 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
22994 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
22995 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
22996 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
22997 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
22999 /* Tag_ABI_HardFP_use. */
23000 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
23001 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
23002 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
23004 /* Tag_WMMX_arch. */
23005 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
23006 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
23007 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
23008 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
23010 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
23011 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
23012 aeabi_set_attribute_int
23013 (Tag_Advanced_SIMD_arch
, (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
)
23016 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
23017 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
))
23018 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
23021 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
))
23022 aeabi_set_attribute_int (Tag_DIV_use
, 0);
23023 /* Fill this in when gas supports v7a sdiv/udiv.
23024 else if (... v7a with div extension used ...)
23025 aeabi_set_attribute_int (Tag_DIV_use, 2); */
23027 aeabi_set_attribute_int (Tag_DIV_use
, 1);
23030 /* Add the default contents for the .ARM.attributes section. */
23034 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
23037 aeabi_set_public_attributes ();
23039 #endif /* OBJ_ELF */
23042 /* Parse a .cpu directive. */
23045 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
23047 const struct arm_cpu_option_table
*opt
;
23051 name
= input_line_pointer
;
23052 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23053 input_line_pointer
++;
23054 saved_char
= *input_line_pointer
;
23055 *input_line_pointer
= 0;
23057 /* Skip the first "all" entry. */
23058 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
23059 if (streq (opt
->name
, name
))
23061 mcpu_cpu_opt
= &opt
->value
;
23062 selected_cpu
= opt
->value
;
23063 if (opt
->canonical_name
)
23064 strcpy (selected_cpu_name
, opt
->canonical_name
);
23068 for (i
= 0; opt
->name
[i
]; i
++)
23069 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
23070 selected_cpu_name
[i
] = 0;
23072 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
23073 *input_line_pointer
= saved_char
;
23074 demand_empty_rest_of_line ();
23077 as_bad (_("unknown cpu `%s'"), name
);
23078 *input_line_pointer
= saved_char
;
23079 ignore_rest_of_line ();
23083 /* Parse a .arch directive. */
23086 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
23088 const struct arm_arch_option_table
*opt
;
23092 name
= input_line_pointer
;
23093 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23094 input_line_pointer
++;
23095 saved_char
= *input_line_pointer
;
23096 *input_line_pointer
= 0;
23098 /* Skip the first "all" entry. */
23099 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
23100 if (streq (opt
->name
, name
))
23102 mcpu_cpu_opt
= &opt
->value
;
23103 selected_cpu
= opt
->value
;
23104 strcpy (selected_cpu_name
, opt
->name
);
23105 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
23106 *input_line_pointer
= saved_char
;
23107 demand_empty_rest_of_line ();
23111 as_bad (_("unknown architecture `%s'\n"), name
);
23112 *input_line_pointer
= saved_char
;
23113 ignore_rest_of_line ();
23117 /* Parse a .object_arch directive. */
23120 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
23122 const struct arm_arch_option_table
*opt
;
23126 name
= input_line_pointer
;
23127 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23128 input_line_pointer
++;
23129 saved_char
= *input_line_pointer
;
23130 *input_line_pointer
= 0;
23132 /* Skip the first "all" entry. */
23133 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
23134 if (streq (opt
->name
, name
))
23136 object_arch
= &opt
->value
;
23137 *input_line_pointer
= saved_char
;
23138 demand_empty_rest_of_line ();
23142 as_bad (_("unknown architecture `%s'\n"), name
);
23143 *input_line_pointer
= saved_char
;
23144 ignore_rest_of_line ();
23147 /* Parse a .fpu directive. */
23150 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
23152 const struct arm_option_cpu_value_table
*opt
;
23156 name
= input_line_pointer
;
23157 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
23158 input_line_pointer
++;
23159 saved_char
= *input_line_pointer
;
23160 *input_line_pointer
= 0;
23162 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
23163 if (streq (opt
->name
, name
))
23165 mfpu_opt
= &opt
->value
;
23166 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
23167 *input_line_pointer
= saved_char
;
23168 demand_empty_rest_of_line ();
23172 as_bad (_("unknown floating point format `%s'\n"), name
);
23173 *input_line_pointer
= saved_char
;
23174 ignore_rest_of_line ();
23177 /* Copy symbol information. */
23180 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
23182 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
23186 /* Given a symbolic attribute NAME, return the proper integer value.
23187 Returns -1 if the attribute is not known. */
23190 arm_convert_symbolic_attribute (const char *name
)
23192 static const struct
23197 attribute_table
[] =
23199 /* When you modify this table you should
23200 also modify the list in doc/c-arm.texi. */
23201 #define T(tag) {#tag, tag}
23202 T (Tag_CPU_raw_name
),
23205 T (Tag_CPU_arch_profile
),
23206 T (Tag_ARM_ISA_use
),
23207 T (Tag_THUMB_ISA_use
),
23211 T (Tag_Advanced_SIMD_arch
),
23212 T (Tag_PCS_config
),
23213 T (Tag_ABI_PCS_R9_use
),
23214 T (Tag_ABI_PCS_RW_data
),
23215 T (Tag_ABI_PCS_RO_data
),
23216 T (Tag_ABI_PCS_GOT_use
),
23217 T (Tag_ABI_PCS_wchar_t
),
23218 T (Tag_ABI_FP_rounding
),
23219 T (Tag_ABI_FP_denormal
),
23220 T (Tag_ABI_FP_exceptions
),
23221 T (Tag_ABI_FP_user_exceptions
),
23222 T (Tag_ABI_FP_number_model
),
23223 T (Tag_ABI_align_needed
),
23224 T (Tag_ABI_align8_needed
),
23225 T (Tag_ABI_align_preserved
),
23226 T (Tag_ABI_align8_preserved
),
23227 T (Tag_ABI_enum_size
),
23228 T (Tag_ABI_HardFP_use
),
23229 T (Tag_ABI_VFP_args
),
23230 T (Tag_ABI_WMMX_args
),
23231 T (Tag_ABI_optimization_goals
),
23232 T (Tag_ABI_FP_optimization_goals
),
23233 T (Tag_compatibility
),
23234 T (Tag_CPU_unaligned_access
),
23235 T (Tag_FP_HP_extension
),
23236 T (Tag_VFP_HP_extension
),
23237 T (Tag_ABI_FP_16bit_format
),
23238 T (Tag_MPextension_use
),
23240 T (Tag_nodefaults
),
23241 T (Tag_also_compatible_with
),
23242 T (Tag_conformance
),
23244 T (Tag_Virtualization_use
),
23245 /* We deliberately do not include Tag_MPextension_use_legacy. */
23253 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
23254 if (streq (name
, attribute_table
[i
].name
))
23255 return attribute_table
[i
].tag
;
23261 /* Apply sym value for relocations only in the case that
23262 they are for local symbols and you have the respective
23263 architectural feature for blx and simple switches. */
23265 arm_apply_sym_value (struct fix
* fixP
)
23268 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23269 && !S_IS_EXTERNAL (fixP
->fx_addsy
))
23271 switch (fixP
->fx_r_type
)
23273 case BFD_RELOC_ARM_PCREL_BLX
:
23274 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23275 if (ARM_IS_FUNC (fixP
->fx_addsy
))
23279 case BFD_RELOC_ARM_PCREL_CALL
:
23280 case BFD_RELOC_THUMB_PCREL_BLX
:
23281 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
23292 #endif /* OBJ_ELF */