1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
36 #include "opcode/arm.h"
40 #include "dwarf2dbg.h"
41 #include "dw2gencfi.h"
44 /* XXX Set this to 1 after the next binutils release. */
45 #define WARN_DEPRECATED 0
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
51 /* This structure holds the unwinding state. */
56 symbolS
* table_entry
;
57 symbolS
* personality_routine
;
58 int personality_index
;
59 /* The segment containing the function. */
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes
;
66 /* The number of bytes pushed to the stack. */
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset
;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
76 /* Nonzero if an unwind_setfp directive has been seen. */
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored
:1;
82 /* Bit N indicates that an R_ARM_NONE relocation has been output for
83 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
84 emitted only once per section, to save unnecessary bloat. */
85 static unsigned int marked_pr_dependency
= 0;
96 /* Types of processor to assemble for. */
98 #if defined __XSCALE__
99 #define CPU_DEFAULT ARM_ARCH_XSCALE
101 #if defined __thumb__
102 #define CPU_DEFAULT ARM_ARCH_V5T
109 # define FPU_DEFAULT FPU_ARCH_FPA
110 # elif defined (TE_NetBSD)
112 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
114 /* Legacy a.out format. */
115 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
117 # elif defined (TE_VXWORKS)
118 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
120 /* For backwards compatibility, default to FPA. */
121 # define FPU_DEFAULT FPU_ARCH_FPA
123 #endif /* ifndef FPU_DEFAULT */
125 #define streq(a, b) (strcmp (a, b) == 0)
127 static arm_feature_set cpu_variant
;
128 static arm_feature_set arm_arch_used
;
129 static arm_feature_set thumb_arch_used
;
131 /* Flags stored in private area of BFD structure. */
132 static int uses_apcs_26
= FALSE
;
133 static int atpcs
= FALSE
;
134 static int support_interwork
= FALSE
;
135 static int uses_apcs_float
= FALSE
;
136 static int pic_code
= FALSE
;
138 /* Variables that we set while parsing command-line options. Once all
139 options have been read we re-process these values to set the real
141 static const arm_feature_set
*legacy_cpu
= NULL
;
142 static const arm_feature_set
*legacy_fpu
= NULL
;
144 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
145 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
146 static const arm_feature_set
*march_cpu_opt
= NULL
;
147 static const arm_feature_set
*march_fpu_opt
= NULL
;
148 static const arm_feature_set
*mfpu_opt
= NULL
;
150 /* Constants for known architecture features. */
151 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
152 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
153 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
154 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
155 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
156 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
157 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
158 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
159 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
162 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
165 static const arm_feature_set arm_ext_v1
= ARM_FEATURE (ARM_EXT_V1
, 0);
/* Feature set for ARMv2: must test the V2 bit, not V1 (every CPU has V1,
   so the previous ARM_EXT_V1 initializer made this check always true).  */
166 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
167 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE (ARM_EXT_V2S
, 0);
168 static const arm_feature_set arm_ext_v3
= ARM_FEATURE (ARM_EXT_V3
, 0);
169 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE (ARM_EXT_V3M
, 0);
170 static const arm_feature_set arm_ext_v4
= ARM_FEATURE (ARM_EXT_V4
, 0);
171 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE (ARM_EXT_V4T
, 0);
172 static const arm_feature_set arm_ext_v5
= ARM_FEATURE (ARM_EXT_V5
, 0);
173 static const arm_feature_set arm_ext_v4t_5
=
174 ARM_FEATURE (ARM_EXT_V4T
| ARM_EXT_V5
, 0);
175 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE (ARM_EXT_V5T
, 0);
176 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE (ARM_EXT_V5E
, 0);
177 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE (ARM_EXT_V5ExP
, 0);
178 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE (ARM_EXT_V5J
, 0);
179 static const arm_feature_set arm_ext_v6
= ARM_FEATURE (ARM_EXT_V6
, 0);
180 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE (ARM_EXT_V6K
, 0);
181 static const arm_feature_set arm_ext_v6z
= ARM_FEATURE (ARM_EXT_V6Z
, 0);
182 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE (ARM_EXT_V6T2
, 0);
183 static const arm_feature_set arm_ext_v6_notm
= ARM_FEATURE (ARM_EXT_V6_NOTM
, 0);
184 static const arm_feature_set arm_ext_div
= ARM_FEATURE (ARM_EXT_DIV
, 0);
185 static const arm_feature_set arm_ext_v7
= ARM_FEATURE (ARM_EXT_V7
, 0);
186 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE (ARM_EXT_V7A
, 0);
187 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE (ARM_EXT_V7R
, 0);
188 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE (ARM_EXT_V7M
, 0);
190 static const arm_feature_set arm_arch_any
= ARM_ANY
;
191 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1);
192 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
193 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
195 static const arm_feature_set arm_cext_iwmmxt
=
196 ARM_FEATURE (0, ARM_CEXT_IWMMXT
);
197 static const arm_feature_set arm_cext_xscale
=
198 ARM_FEATURE (0, ARM_CEXT_XSCALE
);
199 static const arm_feature_set arm_cext_maverick
=
200 ARM_FEATURE (0, ARM_CEXT_MAVERICK
);
201 static const arm_feature_set fpu_fpa_ext_v1
= ARM_FEATURE (0, FPU_FPA_EXT_V1
);
202 static const arm_feature_set fpu_fpa_ext_v2
= ARM_FEATURE (0, FPU_FPA_EXT_V2
);
203 static const arm_feature_set fpu_vfp_ext_v1xd
=
204 ARM_FEATURE (0, FPU_VFP_EXT_V1xD
);
205 static const arm_feature_set fpu_vfp_ext_v1
= ARM_FEATURE (0, FPU_VFP_EXT_V1
);
206 static const arm_feature_set fpu_vfp_ext_v2
= ARM_FEATURE (0, FPU_VFP_EXT_V2
);
207 static const arm_feature_set fpu_vfp_ext_v3
= ARM_FEATURE (0, FPU_VFP_EXT_V3
);
208 static const arm_feature_set fpu_neon_ext_v1
= ARM_FEATURE (0, FPU_NEON_EXT_V1
);
209 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
210 ARM_FEATURE (0, FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
212 static int mfloat_abi_opt
= -1;
213 /* Record user cpu selection for object attributes. */
214 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
215 /* Must be long enough to hold any of the names in arm_cpus. */
216 static char selected_cpu_name
[16];
219 static int meabi_flags
= EABI_DEFAULT
;
221 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
226 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
227 symbolS
* GOT_symbol
;
230 /* 0: assemble for ARM,
231 1: assemble for Thumb,
232 2: assemble for Thumb even though target CPU does not support thumb
234 static int thumb_mode
= 0;
236 /* If unified_syntax is true, we are processing the new unified
237 ARM/Thumb syntax. Important differences from the old ARM mode:
239 - Immediate operands do not require a # prefix.
240 - Conditional affixes always appear at the end of the
241 instruction. (For backward compatibility, those instructions
242 that formerly had them in the middle, continue to accept them
244 - The IT instruction may appear, and if it does is validated
245 against subsequent conditional affixes. It does not generate
248 Important differences from the old Thumb mode:
250 - Immediate operands do not require a # prefix.
251 - Most of the V6T2 instructions are only available in unified mode.
252 - The .N and .W suffixes are recognized and honored (it is an error
253 if they cannot be honored).
254 - All instructions set the flags if and only if they have an 's' affix.
255 - Conditional affixes may be used. They are validated against
256 preceding IT instructions. Unlike ARM mode, you cannot use a
257 conditional affix except in the scope of an IT instruction. */
259 static bfd_boolean unified_syntax
= FALSE
;
274 enum neon_el_type type
;
278 #define NEON_MAX_TYPE_ELS 4
282 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
289 unsigned long instruction
;
293 /* "uncond_value" is set to the value in place of the conditional field in
294 unconditional versions of the instruction, or -1 if nothing is
297 struct neon_type vectype
;
298 /* Set to the opcode if the instruction needs relaxation.
299 Zero if the instruction is not relaxed. */
303 bfd_reloc_code_real_type type
;
312 struct neon_type_el vectype
;
313 unsigned present
: 1; /* Operand present. */
314 unsigned isreg
: 1; /* Operand was a register. */
315 unsigned immisreg
: 1; /* .imm field is a second register. */
316 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
317 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
318 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
319 instructions. This allows us to disambiguate ARM <-> vector insns. */
320 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
321 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
322 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
323 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
324 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
325 unsigned writeback
: 1; /* Operand has trailing ! */
326 unsigned preind
: 1; /* Preindexed address. */
327 unsigned postind
: 1; /* Postindexed address. */
328 unsigned negative
: 1; /* Index register was negated. */
329 unsigned shifted
: 1; /* Shift applied to operation. */
330 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
334 static struct arm_it inst
;
336 #define NUM_FLOAT_VALS 8
338 const char * fp_const
[] =
340 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
343 /* Number of littlenums required to hold an extended precision number. */
344 #define MAX_LITTLENUMS 6
346 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
356 #define CP_T_X 0x00008000
357 #define CP_T_Y 0x00400000
359 #define CONDS_BIT 0x00100000
360 #define LOAD_BIT 0x00100000
362 #define DOUBLE_LOAD_FLAG 0x00000001
366 const char * template;
370 #define COND_ALWAYS 0xE
374 const char *template;
378 struct asm_barrier_opt
380 const char *template;
384 /* The bit that distinguishes CPSR and SPSR. */
385 #define SPSR_BIT (1 << 22)
387 /* The individual PSR flag bits. */
388 #define PSR_c (1 << 16)
389 #define PSR_x (1 << 17)
390 #define PSR_s (1 << 18)
391 #define PSR_f (1 << 19)
396 bfd_reloc_code_real_type reloc
;
401 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
402 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
407 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
410 /* Bits for DEFINED field in neon_typed_alias. */
411 #define NTA_HASTYPE 1
412 #define NTA_HASINDEX 2
414 struct neon_typed_alias
416 unsigned char defined
;
418 struct neon_type_el eltype
;
421 /* ARM register categories. This includes coprocessor numbers and various
422 architecture extensions' registers. */
448 /* Structure for a hash table entry for a register.
449 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
450 information which states whether a vector type or index is specified (for a
451 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
455 unsigned char number
;
457 unsigned char builtin
;
458 struct neon_typed_alias
*neon
;
461 /* Diagnostics used when we don't get a register of the expected type. */
462 const char *const reg_expected_msgs
[] =
464 N_("ARM register expected"),
465 N_("bad or missing co-processor number"),
466 N_("co-processor register expected"),
467 N_("FPA register expected"),
468 N_("VFP single precision register expected"),
469 N_("VFP/Neon double precision register expected"),
470 N_("Neon quad precision register expected"),
471 N_("VFP single or double precision register expected"),
472 N_("Neon double or quad precision register expected"),
473 N_("VFP single, double or Neon quad precision register expected"),
474 N_("VFP system register expected"),
475 N_("Maverick MVF register expected"),
476 N_("Maverick MVD register expected"),
477 N_("Maverick MVFX register expected"),
478 N_("Maverick MVDX register expected"),
479 N_("Maverick MVAX register expected"),
480 N_("Maverick DSPSC register expected"),
481 N_("iWMMXt data register expected"),
482 N_("iWMMXt control register expected"),
483 N_("iWMMXt scalar register expected"),
484 N_("XScale accumulator register expected"),
487 /* Some well known registers that we refer to directly elsewhere. */
492 /* ARM instructions take 4bytes in the object file, Thumb instructions
498 /* Basic string to match. */
499 const char *template;
501 /* Parameters to instruction. */
502 unsigned char operands
[8];
504 /* Conditional tag - see opcode_lookup. */
505 unsigned int tag
: 4;
507 /* Basic instruction code. */
508 unsigned int avalue
: 28;
510 /* Thumb-format instruction code. */
513 /* Which architecture variant provides this instruction. */
514 const arm_feature_set
*avariant
;
515 const arm_feature_set
*tvariant
;
517 /* Function to call to encode instruction in ARM format. */
518 void (* aencode
) (void);
520 /* Function to call to encode instruction in Thumb format. */
521 void (* tencode
) (void);
524 /* Defines for various bits that we will want to toggle. */
525 #define INST_IMMEDIATE 0x02000000
526 #define OFFSET_REG 0x02000000
527 #define HWOFFSET_IMM 0x00400000
528 #define SHIFT_BY_REG 0x00000010
529 #define PRE_INDEX 0x01000000
530 #define INDEX_UP 0x00800000
531 #define WRITE_BACK 0x00200000
532 #define LDM_TYPE_2_OR_3 0x00400000
534 #define LITERAL_MASK 0xf000f000
535 #define OPCODE_MASK 0xfe1fffff
536 #define V4_STR_BIT 0x00000020
538 #define DATA_OP_SHIFT 21
540 #define T2_OPCODE_MASK 0xfe1fffff
541 #define T2_DATA_OP_SHIFT 21
543 /* Codes to distinguish the arithmetic instructions. */
554 #define OPCODE_CMP 10
555 #define OPCODE_CMN 11
556 #define OPCODE_ORR 12
557 #define OPCODE_MOV 13
558 #define OPCODE_BIC 14
559 #define OPCODE_MVN 15
561 #define T2_OPCODE_AND 0
562 #define T2_OPCODE_BIC 1
563 #define T2_OPCODE_ORR 2
564 #define T2_OPCODE_ORN 3
565 #define T2_OPCODE_EOR 4
566 #define T2_OPCODE_ADD 8
567 #define T2_OPCODE_ADC 10
568 #define T2_OPCODE_SBC 11
569 #define T2_OPCODE_SUB 13
570 #define T2_OPCODE_RSB 14
572 #define T_OPCODE_MUL 0x4340
573 #define T_OPCODE_TST 0x4200
574 #define T_OPCODE_CMN 0x42c0
575 #define T_OPCODE_NEG 0x4240
576 #define T_OPCODE_MVN 0x43c0
578 #define T_OPCODE_ADD_R3 0x1800
579 #define T_OPCODE_SUB_R3 0x1a00
580 #define T_OPCODE_ADD_HI 0x4400
581 #define T_OPCODE_ADD_ST 0xb000
582 #define T_OPCODE_SUB_ST 0xb080
583 #define T_OPCODE_ADD_SP 0xa800
584 #define T_OPCODE_ADD_PC 0xa000
585 #define T_OPCODE_ADD_I8 0x3000
586 #define T_OPCODE_SUB_I8 0x3800
587 #define T_OPCODE_ADD_I3 0x1c00
588 #define T_OPCODE_SUB_I3 0x1e00
590 #define T_OPCODE_ASR_R 0x4100
591 #define T_OPCODE_LSL_R 0x4080
592 #define T_OPCODE_LSR_R 0x40c0
593 #define T_OPCODE_ROR_R 0x41c0
594 #define T_OPCODE_ASR_I 0x1000
595 #define T_OPCODE_LSL_I 0x0000
596 #define T_OPCODE_LSR_I 0x0800
598 #define T_OPCODE_MOV_I8 0x2000
599 #define T_OPCODE_CMP_I8 0x2800
600 #define T_OPCODE_CMP_LR 0x4280
601 #define T_OPCODE_MOV_HR 0x4600
602 #define T_OPCODE_CMP_HR 0x4500
604 #define T_OPCODE_LDR_PC 0x4800
605 #define T_OPCODE_LDR_SP 0x9800
606 #define T_OPCODE_STR_SP 0x9000
607 #define T_OPCODE_LDR_IW 0x6800
608 #define T_OPCODE_STR_IW 0x6000
609 #define T_OPCODE_LDR_IH 0x8800
610 #define T_OPCODE_STR_IH 0x8000
611 #define T_OPCODE_LDR_IB 0x7800
612 #define T_OPCODE_STR_IB 0x7000
613 #define T_OPCODE_LDR_RW 0x5800
614 #define T_OPCODE_STR_RW 0x5000
615 #define T_OPCODE_LDR_RH 0x5a00
616 #define T_OPCODE_STR_RH 0x5200
617 #define T_OPCODE_LDR_RB 0x5c00
618 #define T_OPCODE_STR_RB 0x5400
620 #define T_OPCODE_PUSH 0xb400
621 #define T_OPCODE_POP 0xbc00
623 #define T_OPCODE_BRANCH 0xe000
625 #define THUMB_SIZE 2 /* Size of thumb instruction. */
626 #define THUMB_PP_PC_LR 0x0100
627 #define THUMB_LOAD_BIT 0x0800
628 #define THUMB2_LOAD_BIT 0x00100000
630 #define BAD_ARGS _("bad arguments to instruction")
631 #define BAD_PC _("r15 not allowed here")
632 #define BAD_COND _("instruction cannot be conditional")
633 #define BAD_OVERLAP _("registers may not be the same")
634 #define BAD_HIREG _("lo register required")
635 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* No trailing semicolon: this macro is used as an expression
   (e.g. "inst.error = BAD_ADDR_MODE;"); an embedded ';' would split
   that into two statements and break unbraced if/else bodies.  */
636 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
637 #define BAD_BRANCH _("branch must be last instruction in IT block")
638 #define BAD_NOT_IT _("instruction not allowed in IT block")
639 #define BAD_FPU _("selected FPU does not support instruction")
641 static struct hash_control
*arm_ops_hsh
;
642 static struct hash_control
*arm_cond_hsh
;
643 static struct hash_control
*arm_shift_hsh
;
644 static struct hash_control
*arm_psr_hsh
;
645 static struct hash_control
*arm_v7m_psr_hsh
;
646 static struct hash_control
*arm_reg_hsh
;
647 static struct hash_control
*arm_reloc_hsh
;
648 static struct hash_control
*arm_barrier_opt_hsh
;
650 /* Stuff needed to resolve the label ambiguity
660 symbolS
* last_label_seen
;
661 static int label_is_thumb_function_name
= FALSE
;
663 /* Literal pool structure. Held on a per-section
664 and per-sub-section basis. */
666 #define MAX_LITERAL_POOL_SIZE 1024
667 typedef struct literal_pool
669 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
670 unsigned int next_free_entry
;
675 struct literal_pool
* next
;
678 /* Pointer to a linked list of literal pools. */
679 literal_pool
* list_of_pools
= NULL
;
681 /* State variables for IT block handling. */
/* Bit mask of the remaining slots in the current IT block (zero when
   not inside an IT block).  It holds multi-bit mask values, so it is an
   int, not a bfd_boolean.  */
682 static int current_it_mask = 0;
683 static int current_cc
;
688 /* This array holds the chars that always start a comment. If the
689 pre-processor is disabled, these aren't very useful. */
690 const char comment_chars
[] = "@";
692 /* This array holds the chars that only start a comment at the beginning of
693 a line. If the line seems to have the form '# 123 filename'
694 .line and .file directives will appear in the pre-processed output. */
695 /* Note that input_file.c hand checks for '#' at the beginning of the
696 first line of the input file. This is because the compiler outputs
697 #NO_APP at the beginning of its output. */
698 /* Also note that comments like this one will always work. */
699 const char line_comment_chars
[] = "#";
701 const char line_separator_chars
[] = ";";
703 /* Chars that can be used to separate mant
704 from exp in floating point numbers. */
705 const char EXP_CHARS
[] = "eE";
707 /* Chars that mean this number is a floating point constant. */
711 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
713 /* Prefix characters that indicate the start of an immediate
715 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
717 /* Separator character handling. */
719 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
722 skip_past_char (char ** str
, char c
)
732 #define skip_past_comma(str) skip_past_char (str, ',')
734 /* Arithmetic expressions (possibly involving symbols). */
736 /* Return TRUE if anything in the expression is a bignum. */
739 walk_no_bignums (symbolS
* sp
)
741 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
744 if (symbol_get_value_expression (sp
)->X_add_symbol
)
746 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
747 || (symbol_get_value_expression (sp
)->X_op_symbol
748 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
754 static int in_my_get_expression
= 0;
756 /* Third argument to my_get_expression. */
757 #define GE_NO_PREFIX 0
758 #define GE_IMM_PREFIX 1
759 #define GE_OPT_PREFIX 2
760 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
761 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
762 #define GE_OPT_PREFIX_BIG 3
765 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
770 /* In unified syntax, all prefixes are optional. */
772 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
777 case GE_NO_PREFIX
: break;
779 if (!is_immediate_prefix (**str
))
781 inst
.error
= _("immediate expression requires a # prefix");
787 case GE_OPT_PREFIX_BIG
:
788 if (is_immediate_prefix (**str
))
794 memset (ep
, 0, sizeof (expressionS
));
796 save_in
= input_line_pointer
;
797 input_line_pointer
= *str
;
798 in_my_get_expression
= 1;
799 seg
= expression (ep
);
800 in_my_get_expression
= 0;
802 if (ep
->X_op
== O_illegal
)
804 /* We found a bad expression in md_operand(). */
805 *str
= input_line_pointer
;
806 input_line_pointer
= save_in
;
807 if (inst
.error
== NULL
)
808 inst
.error
= _("bad expression");
813 if (seg
!= absolute_section
814 && seg
!= text_section
815 && seg
!= data_section
816 && seg
!= bss_section
817 && seg
!= undefined_section
)
819 inst
.error
= _("bad segment");
820 *str
= input_line_pointer
;
821 input_line_pointer
= save_in
;
826 /* Get rid of any bignums now, so that we don't generate an error for which
827 we can't establish a line number later on. Big numbers are never valid
828 in instructions, which is where this routine is always called. */
829 if (prefix_mode
!= GE_OPT_PREFIX_BIG
830 && (ep
->X_op
== O_big
832 && (walk_no_bignums (ep
->X_add_symbol
)
834 && walk_no_bignums (ep
->X_op_symbol
))))))
836 inst
.error
= _("invalid constant");
837 *str
= input_line_pointer
;
838 input_line_pointer
= save_in
;
842 *str
= input_line_pointer
;
843 input_line_pointer
= save_in
;
847 /* Turn a string in input_line_pointer into a floating point constant
848 of type TYPE, and store the appropriate bytes in *LITP. The number
849 of LITTLENUMS emitted is stored in *SIZEP. An error message is
850 returned, or NULL on OK.
852 Note that fp constants aren't represented in the normal way on the ARM.
853 In big endian mode, things are as expected. However, in little endian
854 mode fp constants are big-endian word-wise, and little-endian byte-wise
855 within the words. For example, (double) 1.1 in big endian mode is
856 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
857 the byte sequence 99 99 f1 3f 9a 99 99 99.
859 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
862 md_atof (int type
, char * litP
, int * sizeP
)
865 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
897 return _("bad call to MD_ATOF()");
900 t
= atof_ieee (input_line_pointer
, type
, words
);
902 input_line_pointer
= t
;
905 if (target_big_endian
)
907 for (i
= 0; i
< prec
; i
++)
909 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
915 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
916 for (i
= prec
- 1; i
>= 0; i
--)
918 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
922 /* For a 4 byte float the order of elements in `words' is 1 0.
923 For an 8 byte float the order is 1 0 3 2. */
924 for (i
= 0; i
< prec
; i
+= 2)
926 md_number_to_chars (litP
, (valueT
) words
[i
+ 1], 2);
927 md_number_to_chars (litP
+ 2, (valueT
) words
[i
], 2);
935 /* We handle all bad expressions here, so that we can report the faulty
936 instruction in the error message. */
938 md_operand (expressionS
* expr
)
940 if (in_my_get_expression
)
941 expr
->X_op
= O_illegal
;
944 /* Immediate values. */
946 /* Generic immediate-value read function for use in directives.
947 Accepts anything that 'expression' can fold to a constant.
948 *val receives the number. */
951 immediate_for_directive (int *val
)
954 exp
.X_op
= O_illegal
;
956 if (is_immediate_prefix (*input_line_pointer
))
958 input_line_pointer
++;
962 if (exp
.X_op
!= O_constant
)
964 as_bad (_("expected #constant"));
965 ignore_rest_of_line ();
968 *val
= exp
.X_add_number
;
973 /* Register parsing. */
975 /* Generic register parser. CCP points to what should be the
976 beginning of a register name. If it is indeed a valid register
977 name, advance CCP over it and return the reg_entry structure;
978 otherwise return NULL. Does not issue diagnostics. */
980 static struct reg_entry
*
981 arm_reg_parse_multi (char **ccp
)
985 struct reg_entry
*reg
;
987 #ifdef REGISTER_PREFIX
988 if (*start
!= REGISTER_PREFIX
)
992 #ifdef OPTIONAL_REGISTER_PREFIX
993 if (*start
== OPTIONAL_REGISTER_PREFIX
)
998 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1003 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1005 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1015 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1016 enum arm_reg_type type
)
1018 /* Alternative syntaxes are accepted for a few register classes. */
1025 /* Generic coprocessor register names are allowed for these. */
1026 if (reg
&& reg
->type
== REG_TYPE_CN
)
1031 /* For backward compatibility, a bare number is valid here. */
1033 unsigned long processor
= strtoul (start
, ccp
, 10);
1034 if (*ccp
!= start
&& processor
<= 15)
1038 case REG_TYPE_MMXWC
:
1039 /* WC includes WCG. ??? I'm not sure this is true for all
1040 instructions that take WC registers. */
1041 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1052 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1053 return value is the register number or FAIL. */
1056 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1059 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1062 /* Do not allow a scalar (reg+index) to parse as a register. */
1063 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1066 if (reg
&& reg
->type
== type
)
1069 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1076 /* Parse a Neon type specifier. *STR should point at the leading '.'
1077 character. Does no verification at this stage that the type fits the opcode
1084 Can all be legally parsed by this function.
1086 Fills in neon_type struct pointer with parsed information, and updates STR
1087 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1088 type, FAIL if not. */
1091 parse_neon_type (struct neon_type
*type
, char **str
)
1098 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1100 enum neon_el_type thistype
= NT_untyped
;
1101 unsigned thissize
= -1u;
1108 /* Just a size without an explicit type. */
1112 switch (TOLOWER (*ptr
))
1114 case 'i': thistype
= NT_integer
; break;
1115 case 'f': thistype
= NT_float
; break;
1116 case 'p': thistype
= NT_poly
; break;
1117 case 's': thistype
= NT_signed
; break;
1118 case 'u': thistype
= NT_unsigned
; break;
1120 thistype
= NT_float
;
1125 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1131 /* .f is an abbreviation for .f32. */
1132 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1137 thissize
= strtoul (ptr
, &ptr
, 10);
1139 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1142 as_bad (_("bad size %d in type specifier"), thissize
);
1150 type
->el
[type
->elems
].type
= thistype
;
1151 type
->el
[type
->elems
].size
= thissize
;
1156 /* Empty/missing type is not a successful parse. */
1157 if (type
->elems
== 0)
1165 /* Errors may be set multiple times during parsing or bit encoding
1166 (particularly in the Neon bits), but usually the earliest error which is set
1167 will be the most meaningful. Avoid overwriting it with later (cascading)
1168 errors by calling this function. */
1171 first_error (const char *err
)
1177 /* Parse a single type, e.g. ".s32", leading period included. */
1179 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1182 struct neon_type optype
;
1186 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1188 if (optype
.elems
== 1)
1189 *vectype
= optype
.el
[0];
1192 first_error (_("only one type should be specified for operand"));
1198 first_error (_("vector type expected"));
1210 /* Special meanings for indices (which have a range of 0-7), which will fit into
1213 #define NEON_ALL_LANES 15
1214 #define NEON_INTERLEAVE_LANES 14
1216 /* Parse either a register or a scalar, with an optional type. Return the
1217 register number, and optionally fill in the actual type of the register
1218 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1219 type/index information in *TYPEINFO. */
1222 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1223 enum arm_reg_type
*rtype
,
1224 struct neon_typed_alias
*typeinfo
)
1227 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1228 struct neon_typed_alias atype
;
1229 struct neon_type_el parsetype
;
1233 atype
.eltype
.type
= NT_invtype
;
1234 atype
.eltype
.size
= -1;
1236 /* Try alternate syntax for some types of register. Note these are mutually
1237 exclusive with the Neon syntax extensions. */
1240 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1248 /* Undo polymorphism when a set of register types may be accepted. */
1249 if ((type
== REG_TYPE_NDQ
1250 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1251 || (type
== REG_TYPE_VFSD
1252 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1253 || (type
== REG_TYPE_NSDQ
1254 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1255 || reg
->type
== REG_TYPE_NQ
)))
1258 if (type
!= reg
->type
)
1264 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1266 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1268 first_error (_("can't redefine type for operand"));
1271 atype
.defined
|= NTA_HASTYPE
;
1272 atype
.eltype
= parsetype
;
1275 if (skip_past_char (&str
, '[') == SUCCESS
)
1277 if (type
!= REG_TYPE_VFD
)
1279 first_error (_("only D registers may be indexed"));
1283 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1285 first_error (_("can't change index for operand"));
1289 atype
.defined
|= NTA_HASINDEX
;
1291 if (skip_past_char (&str
, ']') == SUCCESS
)
1292 atype
.index
= NEON_ALL_LANES
;
1297 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1299 if (exp
.X_op
!= O_constant
)
1301 first_error (_("constant expression required"));
1305 if (skip_past_char (&str
, ']') == FAIL
)
1308 atype
.index
= exp
.X_add_number
;
1323 /* Like arm_reg_parse, but allow the following extra features:
1324 - If RTYPE is non-zero, return the (possibly restricted) type of the
1325 register (e.g. Neon double or quad reg when either has been requested).
1326 - If this is a Neon vector type with additional type information, fill
1327 in the struct pointed to by VECTYPE (if non-NULL).
1328 This function will fault on encountering a scalar.
1332 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1333 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1335 struct neon_typed_alias atype
;
1337 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1342 /* Do not allow a scalar (reg+index) to parse as a register. */
1343 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1345 first_error (_("register operand expected, but got scalar"));
1350 *vectype
= atype
.eltype
;
1357 #define NEON_SCALAR_REG(X) ((X) >> 4)
1358 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1360 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1361 have enough information to be able to do a good job bounds-checking. So, we
1362 just do easy checks here, and do further checks later. */
1365 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1369 struct neon_typed_alias atype
;
1371 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1373 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1376 if (atype
.index
== NEON_ALL_LANES
)
1378 first_error (_("scalar must have an index"));
1381 else if (atype
.index
>= 64 / elsize
)
1383 first_error (_("scalar index out of range"));
1388 *type
= atype
.eltype
;
1392 return reg
* 16 + atype
.index
;
1395 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1397 parse_reg_list (char ** strp
)
1399 char * str
= * strp
;
1403 /* We come back here if we get ranges concatenated by '+' or '|'. */
1418 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1420 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1430 first_error (_("bad range in register list"));
1434 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1436 if (range
& (1 << i
))
1438 (_("Warning: duplicated register (r%d) in register list"),
1446 if (range
& (1 << reg
))
1447 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1449 else if (reg
<= cur_reg
)
1450 as_tsktsk (_("Warning: register range not in ascending order"));
1455 while (skip_past_comma (&str
) != FAIL
1456 || (in_range
= 1, *str
++ == '-'));
1461 first_error (_("missing `}'"));
1469 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1472 if (expr
.X_op
== O_constant
)
1474 if (expr
.X_add_number
1475 != (expr
.X_add_number
& 0x0000ffff))
1477 inst
.error
= _("invalid register mask");
1481 if ((range
& expr
.X_add_number
) != 0)
1483 int regno
= range
& expr
.X_add_number
;
1486 regno
= (1 << regno
) - 1;
1488 (_("Warning: duplicated register (r%d) in register list"),
1492 range
|= expr
.X_add_number
;
1496 if (inst
.reloc
.type
!= 0)
1498 inst
.error
= _("expression too complex");
1502 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1503 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1504 inst
.reloc
.pc_rel
= 0;
1508 if (*str
== '|' || *str
== '+')
1514 while (another_range
);
1520 /* Types of registers in a list. */
1529 /* Parse a VFP register list. If the string is invalid return FAIL.
1530 Otherwise return the number of registers, and set PBASE to the first
1531 register. Parses registers of type ETYPE.
1532 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1533 - Q registers can be used to specify pairs of D registers
1534 - { } can be omitted from around a singleton register list
1535 FIXME: This is not implemented, as it would require backtracking in
1538 This could be done (the meaning isn't really ambiguous), but doesn't
1539 fit in well with the current parsing framework.
1540 - 32 D registers may be used (also true for VFPv3).
1541 FIXME: Types are ignored in these register lists, which is probably a
1545 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1550 enum arm_reg_type regtype
= 0;
1554 unsigned long mask
= 0;
1559 inst
.error
= _("expecting {");
1568 regtype
= REG_TYPE_VFS
;
1573 regtype
= REG_TYPE_VFD
;
1576 case REGLIST_NEON_D
:
1577 regtype
= REG_TYPE_NDQ
;
1581 if (etype
!= REGLIST_VFP_S
)
1583 /* VFPv3 allows 32 D registers. */
1584 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
1588 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1591 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1598 base_reg
= max_regs
;
1602 int setmask
= 1, addregs
= 1;
1604 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1606 if (new_base
== FAIL
)
1608 first_error (_(reg_expected_msgs
[regtype
]));
1612 if (new_base
>= max_regs
)
1614 first_error (_("register out of range in list"));
1618 /* Note: a value of 2 * n is returned for the register Q<n>. */
1619 if (regtype
== REG_TYPE_NQ
)
1625 if (new_base
< base_reg
)
1626 base_reg
= new_base
;
1628 if (mask
& (setmask
<< new_base
))
1630 first_error (_("invalid register list"));
1634 if ((mask
>> new_base
) != 0 && ! warned
)
1636 as_tsktsk (_("register list not in ascending order"));
1640 mask
|= setmask
<< new_base
;
1643 if (*str
== '-') /* We have the start of a range expression */
1649 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1652 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1656 if (high_range
>= max_regs
)
1658 first_error (_("register out of range in list"));
1662 if (regtype
== REG_TYPE_NQ
)
1663 high_range
= high_range
+ 1;
1665 if (high_range
<= new_base
)
1667 inst
.error
= _("register range not in ascending order");
1671 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1673 if (mask
& (setmask
<< new_base
))
1675 inst
.error
= _("invalid register list");
1679 mask
|= setmask
<< new_base
;
1684 while (skip_past_comma (&str
) != FAIL
);
1688 /* Sanity check -- should have raised a parse error above. */
1689 if (count
== 0 || count
> max_regs
)
1694 /* Final test -- the registers must be consecutive. */
1696 for (i
= 0; i
< count
; i
++)
1698 if ((mask
& (1u << i
)) == 0)
1700 inst
.error
= _("non-contiguous register range");
1710 /* True if two alias types are the same. */
1713 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1721 if (a
->defined
!= b
->defined
)
1724 if ((a
->defined
& NTA_HASTYPE
) != 0
1725 && (a
->eltype
.type
!= b
->eltype
.type
1726 || a
->eltype
.size
!= b
->eltype
.size
))
1729 if ((a
->defined
& NTA_HASINDEX
) != 0
1730 && (a
->index
!= b
->index
))
1736 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1737 The base register is put in *PBASE.
1738 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1740 The register stride (minus one) is put in bit 4 of the return value.
1741 Bits [6:5] encode the list length (minus one).
1742 The type of the list elements is put in *ELTYPE, if non-NULL. */
1744 #define NEON_LANE(X) ((X) & 0xf)
1745 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1746 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1749 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1750 struct neon_type_el
*eltype
)
1757 int leading_brace
= 0;
1758 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1760 const char *const incr_error
= "register stride must be 1 or 2";
1761 const char *const type_error
= "mismatched element/structure types in list";
1762 struct neon_typed_alias firsttype
;
1764 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1769 struct neon_typed_alias atype
;
1770 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1774 first_error (_(reg_expected_msgs
[rtype
]));
1781 if (rtype
== REG_TYPE_NQ
)
1788 else if (reg_incr
== -1)
1790 reg_incr
= getreg
- base_reg
;
1791 if (reg_incr
< 1 || reg_incr
> 2)
1793 first_error (_(incr_error
));
1797 else if (getreg
!= base_reg
+ reg_incr
* count
)
1799 first_error (_(incr_error
));
1803 if (!neon_alias_types_same (&atype
, &firsttype
))
1805 first_error (_(type_error
));
1809 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1813 struct neon_typed_alias htype
;
1814 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1816 lane
= NEON_INTERLEAVE_LANES
;
1817 else if (lane
!= NEON_INTERLEAVE_LANES
)
1819 first_error (_(type_error
));
1824 else if (reg_incr
!= 1)
1826 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1830 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1833 first_error (_(reg_expected_msgs
[rtype
]));
1836 if (!neon_alias_types_same (&htype
, &firsttype
))
1838 first_error (_(type_error
));
1841 count
+= hireg
+ dregs
- getreg
;
1845 /* If we're using Q registers, we can't use [] or [n] syntax. */
1846 if (rtype
== REG_TYPE_NQ
)
1852 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1856 else if (lane
!= atype
.index
)
1858 first_error (_(type_error
));
1862 else if (lane
== -1)
1863 lane
= NEON_INTERLEAVE_LANES
;
1864 else if (lane
!= NEON_INTERLEAVE_LANES
)
1866 first_error (_(type_error
));
1871 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1873 /* No lane set by [x]. We must be interleaving structures. */
1875 lane
= NEON_INTERLEAVE_LANES
;
1878 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
1879 || (count
> 1 && reg_incr
== -1))
1881 first_error (_("error parsing element/structure list"));
1885 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
1887 first_error (_("expected }"));
1895 *eltype
= firsttype
.eltype
;
1900 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
1903 /* Parse an explicit relocation suffix on an expression. This is
1904 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1905 arm_reloc_hsh contains no entries, so this function can only
1906 succeed if there is no () after the word. Returns -1 on error,
1907 BFD_RELOC_UNUSED if there wasn't any suffix. */
1909 parse_reloc (char **str
)
1911 struct reloc_entry
*r
;
1915 return BFD_RELOC_UNUSED
;
1920 while (*q
&& *q
!= ')' && *q
!= ',')
1925 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
1932 /* Directives: register aliases. */
1934 static struct reg_entry
*
1935 insert_reg_alias (char *str
, int number
, int type
)
1937 struct reg_entry
*new;
1940 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
1943 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
1945 /* Only warn about a redefinition if it's not defined as the
1947 else if (new->number
!= number
|| new->type
!= type
)
1948 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
1953 name
= xstrdup (str
);
1954 new = xmalloc (sizeof (struct reg_entry
));
1957 new->number
= number
;
1959 new->builtin
= FALSE
;
1962 if (hash_insert (arm_reg_hsh
, name
, (PTR
) new))
1969 insert_neon_reg_alias (char *str
, int number
, int type
,
1970 struct neon_typed_alias
*atype
)
1972 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
1976 first_error (_("attempt to redefine typed alias"));
1982 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
1983 *reg
->neon
= *atype
;
1987 /* Look for the .req directive. This is of the form:
1989 new_register_name .req existing_register_name
1991 If we find one, or if it looks sufficiently like one that we want to
1992 handle any error here, return non-zero. Otherwise return zero. */
1995 create_register_alias (char * newname
, char *p
)
1997 struct reg_entry
*old
;
1998 char *oldname
, *nbuf
;
2001 /* The input scrubber ensures that whitespace after the mnemonic is
2002 collapsed to single spaces. */
2004 if (strncmp (oldname
, " .req ", 6) != 0)
2008 if (*oldname
== '\0')
2011 old
= hash_find (arm_reg_hsh
, oldname
);
2014 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2018 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2019 the desired alias name, and p points to its end. If not, then
2020 the desired alias name is in the global original_case_string. */
2021 #ifdef TC_CASE_SENSITIVE
2024 newname
= original_case_string
;
2025 nlen
= strlen (newname
);
2028 nbuf
= alloca (nlen
+ 1);
2029 memcpy (nbuf
, newname
, nlen
);
2032 /* Create aliases under the new name as stated; an all-lowercase
2033 version of the new name; and an all-uppercase version of the new
2035 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2037 for (p
= nbuf
; *p
; p
++)
2040 if (strncmp (nbuf
, newname
, nlen
))
2041 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2043 for (p
= nbuf
; *p
; p
++)
2046 if (strncmp (nbuf
, newname
, nlen
))
2047 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2052 /* Create a Neon typed/indexed register alias using directives, e.g.:
2057 These typed registers can be used instead of the types specified after the
2058 Neon mnemonic, so long as all operands given have types. Types can also be
2059 specified directly, e.g.:
2060 vadd d0.s32, d1.s32, d2.s32
2064 create_neon_reg_alias (char *newname
, char *p
)
2066 enum arm_reg_type basetype
;
2067 struct reg_entry
*basereg
;
2068 struct reg_entry mybasereg
;
2069 struct neon_type ntype
;
2070 struct neon_typed_alias typeinfo
;
2071 char *namebuf
, *nameend
;
2074 typeinfo
.defined
= 0;
2075 typeinfo
.eltype
.type
= NT_invtype
;
2076 typeinfo
.eltype
.size
= -1;
2077 typeinfo
.index
= -1;
2081 if (strncmp (p
, " .dn ", 5) == 0)
2082 basetype
= REG_TYPE_VFD
;
2083 else if (strncmp (p
, " .qn ", 5) == 0)
2084 basetype
= REG_TYPE_NQ
;
2093 basereg
= arm_reg_parse_multi (&p
);
2095 if (basereg
&& basereg
->type
!= basetype
)
2097 as_bad (_("bad type for register"));
2101 if (basereg
== NULL
)
2104 /* Try parsing as an integer. */
2105 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2106 if (exp
.X_op
!= O_constant
)
2108 as_bad (_("expression must be constant"));
2111 basereg
= &mybasereg
;
2112 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2118 typeinfo
= *basereg
->neon
;
2120 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2122 /* We got a type. */
2123 if (typeinfo
.defined
& NTA_HASTYPE
)
2125 as_bad (_("can't redefine the type of a register alias"));
2129 typeinfo
.defined
|= NTA_HASTYPE
;
2130 if (ntype
.elems
!= 1)
2132 as_bad (_("you must specify a single type only"));
2135 typeinfo
.eltype
= ntype
.el
[0];
2138 if (skip_past_char (&p
, '[') == SUCCESS
)
2141 /* We got a scalar index. */
2143 if (typeinfo
.defined
& NTA_HASINDEX
)
2145 as_bad (_("can't redefine the index of a scalar alias"));
2149 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2151 if (exp
.X_op
!= O_constant
)
2153 as_bad (_("scalar index must be constant"));
2157 typeinfo
.defined
|= NTA_HASINDEX
;
2158 typeinfo
.index
= exp
.X_add_number
;
2160 if (skip_past_char (&p
, ']') == FAIL
)
2162 as_bad (_("expecting ]"));
2167 namelen
= nameend
- newname
;
2168 namebuf
= alloca (namelen
+ 1);
2169 strncpy (namebuf
, newname
, namelen
);
2170 namebuf
[namelen
] = '\0';
2172 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2173 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2175 /* Insert name in all uppercase. */
2176 for (p
= namebuf
; *p
; p
++)
2179 if (strncmp (namebuf
, newname
, namelen
))
2180 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2181 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2183 /* Insert name in all lowercase. */
2184 for (p
= namebuf
; *p
; p
++)
2187 if (strncmp (namebuf
, newname
, namelen
))
2188 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2189 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2194 /* Should never be called, as .req goes between the alias and the
2195 register name, not at the beginning of the line. */
2197 s_req (int a ATTRIBUTE_UNUSED
)
2199 as_bad (_("invalid syntax for .req directive"));
2203 s_dn (int a ATTRIBUTE_UNUSED
)
2205 as_bad (_("invalid syntax for .dn directive"));
2209 s_qn (int a ATTRIBUTE_UNUSED
)
2211 as_bad (_("invalid syntax for .qn directive"));
2214 /* The .unreq directive deletes an alias which was previously defined
2215 by .req. For example:
2221 s_unreq (int a ATTRIBUTE_UNUSED
)
2226 name
= input_line_pointer
;
2228 while (*input_line_pointer
!= 0
2229 && *input_line_pointer
!= ' '
2230 && *input_line_pointer
!= '\n')
2231 ++input_line_pointer
;
2233 saved_char
= *input_line_pointer
;
2234 *input_line_pointer
= 0;
2237 as_bad (_("invalid syntax for .unreq directive"));
2240 struct reg_entry
*reg
= hash_find (arm_reg_hsh
, name
);
2243 as_bad (_("unknown register alias '%s'"), name
);
2244 else if (reg
->builtin
)
2245 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2249 hash_delete (arm_reg_hsh
, name
);
2250 free ((char *) reg
->name
);
2257 *input_line_pointer
= saved_char
;
2258 demand_empty_rest_of_line ();
2261 /* Directives: Instruction set selection. */
2264 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2265 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2266 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2267 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2269 static enum mstate mapstate
= MAP_UNDEFINED
;
2272 mapping_state (enum mstate state
)
2275 const char * symname
;
2278 if (mapstate
== state
)
2279 /* The mapping symbol has already been emitted.
2280 There is nothing else to do. */
2289 type
= BSF_NO_FLAGS
;
2293 type
= BSF_NO_FLAGS
;
2297 type
= BSF_NO_FLAGS
;
2305 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2307 symbolP
= symbol_new (symname
, now_seg
, (valueT
) frag_now_fix (), frag_now
);
2308 symbol_table_insert (symbolP
);
2309 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2314 THUMB_SET_FUNC (symbolP
, 0);
2315 ARM_SET_THUMB (symbolP
, 0);
2316 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2320 THUMB_SET_FUNC (symbolP
, 1);
2321 ARM_SET_THUMB (symbolP
, 1);
2322 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2331 #define mapping_state(x) /* nothing */
2334 /* Find the real, Thumb encoded start of a Thumb function. */
2337 find_real_start (symbolS
* symbolP
)
2340 const char * name
= S_GET_NAME (symbolP
);
2341 symbolS
* new_target
;
2343 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2344 #define STUB_NAME ".real_start_of"
2349 /* The compiler may generate BL instructions to local labels because
2350 it needs to perform a branch to a far away location. These labels
2351 do not have a corresponding ".real_start_of" label. We check
2352 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2353 the ".real_start_of" convention for nonlocal branches. */
2354 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2357 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2358 new_target
= symbol_find (real_start
);
2360 if (new_target
== NULL
)
2362 as_warn ("Failed to find real start of function: %s\n", name
);
2363 new_target
= symbolP
;
2370 opcode_select (int width
)
2377 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2378 as_bad (_("selected processor does not support THUMB opcodes"));
2381 /* No need to force the alignment, since we will have been
2382 coming from ARM mode, which is word-aligned. */
2383 record_alignment (now_seg
, 1);
2385 mapping_state (MAP_THUMB
);
2391 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2392 as_bad (_("selected processor does not support ARM opcodes"));
2397 frag_align (2, 0, 0);
2399 record_alignment (now_seg
, 1);
2401 mapping_state (MAP_ARM
);
2405 as_bad (_("invalid instruction size selected (%d)"), width
);
2410 s_arm (int ignore ATTRIBUTE_UNUSED
)
2413 demand_empty_rest_of_line ();
2417 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2420 demand_empty_rest_of_line ();
2424 s_code (int unused ATTRIBUTE_UNUSED
)
2428 temp
= get_absolute_expression ();
2433 opcode_select (temp
);
2437 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2442 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2444 /* If we are not already in thumb mode go into it, EVEN if
2445 the target processor does not support thumb instructions.
2446 This is used by gcc/config/arm/lib1funcs.asm for example
2447 to compile interworking support functions even if the
2448 target processor should not support interworking. */
2452 record_alignment (now_seg
, 1);
2455 demand_empty_rest_of_line ();
2459 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2463 /* The following label is the name/address of the start of a Thumb function.
2464 We need to know this for the interworking support. */
2465 label_is_thumb_function_name
= TRUE
;
2468 /* Perform a .set directive, but also mark the alias as
2469 being a thumb function. */
2472 s_thumb_set (int equiv
)
2474 /* XXX the following is a duplicate of the code for s_set() in read.c
2475 We cannot just call that code as we need to get at the symbol that
2482 /* Especial apologies for the random logic:
2483 This just grew, and could be parsed much more simply!
2485 name
= input_line_pointer
;
2486 delim
= get_symbol_end ();
2487 end_name
= input_line_pointer
;
2490 if (*input_line_pointer
!= ',')
2493 as_bad (_("expected comma after name \"%s\""), name
);
2495 ignore_rest_of_line ();
2499 input_line_pointer
++;
2502 if (name
[0] == '.' && name
[1] == '\0')
2504 /* XXX - this should not happen to .thumb_set. */
2508 if ((symbolP
= symbol_find (name
)) == NULL
2509 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2512 /* When doing symbol listings, play games with dummy fragments living
2513 outside the normal fragment chain to record the file and line info
2515 if (listing
& LISTING_SYMBOLS
)
2517 extern struct list_info_struct
* listing_tail
;
2518 fragS
* dummy_frag
= xmalloc (sizeof (fragS
));
2520 memset (dummy_frag
, 0, sizeof (fragS
));
2521 dummy_frag
->fr_type
= rs_fill
;
2522 dummy_frag
->line
= listing_tail
;
2523 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2524 dummy_frag
->fr_symbol
= symbolP
;
2528 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2531 /* "set" symbols are local unless otherwise specified. */
2532 SF_SET_LOCAL (symbolP
);
2533 #endif /* OBJ_COFF */
2534 } /* Make a new symbol. */
2536 symbol_table_insert (symbolP
);
2541 && S_IS_DEFINED (symbolP
)
2542 && S_GET_SEGMENT (symbolP
) != reg_section
)
2543 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2545 pseudo_set (symbolP
);
2547 demand_empty_rest_of_line ();
2549 /* XXX Now we come to the Thumb specific bit of code. */
2551 THUMB_SET_FUNC (symbolP
, 1);
2552 ARM_SET_THUMB (symbolP
, 1);
2553 #if defined OBJ_ELF || defined OBJ_COFF
2554 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2558 /* Directives: Mode selection. */
2560 /* .syntax [unified|divided] - choose the new unified syntax
2561 (same for Arm and Thumb encoding, modulo slight differences in what
2562 can be represented) or the old divergent syntax for each mode. */
2564 s_syntax (int unused ATTRIBUTE_UNUSED
)
2568 name
= input_line_pointer
;
2569 delim
= get_symbol_end ();
2571 if (!strcasecmp (name
, "unified"))
2572 unified_syntax
= TRUE
;
2573 else if (!strcasecmp (name
, "divided"))
2574 unified_syntax
= FALSE
;
2577 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2580 *input_line_pointer
= delim
;
2581 demand_empty_rest_of_line ();
2584 /* Directives: sectioning and alignment. */
2586 /* Same as s_align_ptwo but align 0 => align 2. */
2589 s_align (int unused ATTRIBUTE_UNUSED
)
2593 long max_alignment
= 15;
2595 temp
= get_absolute_expression ();
2596 if (temp
> max_alignment
)
2597 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2600 as_bad (_("alignment negative. 0 assumed."));
2604 if (*input_line_pointer
== ',')
2606 input_line_pointer
++;
2607 temp_fill
= get_absolute_expression ();
2615 /* Only make a frag if we HAVE to. */
2616 if (temp
&& !need_pass_2
)
2617 frag_align (temp
, (int) temp_fill
, 0);
2618 demand_empty_rest_of_line ();
2620 record_alignment (now_seg
, temp
);
2624 s_bss (int ignore ATTRIBUTE_UNUSED
)
2626 /* We don't support putting frags in the BSS segment, we fake it by
2627 marking in_bss, then looking at s_skip for clues. */
2628 subseg_set (bss_section
, 0);
2629 demand_empty_rest_of_line ();
2630 mapping_state (MAP_DATA
);
2634 s_even (int ignore ATTRIBUTE_UNUSED
)
2636 /* Never make frag if expect extra pass. */
2638 frag_align (1, 0, 0);
2640 record_alignment (now_seg
, 1);
2642 demand_empty_rest_of_line ();
2645 /* Directives: Literal pools. */
2647 static literal_pool
*
2648 find_literal_pool (void)
2650 literal_pool
* pool
;
2652 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
2654 if (pool
->section
== now_seg
2655 && pool
->sub_section
== now_subseg
)
2662 static literal_pool
*
2663 find_or_make_literal_pool (void)
2665 /* Next literal pool ID number. */
2666 static unsigned int latest_pool_num
= 1;
2667 literal_pool
* pool
;
2669 pool
= find_literal_pool ();
2673 /* Create a new pool. */
2674 pool
= xmalloc (sizeof (* pool
));
2678 pool
->next_free_entry
= 0;
2679 pool
->section
= now_seg
;
2680 pool
->sub_section
= now_subseg
;
2681 pool
->next
= list_of_pools
;
2682 pool
->symbol
= NULL
;
2684 /* Add it to the list. */
2685 list_of_pools
= pool
;
2688 /* New pools, and emptied pools, will have a NULL symbol. */
2689 if (pool
->symbol
== NULL
)
2691 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
2692 (valueT
) 0, &zero_address_frag
);
2693 pool
->id
= latest_pool_num
++;
2700 /* Add the literal in the global 'inst'
2701 structure to the relevent literal pool. */
2704 add_to_lit_pool (void)
2706 literal_pool
* pool
;
2709 pool
= find_or_make_literal_pool ();
2711 /* Check if this literal value is already in the pool. */
2712 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2714 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2715 && (inst
.reloc
.exp
.X_op
== O_constant
)
2716 && (pool
->literals
[entry
].X_add_number
2717 == inst
.reloc
.exp
.X_add_number
)
2718 && (pool
->literals
[entry
].X_unsigned
2719 == inst
.reloc
.exp
.X_unsigned
))
2722 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2723 && (inst
.reloc
.exp
.X_op
== O_symbol
)
2724 && (pool
->literals
[entry
].X_add_number
2725 == inst
.reloc
.exp
.X_add_number
)
2726 && (pool
->literals
[entry
].X_add_symbol
2727 == inst
.reloc
.exp
.X_add_symbol
)
2728 && (pool
->literals
[entry
].X_op_symbol
2729 == inst
.reloc
.exp
.X_op_symbol
))
2733 /* Do we need to create a new entry? */
2734 if (entry
== pool
->next_free_entry
)
2736 if (entry
>= MAX_LITERAL_POOL_SIZE
)
2738 inst
.error
= _("literal pool overflow");
2742 pool
->literals
[entry
] = inst
.reloc
.exp
;
2743 pool
->next_free_entry
+= 1;
2746 inst
.reloc
.exp
.X_op
= O_symbol
;
2747 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4;
2748 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
2753 /* Can't use symbol_new here, so have to create a symbol and then at
2754 a later date assign it a value. Thats what these functions do. */
2757 symbol_locate (symbolS
* symbolP
,
2758 const char * name
, /* It is copied, the caller can modify. */
2759 segT segment
, /* Segment identifier (SEG_<something>). */
2760 valueT valu
, /* Symbol value. */
2761 fragS
* frag
) /* Associated fragment. */
2763 unsigned int name_length
;
2764 char * preserved_copy_of_name
;
2766 name_length
= strlen (name
) + 1; /* +1 for \0. */
2767 obstack_grow (¬es
, name
, name_length
);
2768 preserved_copy_of_name
= obstack_finish (¬es
);
2770 #ifdef tc_canonicalize_symbol_name
2771 preserved_copy_of_name
=
2772 tc_canonicalize_symbol_name (preserved_copy_of_name
);
2775 S_SET_NAME (symbolP
, preserved_copy_of_name
);
2777 S_SET_SEGMENT (symbolP
, segment
);
2778 S_SET_VALUE (symbolP
, valu
);
2779 symbol_clear_list_pointers (symbolP
);
2781 symbol_set_frag (symbolP
, frag
);
2783 /* Link to end of symbol chain. */
2785 extern int symbol_table_frozen
;
2787 if (symbol_table_frozen
)
2791 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
2793 obj_symbol_new_hook (symbolP
);
2795 #ifdef tc_symbol_new_hook
2796 tc_symbol_new_hook (symbolP
);
2800 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
2801 #endif /* DEBUG_SYMS */
2806 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
2809 literal_pool
* pool
;
2812 pool
= find_literal_pool ();
2814 || pool
->symbol
== NULL
2815 || pool
->next_free_entry
== 0)
2818 mapping_state (MAP_DATA
);
2820 /* Align pool as you have word accesses.
2821 Only make a frag if we have to. */
2823 frag_align (2, 0, 0);
2825 record_alignment (now_seg
, 2);
2827 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
2829 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
2830 (valueT
) frag_now_fix (), frag_now
);
2831 symbol_table_insert (pool
->symbol
);
2833 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
2835 #if defined OBJ_COFF || defined OBJ_ELF
2836 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
2839 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2840 /* First output the expression in the instruction to the pool. */
2841 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
2843 /* Mark the pool as empty. */
2844 pool
->next_free_entry
= 0;
2845 pool
->symbol
= NULL
;
2849 /* Forward declarations for functions below, in the MD interface
2851 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
2852 static valueT
create_unwind_entry (int);
2853 static void start_unwind_section (const segT
, int);
2854 static void add_unwind_opcode (valueT
, int);
2855 static void flush_pending_unwind (void);
2857 /* Directives: Data. */
2860 s_arm_elf_cons (int nbytes
)
2864 #ifdef md_flush_pending_output
2865 md_flush_pending_output ();
2868 if (is_it_end_of_statement ())
2870 demand_empty_rest_of_line ();
2874 #ifdef md_cons_align
2875 md_cons_align (nbytes
);
2878 mapping_state (MAP_DATA
);
2882 char *base
= input_line_pointer
;
2886 if (exp
.X_op
!= O_symbol
)
2887 emit_expr (&exp
, (unsigned int) nbytes
);
2890 char *before_reloc
= input_line_pointer
;
2891 reloc
= parse_reloc (&input_line_pointer
);
2894 as_bad (_("unrecognized relocation suffix"));
2895 ignore_rest_of_line ();
2898 else if (reloc
== BFD_RELOC_UNUSED
)
2899 emit_expr (&exp
, (unsigned int) nbytes
);
2902 reloc_howto_type
*howto
= bfd_reloc_type_lookup (stdoutput
, reloc
);
2903 int size
= bfd_get_reloc_size (howto
);
2905 if (reloc
== BFD_RELOC_ARM_PLT32
)
2907 as_bad (_("(plt) is only valid on branch targets"));
2908 reloc
= BFD_RELOC_UNUSED
;
2913 as_bad (_("%s relocations do not fit in %d bytes"),
2914 howto
->name
, nbytes
);
2917 /* We've parsed an expression stopping at O_symbol.
2918 But there may be more expression left now that we
2919 have parsed the relocation marker. Parse it again.
2920 XXX Surely there is a cleaner way to do this. */
2921 char *p
= input_line_pointer
;
2923 char *save_buf
= alloca (input_line_pointer
- base
);
2924 memcpy (save_buf
, base
, input_line_pointer
- base
);
2925 memmove (base
+ (input_line_pointer
- before_reloc
),
2926 base
, before_reloc
- base
);
2928 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
2930 memcpy (base
, save_buf
, p
- base
);
2932 offset
= nbytes
- size
;
2933 p
= frag_more ((int) nbytes
);
2934 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
2935 size
, &exp
, 0, reloc
);
2940 while (*input_line_pointer
++ == ',');
2942 /* Put terminator back into stream. */
2943 input_line_pointer
--;
2944 demand_empty_rest_of_line ();
2948 /* Parse a .rel31 directive. */
2951 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
2958 if (*input_line_pointer
== '1')
2959 highbit
= 0x80000000;
2960 else if (*input_line_pointer
!= '0')
2961 as_bad (_("expected 0 or 1"));
2963 input_line_pointer
++;
2964 if (*input_line_pointer
!= ',')
2965 as_bad (_("missing comma"));
2966 input_line_pointer
++;
2968 #ifdef md_flush_pending_output
2969 md_flush_pending_output ();
2972 #ifdef md_cons_align
2976 mapping_state (MAP_DATA
);
2981 md_number_to_chars (p
, highbit
, 4);
2982 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
2983 BFD_RELOC_ARM_PREL31
);
2985 demand_empty_rest_of_line ();
2988 /* Directives: AEABI stack-unwind tables. */
2990 /* Parse an unwind_fnstart directive. Simply records the current location. */
2993 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
2995 demand_empty_rest_of_line ();
2996 /* Mark the start of the function. */
2997 unwind
.proc_start
= expr_build_dot ();
2999 /* Reset the rest of the unwind info. */
3000 unwind
.opcode_count
= 0;
3001 unwind
.table_entry
= NULL
;
3002 unwind
.personality_routine
= NULL
;
3003 unwind
.personality_index
= -1;
3004 unwind
.frame_size
= 0;
3005 unwind
.fp_offset
= 0;
3008 unwind
.sp_restored
= 0;
3012 /* Parse a handlerdata directive. Creates the exception handling table entry
3013 for the function. */
3016 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3018 demand_empty_rest_of_line ();
3019 if (unwind
.table_entry
)
3020 as_bad (_("dupicate .handlerdata directive"));
3022 create_unwind_entry (1);
3025 /* Parse an unwind_fnend directive. Generates the index table entry. */
3028 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3034 demand_empty_rest_of_line ();
3036 /* Add eh table entry. */
3037 if (unwind
.table_entry
== NULL
)
3038 val
= create_unwind_entry (0);
3042 /* Add index table entry. This is two words. */
3043 start_unwind_section (unwind
.saved_seg
, 1);
3044 frag_align (2, 0, 0);
3045 record_alignment (now_seg
, 2);
3047 ptr
= frag_more (8);
3048 where
= frag_now_fix () - 8;
3050 /* Self relative offset of the function start. */
3051 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3052 BFD_RELOC_ARM_PREL31
);
3054 /* Indicate dependency on EHABI-defined personality routines to the
3055 linker, if it hasn't been done already. */
3056 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3057 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3059 static const char *const name
[] = {
3060 "__aeabi_unwind_cpp_pr0",
3061 "__aeabi_unwind_cpp_pr1",
3062 "__aeabi_unwind_cpp_pr2"
3064 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3065 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3066 marked_pr_dependency
|= 1 << unwind
.personality_index
;
3067 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3068 = marked_pr_dependency
;
3072 /* Inline exception table entry. */
3073 md_number_to_chars (ptr
+ 4, val
, 4);
3075 /* Self relative offset of the table entry. */
3076 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3077 BFD_RELOC_ARM_PREL31
);
3079 /* Restore the original section. */
3080 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3084 /* Parse an unwind_cantunwind directive. */
3087 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3089 demand_empty_rest_of_line ();
3090 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3091 as_bad (_("personality routine specified for cantunwind frame"));
3093 unwind
.personality_index
= -2;
3097 /* Parse a personalityindex directive. */
3100 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3104 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3105 as_bad (_("duplicate .personalityindex directive"));
3109 if (exp
.X_op
!= O_constant
3110 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3112 as_bad (_("bad personality routine number"));
3113 ignore_rest_of_line ();
3117 unwind
.personality_index
= exp
.X_add_number
;
3119 demand_empty_rest_of_line ();
3123 /* Parse a personality directive. */
3126 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3130 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3131 as_bad (_("duplicate .personality directive"));
3133 name
= input_line_pointer
;
3134 c
= get_symbol_end ();
3135 p
= input_line_pointer
;
3136 unwind
.personality_routine
= symbol_find_or_make (name
);
3138 demand_empty_rest_of_line ();
3142 /* Parse a directive saving core registers. */
3145 s_arm_unwind_save_core (void)
3151 range
= parse_reg_list (&input_line_pointer
);
3154 as_bad (_("expected register list"));
3155 ignore_rest_of_line ();
3159 demand_empty_rest_of_line ();
3161 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3162 into .unwind_save {..., sp...}. We aren't bothered about the value of
3163 ip because it is clobbered by calls. */
3164 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3165 && (range
& 0x3000) == 0x1000)
3167 unwind
.opcode_count
--;
3168 unwind
.sp_restored
= 0;
3169 range
= (range
| 0x2000) & ~0x1000;
3170 unwind
.pending_offset
= 0;
3176 /* See if we can use the short opcodes. These pop a block of up to 8
3177 registers starting with r4, plus maybe r14. */
3178 for (n
= 0; n
< 8; n
++)
3180 /* Break at the first non-saved register. */
3181 if ((range
& (1 << (n
+ 4))) == 0)
3184 /* See if there are any other bits set. */
3185 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3187 /* Use the long form. */
3188 op
= 0x8000 | ((range
>> 4) & 0xfff);
3189 add_unwind_opcode (op
, 2);
3193 /* Use the short form. */
3195 op
= 0xa8; /* Pop r14. */
3197 op
= 0xa0; /* Do not pop r14. */
3199 add_unwind_opcode (op
, 1);
3206 op
= 0xb100 | (range
& 0xf);
3207 add_unwind_opcode (op
, 2);
3210 /* Record the number of bytes pushed. */
3211 for (n
= 0; n
< 16; n
++)
3213 if (range
& (1 << n
))
3214 unwind
.frame_size
+= 4;
3219 /* Parse a directive saving FPA registers. */
3222 s_arm_unwind_save_fpa (int reg
)
3228 /* Get Number of registers to transfer. */
3229 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3232 exp
.X_op
= O_illegal
;
3234 if (exp
.X_op
!= O_constant
)
3236 as_bad (_("expected , <constant>"));
3237 ignore_rest_of_line ();
3241 num_regs
= exp
.X_add_number
;
3243 if (num_regs
< 1 || num_regs
> 4)
3245 as_bad (_("number of registers must be in the range [1:4]"));
3246 ignore_rest_of_line ();
3250 demand_empty_rest_of_line ();
3255 op
= 0xb4 | (num_regs
- 1);
3256 add_unwind_opcode (op
, 1);
3261 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3262 add_unwind_opcode (op
, 2);
3264 unwind
.frame_size
+= num_regs
* 12;
3268 /* Parse a directive saving VFP registers. */
3271 s_arm_unwind_save_vfp (void)
3277 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3280 as_bad (_("expected register list"));
3281 ignore_rest_of_line ();
3285 demand_empty_rest_of_line ();
3290 op
= 0xb8 | (count
- 1);
3291 add_unwind_opcode (op
, 1);
3296 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3297 add_unwind_opcode (op
, 2);
3299 unwind
.frame_size
+= count
* 8 + 4;
3303 /* Parse a directive saving iWMMXt data registers. */
3306 s_arm_unwind_save_mmxwr (void)
3314 if (*input_line_pointer
== '{')
3315 input_line_pointer
++;
3319 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3323 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3328 as_tsktsk (_("register list not in ascending order"));
3331 if (*input_line_pointer
== '-')
3333 input_line_pointer
++;
3334 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3337 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3340 else if (reg
>= hi_reg
)
3342 as_bad (_("bad register range"));
3345 for (; reg
< hi_reg
; reg
++)
3349 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3351 if (*input_line_pointer
== '}')
3352 input_line_pointer
++;
3354 demand_empty_rest_of_line ();
3356 /* Generate any deferred opcodes because we're going to be looking at
3358 flush_pending_unwind ();
3360 for (i
= 0; i
< 16; i
++)
3362 if (mask
& (1 << i
))
3363 unwind
.frame_size
+= 8;
3366 /* Attempt to combine with a previous opcode. We do this because gcc
3367 likes to output separate unwind directives for a single block of
3369 if (unwind
.opcode_count
> 0)
3371 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3372 if ((i
& 0xf8) == 0xc0)
3375 /* Only merge if the blocks are contiguous. */
3378 if ((mask
& 0xfe00) == (1 << 9))
3380 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3381 unwind
.opcode_count
--;
3384 else if (i
== 6 && unwind
.opcode_count
>= 2)
3386 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3390 op
= 0xffff << (reg
- 1);
3392 || ((mask
& op
) == (1u << (reg
- 1))))
3394 op
= (1 << (reg
+ i
+ 1)) - 1;
3395 op
&= ~((1 << reg
) - 1);
3397 unwind
.opcode_count
-= 2;
3404 /* We want to generate opcodes in the order the registers have been
3405 saved, ie. descending order. */
3406 for (reg
= 15; reg
>= -1; reg
--)
3408 /* Save registers in blocks. */
3410 || !(mask
& (1 << reg
)))
3412 /* We found an unsaved reg. Generate opcodes to save the
3413 preceeding block. */
3419 op
= 0xc0 | (hi_reg
- 10);
3420 add_unwind_opcode (op
, 1);
3425 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3426 add_unwind_opcode (op
, 2);
3435 ignore_rest_of_line ();
3439 s_arm_unwind_save_mmxwcg (void)
3446 if (*input_line_pointer
== '{')
3447 input_line_pointer
++;
3451 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3455 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3461 as_tsktsk (_("register list not in ascending order"));
3464 if (*input_line_pointer
== '-')
3466 input_line_pointer
++;
3467 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3470 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3473 else if (reg
>= hi_reg
)
3475 as_bad (_("bad register range"));
3478 for (; reg
< hi_reg
; reg
++)
3482 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3484 if (*input_line_pointer
== '}')
3485 input_line_pointer
++;
3487 demand_empty_rest_of_line ();
3489 /* Generate any deferred opcodes because we're going to be looking at
3491 flush_pending_unwind ();
3493 for (reg
= 0; reg
< 16; reg
++)
3495 if (mask
& (1 << reg
))
3496 unwind
.frame_size
+= 4;
3499 add_unwind_opcode (op
, 2);
3502 ignore_rest_of_line ();
3506 /* Parse an unwind_save directive. */
3509 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED
)
3512 struct reg_entry
*reg
;
3513 bfd_boolean had_brace
= FALSE
;
3515 /* Figure out what sort of save we have. */
3516 peek
= input_line_pointer
;
3524 reg
= arm_reg_parse_multi (&peek
);
3528 as_bad (_("register expected"));
3529 ignore_rest_of_line ();
3538 as_bad (_("FPA .unwind_save does not take a register list"));
3539 ignore_rest_of_line ();
3542 s_arm_unwind_save_fpa (reg
->number
);
3545 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
3546 case REG_TYPE_VFD
: s_arm_unwind_save_vfp (); return;
3547 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
3548 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
3551 as_bad (_(".unwind_save does not support this kind of register"));
3552 ignore_rest_of_line ();
3557 /* Parse an unwind_movsp directive. */
3560 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
3565 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3568 as_bad (_(reg_expected_msgs
[REG_TYPE_RN
]));
3569 ignore_rest_of_line ();
3572 demand_empty_rest_of_line ();
3574 if (reg
== REG_SP
|| reg
== REG_PC
)
3576 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3580 if (unwind
.fp_reg
!= REG_SP
)
3581 as_bad (_("unexpected .unwind_movsp directive"));
3583 /* Generate opcode to restore the value. */
3585 add_unwind_opcode (op
, 1);
3587 /* Record the information for later. */
3588 unwind
.fp_reg
= reg
;
3589 unwind
.fp_offset
= unwind
.frame_size
;
3590 unwind
.sp_restored
= 1;
3593 /* Parse an unwind_pad directive. */
3596 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
3600 if (immediate_for_directive (&offset
) == FAIL
)
3605 as_bad (_("stack increment must be multiple of 4"));
3606 ignore_rest_of_line ();
3610 /* Don't generate any opcodes, just record the details for later. */
3611 unwind
.frame_size
+= offset
;
3612 unwind
.pending_offset
+= offset
;
3614 demand_empty_rest_of_line ();
3617 /* Parse an unwind_setfp directive. */
3620 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
3626 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3627 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3630 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3632 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
3634 as_bad (_("expected <reg>, <reg>"));
3635 ignore_rest_of_line ();
3639 /* Optional constant. */
3640 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3642 if (immediate_for_directive (&offset
) == FAIL
)
3648 demand_empty_rest_of_line ();
3650 if (sp_reg
!= 13 && sp_reg
!= unwind
.fp_reg
)
3652 as_bad (_("register must be either sp or set by a previous"
3653 "unwind_movsp directive"));
3657 /* Don't generate any opcodes, just record the information for later. */
3658 unwind
.fp_reg
= fp_reg
;
3661 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3663 unwind
.fp_offset
-= offset
;
3666 /* Parse an unwind_raw directive. */
3669 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
3672 /* This is an arbitrary limit. */
3673 unsigned char op
[16];
3677 if (exp
.X_op
== O_constant
3678 && skip_past_comma (&input_line_pointer
) != FAIL
)
3680 unwind
.frame_size
+= exp
.X_add_number
;
3684 exp
.X_op
= O_illegal
;
3686 if (exp
.X_op
!= O_constant
)
3688 as_bad (_("expected <offset>, <opcode>"));
3689 ignore_rest_of_line ();
3695 /* Parse the opcode. */
3700 as_bad (_("unwind opcode too long"));
3701 ignore_rest_of_line ();
3703 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
3705 as_bad (_("invalid unwind opcode"));
3706 ignore_rest_of_line ();
3709 op
[count
++] = exp
.X_add_number
;
3711 /* Parse the next byte. */
3712 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3718 /* Add the opcode bytes in reverse order. */
3720 add_unwind_opcode (op
[count
], 1);
3722 demand_empty_rest_of_line ();
3726 /* Parse a .eabi_attribute directive. */
3729 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
3732 bfd_boolean is_string
;
3739 if (exp
.X_op
!= O_constant
)
3742 tag
= exp
.X_add_number
;
3743 if (tag
== 4 || tag
== 5 || tag
== 32 || (tag
> 32 && (tag
& 1) != 0))
3748 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3750 if (tag
== 32 || !is_string
)
3753 if (exp
.X_op
!= O_constant
)
3755 as_bad (_("expected numeric constant"));
3756 ignore_rest_of_line ();
3759 i
= exp
.X_add_number
;
3761 if (tag
== Tag_compatibility
3762 && skip_past_comma (&input_line_pointer
) == FAIL
)
3764 as_bad (_("expected comma"));
3765 ignore_rest_of_line ();
3770 skip_whitespace(input_line_pointer
);
3771 if (*input_line_pointer
!= '"')
3773 input_line_pointer
++;
3774 s
= input_line_pointer
;
3775 while (*input_line_pointer
&& *input_line_pointer
!= '"')
3776 input_line_pointer
++;
3777 if (*input_line_pointer
!= '"')
3779 saved_char
= *input_line_pointer
;
3780 *input_line_pointer
= 0;
3788 if (tag
== Tag_compatibility
)
3789 elf32_arm_add_eabi_attr_compat (stdoutput
, i
, s
);
3791 elf32_arm_add_eabi_attr_string (stdoutput
, tag
, s
);
3793 elf32_arm_add_eabi_attr_int (stdoutput
, tag
, i
);
3797 *input_line_pointer
= saved_char
;
3798 input_line_pointer
++;
3800 demand_empty_rest_of_line ();
3803 as_bad (_("bad string constant"));
3804 ignore_rest_of_line ();
3807 as_bad (_("expected <tag> , <value>"));
3808 ignore_rest_of_line ();
3810 #endif /* OBJ_ELF */
3812 static void s_arm_arch (int);
3813 static void s_arm_cpu (int);
3814 static void s_arm_fpu (int);
3816 /* This table describes all the machine specific pseudo-ops the assembler
3817 has to support. The fields are:
3818 pseudo-op name without dot
3819 function to call to execute this pseudo-op
3820 Integer arg to pass to the function. */
3822 const pseudo_typeS md_pseudo_table
[] =
3824 /* Never called because '.req' does not start a line. */
3825 { "req", s_req
, 0 },
3826 /* Following two are likewise never called. */
3829 { "unreq", s_unreq
, 0 },
3830 { "bss", s_bss
, 0 },
3831 { "align", s_align
, 0 },
3832 { "arm", s_arm
, 0 },
3833 { "thumb", s_thumb
, 0 },
3834 { "code", s_code
, 0 },
3835 { "force_thumb", s_force_thumb
, 0 },
3836 { "thumb_func", s_thumb_func
, 0 },
3837 { "thumb_set", s_thumb_set
, 0 },
3838 { "even", s_even
, 0 },
3839 { "ltorg", s_ltorg
, 0 },
3840 { "pool", s_ltorg
, 0 },
3841 { "syntax", s_syntax
, 0 },
3842 { "cpu", s_arm_cpu
, 0 },
3843 { "arch", s_arm_arch
, 0 },
3844 { "fpu", s_arm_fpu
, 0 },
3846 { "word", s_arm_elf_cons
, 4 },
3847 { "long", s_arm_elf_cons
, 4 },
3848 { "rel31", s_arm_rel31
, 0 },
3849 { "fnstart", s_arm_unwind_fnstart
, 0 },
3850 { "fnend", s_arm_unwind_fnend
, 0 },
3851 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
3852 { "personality", s_arm_unwind_personality
, 0 },
3853 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
3854 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
3855 { "save", s_arm_unwind_save
, 0 },
3856 { "movsp", s_arm_unwind_movsp
, 0 },
3857 { "pad", s_arm_unwind_pad
, 0 },
3858 { "setfp", s_arm_unwind_setfp
, 0 },
3859 { "unwind_raw", s_arm_unwind_raw
, 0 },
3860 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
3864 { "extend", float_cons
, 'x' },
3865 { "ldouble", float_cons
, 'x' },
3866 { "packed", float_cons
, 'p' },
3870 /* Parser functions used exclusively in instruction operands. */
3872 /* Generic immediate-value read function for use in insn parsing.
3873 STR points to the beginning of the immediate (the leading #);
3874 VAL receives the value; if the value is outside [MIN, MAX]
3875 issue an error. PREFIX_OPT is true if the immediate prefix is
3879 parse_immediate (char **str
, int *val
, int min
, int max
,
3880 bfd_boolean prefix_opt
)
3883 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
3884 if (exp
.X_op
!= O_constant
)
3886 inst
.error
= _("constant expression required");
3890 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
3892 inst
.error
= _("immediate value out of range");
3896 *val
= exp
.X_add_number
;
3900 /* Less-generic immediate-value read function with the possibility of loading a
3901 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3902 instructions. Puts the result directly in inst.operands[i]. */
3905 parse_big_immediate (char **str
, int i
)
3910 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
3912 if (exp
.X_op
== O_constant
)
3913 inst
.operands
[i
].imm
= exp
.X_add_number
;
3914 else if (exp
.X_op
== O_big
3915 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
3916 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
3918 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
3919 /* Bignums have their least significant bits in
3920 generic_bignum[0]. Make sure we put 32 bits in imm and
3921 32 bits in reg, in a (hopefully) portable way. */
3922 assert (parts
!= 0);
3923 inst
.operands
[i
].imm
= 0;
3924 for (j
= 0; j
< parts
; j
++, idx
++)
3925 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
3926 << (LITTLENUM_NUMBER_OF_BITS
* j
);
3927 inst
.operands
[i
].reg
= 0;
3928 for (j
= 0; j
< parts
; j
++, idx
++)
3929 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
3930 << (LITTLENUM_NUMBER_OF_BITS
* j
);
3931 inst
.operands
[i
].regisimm
= 1;
3941 /* Returns the pseudo-register number of an FPA immediate constant,
3942 or FAIL if there isn't a valid constant here. */
3945 parse_fpa_immediate (char ** str
)
3947 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
3953 /* First try and match exact strings, this is to guarantee
3954 that some formats will work even for cross assembly. */
3956 for (i
= 0; fp_const
[i
]; i
++)
3958 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
3962 *str
+= strlen (fp_const
[i
]);
3963 if (is_end_of_line
[(unsigned char) **str
])
3969 /* Just because we didn't get a match doesn't mean that the constant
3970 isn't valid, just that it is in a format that we don't
3971 automatically recognize. Try parsing it with the standard
3972 expression routines. */
3974 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
3976 /* Look for a raw floating point number. */
3977 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
3978 && is_end_of_line
[(unsigned char) *save_in
])
3980 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3982 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3984 if (words
[j
] != fp_values
[i
][j
])
3988 if (j
== MAX_LITTLENUMS
)
3996 /* Try and parse a more complex expression, this will probably fail
3997 unless the code uses a floating point prefix (eg "0f"). */
3998 save_in
= input_line_pointer
;
3999 input_line_pointer
= *str
;
4000 if (expression (&exp
) == absolute_section
4001 && exp
.X_op
== O_big
4002 && exp
.X_add_number
< 0)
4004 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4006 if (gen_to_words (words
, 5, (long) 15) == 0)
4008 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4010 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4012 if (words
[j
] != fp_values
[i
][j
])
4016 if (j
== MAX_LITTLENUMS
)
4018 *str
= input_line_pointer
;
4019 input_line_pointer
= save_in
;
4026 *str
= input_line_pointer
;
4027 input_line_pointer
= save_in
;
4028 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* Expected exponent pattern depends on bit 29 (the "B" bit).  */
  int expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)	/* Low mantissa bits must all be clear.  */
    return 0;
  return ((imm & 0x7e000000) ^ expected) == 0;
}
4042 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4043 0baBbbbbbc defgh000 00000000 00000000.
4044 The minus-zero case needs special handling, since it can't be encoded in the
4045 "quarter-precision" float format, but can nonetheless be loaded as an integer
4049 parse_qfloat_immediate (char **ccp
, int *immed
)
4052 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4054 skip_past_char (&str
, '#');
4056 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4058 unsigned fpword
= 0;
4061 /* Our FP word must be 32 bits (single-precision FP). */
4062 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4064 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4068 if (is_quarter_float (fpword
) || fpword
== 0x80000000)
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
4103 /* Parse a <shift> specifier on an ARM data processing instruction.
4104 This has three forms:
4106 (LSL|LSR|ASL|ASR|ROR) Rs
4107 (LSL|LSR|ASL|ASR|ROR) #imm
4110 Note that ASL is assimilated to LSL in the instruction encoding, and
4111 RRX to ROR #0 (which cannot be written as such). */
4114 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4116 const struct asm_shift_name
*shift_name
;
4117 enum shift_kind shift
;
4122 for (p
= *str
; ISALPHA (*p
); p
++)
4127 inst
.error
= _("shift expression expected");
4131 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4133 if (shift_name
== NULL
)
4135 inst
.error
= _("shift expression expected");
4139 shift
= shift_name
->kind
;
4143 case NO_SHIFT_RESTRICT
:
4144 case SHIFT_IMMEDIATE
: break;
4146 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4147 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4149 inst
.error
= _("'LSL' or 'ASR' required");
4154 case SHIFT_LSL_IMMEDIATE
:
4155 if (shift
!= SHIFT_LSL
)
4157 inst
.error
= _("'LSL' required");
4162 case SHIFT_ASR_IMMEDIATE
:
4163 if (shift
!= SHIFT_ASR
)
4165 inst
.error
= _("'ASR' required");
4173 if (shift
!= SHIFT_RRX
)
4175 /* Whitespace can appear here if the next thing is a bare digit. */
4176 skip_whitespace (p
);
4178 if (mode
== NO_SHIFT_RESTRICT
4179 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4181 inst
.operands
[i
].imm
= reg
;
4182 inst
.operands
[i
].immisreg
= 1;
4184 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4187 inst
.operands
[i
].shift_kind
= shift
;
4188 inst
.operands
[i
].shifted
= 1;
4193 /* Parse a <shifter_operand> for an ARM data processing instruction:
4196 #<immediate>, <rotate>
4200 where <shift> is defined by parse_shift above, and <rotate> is a
4201 multiple of 2 between 0 and 30. Validation of immediate operands
4202 is deferred to md_apply_fix. */
4205 parse_shifter_operand (char **str
, int i
)
4210 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4212 inst
.operands
[i
].reg
= value
;
4213 inst
.operands
[i
].isreg
= 1;
4215 /* parse_shift will override this if appropriate */
4216 inst
.reloc
.exp
.X_op
= O_constant
;
4217 inst
.reloc
.exp
.X_add_number
= 0;
4219 if (skip_past_comma (str
) == FAIL
)
4222 /* Shift operation on register. */
4223 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4226 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4229 if (skip_past_comma (str
) == SUCCESS
)
4231 /* #x, y -- ie explicit rotation by Y. */
4232 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4235 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4237 inst
.error
= _("constant expression expected");
4241 value
= expr
.X_add_number
;
4242 if (value
< 0 || value
> 30 || value
% 2 != 0)
4244 inst
.error
= _("invalid rotation");
4247 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4249 inst
.error
= _("invalid constant");
4253 /* Convert to decoded value. md_apply_fix will put it back. */
4254 inst
.reloc
.exp
.X_add_number
4255 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4256 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4259 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4260 inst
.reloc
.pc_rel
= 0;
4264 /* Parse all forms of an ARM address expression. Information is written
4265 to inst.operands[i] and/or inst.reloc.
4267 Preindexed addressing (.preind=1):
4269 [Rn, #offset] .reg=Rn .reloc.exp=offset
4270 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4271 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4272 .shift_kind=shift .reloc.exp=shift_imm
4274 These three may have a trailing ! which causes .writeback to be set also.
4276 Postindexed addressing (.postind=1, .writeback=1):
4278 [Rn], #offset .reg=Rn .reloc.exp=offset
4279 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4280 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4281 .shift_kind=shift .reloc.exp=shift_imm
4283 Unindexed addressing (.preind=0, .postind=0):
4285 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4289 [Rn]{!} shorthand for [Rn,#0]{!}
4290 =immediate .isreg=0 .reloc.exp=immediate
4291 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4293 It is the caller's responsibility to check for addressing modes not
4294 supported by the instruction, and to set inst.reloc.type. */
4297 parse_address (char **str
, int i
)
4302 if (skip_past_char (&p
, '[') == FAIL
)
4304 if (skip_past_char (&p
, '=') == FAIL
)
4306 /* bare address - translate to PC-relative offset */
4307 inst
.reloc
.pc_rel
= 1;
4308 inst
.operands
[i
].reg
= REG_PC
;
4309 inst
.operands
[i
].isreg
= 1;
4310 inst
.operands
[i
].preind
= 1;
4312 /* else a load-constant pseudo op, no special treatment needed here */
4314 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4321 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4323 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4326 inst
.operands
[i
].reg
= reg
;
4327 inst
.operands
[i
].isreg
= 1;
4329 if (skip_past_comma (&p
) == SUCCESS
)
4331 inst
.operands
[i
].preind
= 1;
4334 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4336 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4338 inst
.operands
[i
].imm
= reg
;
4339 inst
.operands
[i
].immisreg
= 1;
4341 if (skip_past_comma (&p
) == SUCCESS
)
4342 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4345 else if (skip_past_char (&p
, ':') == SUCCESS
)
4347 /* FIXME: '@' should be used here, but it's filtered out by generic
4348 code before we get to see it here. This may be subject to
4351 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4352 if (exp
.X_op
!= O_constant
)
4354 inst
.error
= _("alignment must be constant");
4357 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4358 inst
.operands
[i
].immisalign
= 1;
4359 /* Alignments are not pre-indexes. */
4360 inst
.operands
[i
].preind
= 0;
4364 if (inst
.operands
[i
].negative
)
4366 inst
.operands
[i
].negative
= 0;
4369 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4374 if (skip_past_char (&p
, ']') == FAIL
)
4376 inst
.error
= _("']' expected");
4380 if (skip_past_char (&p
, '!') == SUCCESS
)
4381 inst
.operands
[i
].writeback
= 1;
4383 else if (skip_past_comma (&p
) == SUCCESS
)
4385 if (skip_past_char (&p
, '{') == SUCCESS
)
4387 /* [Rn], {expr} - unindexed, with option */
4388 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4389 0, 255, TRUE
) == FAIL
)
4392 if (skip_past_char (&p
, '}') == FAIL
)
4394 inst
.error
= _("'}' expected at end of 'option' field");
4397 if (inst
.operands
[i
].preind
)
4399 inst
.error
= _("cannot combine index with option");
4407 inst
.operands
[i
].postind
= 1;
4408 inst
.operands
[i
].writeback
= 1;
4410 if (inst
.operands
[i
].preind
)
4412 inst
.error
= _("cannot combine pre- and post-indexing");
4417 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4419 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4421 /* We might be using the immediate for alignment already. If we
4422 are, OR the register number into the low-order bits. */
4423 if (inst
.operands
[i
].immisalign
)
4424 inst
.operands
[i
].imm
|= reg
;
4426 inst
.operands
[i
].imm
= reg
;
4427 inst
.operands
[i
].immisreg
= 1;
4429 if (skip_past_comma (&p
) == SUCCESS
)
4430 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4435 if (inst
.operands
[i
].negative
)
4437 inst
.operands
[i
].negative
= 0;
4440 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4446 /* If at this point neither .preind nor .postind is set, we have a
4447 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4448 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4450 inst
.operands
[i
].preind
= 1;
4451 inst
.reloc
.exp
.X_op
= O_constant
;
4452 inst
.reloc
.exp
.X_add_number
= 0;
4458 /* Parse an operand for a MOVW or MOVT instruction. */
4460 parse_half (char **str
)
4465 skip_past_char (&p
, '#');
4466 if (strncasecmp (p
, ":lower16:", 9) == 0)
4467 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
4468 else if (strncasecmp (p
, ":upper16:", 9) == 0)
4469 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
4471 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
4477 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4480 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
4482 if (inst
.reloc
.exp
.X_op
!= O_constant
)
4484 inst
.error
= _("constant expression expected");
4487 if (inst
.reloc
.exp
.X_add_number
< 0
4488 || inst
.reloc
.exp
.X_add_number
> 0xffff)
4490 inst
.error
= _("immediate value out of range");
4498 /* Miscellaneous. */
4500 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4501 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4503 parse_psr (char **str
)
4506 unsigned long psr_field
;
4507 const struct asm_psr
*psr
;
4510 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4511 feature for ease of use and backwards compatibility. */
4513 if (strncasecmp (p
, "SPSR", 4) == 0)
4514 psr_field
= SPSR_BIT
;
4515 else if (strncasecmp (p
, "CPSR", 4) == 0)
4522 while (ISALNUM (*p
) || *p
== '_');
4524 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4535 /* A suffix follows. */
4541 while (ISALNUM (*p
) || *p
== '_');
4543 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4547 psr_field
|= psr
->field
;
4552 goto error
; /* Garbage after "[CS]PSR". */
4554 psr_field
|= (PSR_c
| PSR_f
);
4560 inst
.error
= _("flag for {c}psr instruction expected");
4564 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4565 value suitable for splatting into the AIF field of the instruction. */
4568 parse_cps_flags (char **str
)
4577 case '\0': case ',':
4580 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4581 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4582 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4585 inst
.error
= _("unrecognized CPS flag");
4590 if (saw_a_flag
== 0)
4592 inst
.error
= _("missing CPS flags");
4600 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4601 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4604 parse_endian_specifier (char **str
)
4609 if (strncasecmp (s
, "BE", 2))
4611 else if (strncasecmp (s
, "LE", 2))
4615 inst
.error
= _("valid endian specifiers are be or le");
4619 if (ISALNUM (s
[2]) || s
[2] == '_')
4621 inst
.error
= _("valid endian specifiers are be or le");
4626 return little_endian
;
4629 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4630 value suitable for poking into the rotate field of an sxt or sxta
4631 instruction, or FAIL on error. */
4634 parse_ror (char **str
)
4639 if (strncasecmp (s
, "ROR", 3) == 0)
4643 inst
.error
= _("missing rotation field after comma");
4647 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
4652 case 0: *str
= s
; return 0x0;
4653 case 8: *str
= s
; return 0x1;
4654 case 16: *str
= s
; return 0x2;
4655 case 24: *str
= s
; return 0x3;
4658 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
4663 /* Parse a conditional code (from conds[] below). The value returned is in the
4664 range 0 .. 14, or FAIL. */
4666 parse_cond (char **str
)
4669 const struct asm_cond
*c
;
4672 while (ISALPHA (*q
))
4675 c
= hash_find_n (arm_cond_hsh
, p
, q
- p
);
4678 inst
.error
= _("condition required");
4686 /* Parse an option for a barrier instruction. Returns the encoding for the
4689 parse_barrier (char **str
)
4692 const struct asm_barrier_opt
*o
;
4695 while (ISALPHA (*q
))
4698 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
4706 /* Parse the operands of a table branch instruction. Similar to a memory
4709 parse_tb (char **str
)
4714 if (skip_past_char (&p
, '[') == FAIL
)
4716 inst
.error
= _("'[' expected");
4720 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4722 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4725 inst
.operands
[0].reg
= reg
;
4727 if (skip_past_comma (&p
) == FAIL
)
4729 inst
.error
= _("',' expected");
4733 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4735 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4738 inst
.operands
[0].imm
= reg
;
4740 if (skip_past_comma (&p
) == SUCCESS
)
4742 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
4744 if (inst
.reloc
.exp
.X_add_number
!= 1)
4746 inst
.error
= _("invalid shift");
4749 inst
.operands
[0].shifted
= 1;
4752 if (skip_past_char (&p
, ']') == FAIL
)
4754 inst
.error
= _("']' expected");
4761 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4762 information on the types the operands can take and how they are encoded.
4763 Up to four operands may be read; this function handles setting the
4764 ".present" field for each read operand itself.
4765 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4766 else returns FAIL. */
4769 parse_neon_mov (char **str
, int *which_operand
)
4771 int i
= *which_operand
, val
;
4772 enum arm_reg_type rtype
;
4774 struct neon_type_el optype
;
4776 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4778 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4779 inst
.operands
[i
].reg
= val
;
4780 inst
.operands
[i
].isscalar
= 1;
4781 inst
.operands
[i
].vectype
= optype
;
4782 inst
.operands
[i
++].present
= 1;
4784 if (skip_past_comma (&ptr
) == FAIL
)
4787 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4790 inst
.operands
[i
].reg
= val
;
4791 inst
.operands
[i
].isreg
= 1;
4792 inst
.operands
[i
].present
= 1;
4794 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
4797 /* Cases 0, 1, 2, 3, 5 (D only). */
4798 if (skip_past_comma (&ptr
) == FAIL
)
4801 inst
.operands
[i
].reg
= val
;
4802 inst
.operands
[i
].isreg
= 1;
4803 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4804 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
4805 inst
.operands
[i
].isvec
= 1;
4806 inst
.operands
[i
].vectype
= optype
;
4807 inst
.operands
[i
++].present
= 1;
4809 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4811 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
4812 Case 13: VMOV <Sd>, <Rm> */
4813 inst
.operands
[i
].reg
= val
;
4814 inst
.operands
[i
].isreg
= 1;
4815 inst
.operands
[i
].present
= 1;
4817 if (rtype
== REG_TYPE_NQ
)
4819 first_error (_("can't use Neon quad register here"));
4822 else if (rtype
!= REG_TYPE_VFS
)
4825 if (skip_past_comma (&ptr
) == FAIL
)
4827 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4829 inst
.operands
[i
].reg
= val
;
4830 inst
.operands
[i
].isreg
= 1;
4831 inst
.operands
[i
].present
= 1;
4834 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
4835 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4836 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
4837 Case 10: VMOV.F32 <Sd>, #<imm>
4838 Case 11: VMOV.F64 <Dd>, #<imm> */
4840 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
4841 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4842 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4844 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
4847 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4848 Case 1: VMOV<c><q> <Dd>, <Dm>
4849 Case 8: VMOV.F32 <Sd>, <Sm>
4850 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
4852 inst
.operands
[i
].reg
= val
;
4853 inst
.operands
[i
].isreg
= 1;
4854 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4855 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
4856 inst
.operands
[i
].isvec
= 1;
4857 inst
.operands
[i
].vectype
= optype
;
4858 inst
.operands
[i
].present
= 1;
4860 if (skip_past_comma (&ptr
) == SUCCESS
)
4865 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4868 inst
.operands
[i
].reg
= val
;
4869 inst
.operands
[i
].isreg
= 1;
4870 inst
.operands
[i
++].present
= 1;
4872 if (skip_past_comma (&ptr
) == FAIL
)
4875 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4878 inst
.operands
[i
].reg
= val
;
4879 inst
.operands
[i
].isreg
= 1;
4880 inst
.operands
[i
++].present
= 1;
4885 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4889 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4892 inst
.operands
[i
].reg
= val
;
4893 inst
.operands
[i
].isreg
= 1;
4894 inst
.operands
[i
++].present
= 1;
4896 if (skip_past_comma (&ptr
) == FAIL
)
4899 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4901 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4902 inst
.operands
[i
].reg
= val
;
4903 inst
.operands
[i
].isscalar
= 1;
4904 inst
.operands
[i
].present
= 1;
4905 inst
.operands
[i
].vectype
= optype
;
4907 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4909 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4910 inst
.operands
[i
].reg
= val
;
4911 inst
.operands
[i
].isreg
= 1;
4912 inst
.operands
[i
++].present
= 1;
4914 if (skip_past_comma (&ptr
) == FAIL
)
4917 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
4920 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
4924 inst
.operands
[i
].reg
= val
;
4925 inst
.operands
[i
].isreg
= 1;
4926 inst
.operands
[i
].isvec
= 1;
4927 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
4928 inst
.operands
[i
].vectype
= optype
;
4929 inst
.operands
[i
].present
= 1;
4931 if (rtype
== REG_TYPE_VFS
)
4935 if (skip_past_comma (&ptr
) == FAIL
)
4937 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
4940 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
4943 inst
.operands
[i
].reg
= val
;
4944 inst
.operands
[i
].isreg
= 1;
4945 inst
.operands
[i
].isvec
= 1;
4946 inst
.operands
[i
].issingle
= 1;
4947 inst
.operands
[i
].vectype
= optype
;
4948 inst
.operands
[i
].present
= 1;
4951 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
4955 inst
.operands
[i
].reg
= val
;
4956 inst
.operands
[i
].isreg
= 1;
4957 inst
.operands
[i
].isvec
= 1;
4958 inst
.operands
[i
].issingle
= 1;
4959 inst
.operands
[i
].vectype
= optype
;
4960 inst
.operands
[i
++].present
= 1;
4965 first_error (_("parse error"));
4969 /* Successfully parsed the operands. Update args. */
4975 first_error (_("expected comma"));
4979 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
4983 /* Matcher codes for parse_operands. */
4984 enum operand_parse_code
4986 OP_stop
, /* end of line */
4988 OP_RR
, /* ARM register */
4989 OP_RRnpc
, /* ARM register, not r15 */
4990 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
4991 OP_RRw
, /* ARM register, not r15, optional trailing ! */
4992 OP_RCP
, /* Coprocessor number */
4993 OP_RCN
, /* Coprocessor register */
4994 OP_RF
, /* FPA register */
4995 OP_RVS
, /* VFP single precision register */
4996 OP_RVD
, /* VFP double precision register (0..15) */
4997 OP_RND
, /* Neon double precision register (0..31) */
4998 OP_RNQ
, /* Neon quad precision register */
4999 OP_RVSD
, /* VFP single or double precision register */
5000 OP_RNDQ
, /* Neon double or quad precision register */
5001 OP_RNSDQ
, /* Neon single, double or quad precision register */
5002 OP_RNSC
, /* Neon scalar D[X] */
5003 OP_RVC
, /* VFP control register */
5004 OP_RMF
, /* Maverick F register */
5005 OP_RMD
, /* Maverick D register */
5006 OP_RMFX
, /* Maverick FX register */
5007 OP_RMDX
, /* Maverick DX register */
5008 OP_RMAX
, /* Maverick AX register */
5009 OP_RMDS
, /* Maverick DSPSC register */
5010 OP_RIWR
, /* iWMMXt wR register */
5011 OP_RIWC
, /* iWMMXt wC register */
5012 OP_RIWG
, /* iWMMXt wCG register */
5013 OP_RXA
, /* XScale accumulator register */
5015 OP_REGLST
, /* ARM register list */
5016 OP_VRSLST
, /* VFP single-precision register list */
5017 OP_VRDLST
, /* VFP double-precision register list */
5018 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
5019 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
5020 OP_NSTRLST
, /* Neon element/structure list */
5022 OP_NILO
, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5023 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
5024 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
5025 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
5026 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
5027 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
5028 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
5029 OP_VMOV
, /* Neon VMOV operands. */
5030 OP_RNDQ_IMVNb
,/* Neon D or Q reg, or immediate good for VMVN. */
5031 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
5033 OP_I0
, /* immediate zero */
5034 OP_I7
, /* immediate value 0 .. 7 */
5035 OP_I15
, /* 0 .. 15 */
5036 OP_I16
, /* 1 .. 16 */
5037 OP_I16z
, /* 0 .. 16 */
5038 OP_I31
, /* 0 .. 31 */
5039 OP_I31w
, /* 0 .. 31, optional trailing ! */
5040 OP_I32
, /* 1 .. 32 */
5041 OP_I32z
, /* 0 .. 32 */
5042 OP_I63
, /* 0 .. 63 */
5043 OP_I63s
, /* -64 .. 63 */
5044 OP_I64
, /* 1 .. 64 */
5045 OP_I64z
, /* 0 .. 64 */
5046 OP_I255
, /* 0 .. 255 */
5048 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
5049 OP_I7b
, /* 0 .. 7 */
5050 OP_I15b
, /* 0 .. 15 */
5051 OP_I31b
, /* 0 .. 31 */
5053 OP_SH
, /* shifter operand */
5054 OP_ADDR
, /* Memory address expression (any mode) */
5055 OP_EXP
, /* arbitrary expression */
5056 OP_EXPi
, /* same, with optional immediate prefix */
5057 OP_EXPr
, /* same, with optional relocation suffix */
5058 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
5060 OP_CPSF
, /* CPS flags */
5061 OP_ENDI
, /* Endianness specifier */
5062 OP_PSR
, /* CPSR/SPSR mask for msr */
5063 OP_COND
, /* conditional code */
5064 OP_TB
, /* Table branch. */
5066 OP_RVC_PSR
, /* CPSR/SPSR mask for msr, or VFP control register. */
5067 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
5069 OP_RRnpc_I0
, /* ARM register or literal 0 */
5070 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
5071 OP_RR_EXi
, /* ARM register or expression with imm prefix */
5072 OP_RF_IF
, /* FPA register or immediate */
5073 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
5075 /* Optional operands. */
5076 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
5077 OP_oI31b
, /* 0 .. 31 */
5078 OP_oI32b
, /* 1 .. 32 */
5079 OP_oIffffb
, /* 0 .. 65535 */
5080 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
5082 OP_oRR
, /* ARM register */
5083 OP_oRRnpc
, /* ARM register, not the PC */
5084 OP_oRND
, /* Optional Neon double precision register */
5085 OP_oRNQ
, /* Optional Neon quad precision register */
5086 OP_oRNDQ
, /* Optional Neon double or quad precision register */
5087 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
5088 OP_oSHll
, /* LSL immediate */
5089 OP_oSHar
, /* ASR immediate */
5090 OP_oSHllar
, /* LSL or ASR immediate */
5091 OP_oROR
, /* ROR 0/8/16/24 */
5092 OP_oBARRIER
, /* Option argument for a barrier instruction. */
5094 OP_FIRST_OPTIONAL
= OP_oI7b
5097 /* Generic instruction operand parser. This does no encoding and no
5098 semantic validation; it merely squirrels values away in the inst
5099 structure. Returns SUCCESS or FAIL depending on whether the
5100 specified grammar matched. */
5102 parse_operands (char *str
, const unsigned char *pattern
)
5104 unsigned const char *upat
= pattern
;
5105 char *backtrack_pos
= 0;
5106 const char *backtrack_error
= 0;
5107 int i
, val
, backtrack_index
= 0;
5108 enum arm_reg_type rtype
;
5110 #define po_char_or_fail(chr) do { \
5111 if (skip_past_char (&str, chr) == FAIL) \
5115 #define po_reg_or_fail(regtype) do { \
5116 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5117 &inst.operands[i].vectype); \
5120 first_error (_(reg_expected_msgs[regtype])); \
5123 inst.operands[i].reg = val; \
5124 inst.operands[i].isreg = 1; \
5125 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5126 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5127 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5128 || rtype == REG_TYPE_VFD \
5129 || rtype == REG_TYPE_NQ); \
5132 #define po_reg_or_goto(regtype, label) do { \
5133 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5134 &inst.operands[i].vectype); \
5138 inst.operands[i].reg = val; \
5139 inst.operands[i].isreg = 1; \
5140 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5141 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5142 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5143 || rtype == REG_TYPE_VFD \
5144 || rtype == REG_TYPE_NQ); \
5147 #define po_imm_or_fail(min, max, popt) do { \
5148 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5150 inst.operands[i].imm = val; \
5153 #define po_scalar_or_goto(elsz, label) do { \
5154 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5157 inst.operands[i].reg = val; \
5158 inst.operands[i].isscalar = 1; \
5161 #define po_misc_or_fail(expr) do { \
5166 skip_whitespace (str
);
5168 for (i
= 0; upat
[i
] != OP_stop
; i
++)
5170 if (upat
[i
] >= OP_FIRST_OPTIONAL
)
5172 /* Remember where we are in case we need to backtrack. */
5173 assert (!backtrack_pos
);
5174 backtrack_pos
= str
;
5175 backtrack_error
= inst
.error
;
5176 backtrack_index
= i
;
5180 po_char_or_fail (',');
5188 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
5189 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
5190 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
5191 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
5192 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
5193 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
5195 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
5196 case OP_RVC
: po_reg_or_fail (REG_TYPE_VFC
); break;
5197 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
5198 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
5199 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
5200 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
5201 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
5202 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
5203 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
5204 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
5205 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
5206 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
5208 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
5210 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
5211 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
5213 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
5215 /* Neon scalar. Using an element size of 8 means that some invalid
5216 scalars are accepted here, so deal with those in later code. */
5217 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
5219 /* WARNING: We can expand to two operands here. This has the potential
5220 to totally confuse the backtracking mechanism! It will be OK at
5221 least as long as we don't try to use optional args as well,
5225 po_reg_or_goto (REG_TYPE_NDQ
, try_imm
);
5227 skip_past_comma (&str
);
5228 po_reg_or_goto (REG_TYPE_NDQ
, one_reg_only
);
5231 /* Optional register operand was omitted. Unfortunately, it's in
5232 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5233 here (this is a bit grotty). */
5234 inst
.operands
[i
] = inst
.operands
[i
-1];
5235 inst
.operands
[i
-1].present
= 0;
5238 /* Immediate gets verified properly later, so accept any now. */
5239 po_imm_or_fail (INT_MIN
, INT_MAX
, TRUE
);
5245 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
5248 po_imm_or_fail (0, 0, TRUE
);
5253 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
5258 po_scalar_or_goto (8, try_rr
);
5261 po_reg_or_fail (REG_TYPE_RN
);
5267 po_scalar_or_goto (8, try_nsdq
);
5270 po_reg_or_fail (REG_TYPE_NSDQ
);
5276 po_scalar_or_goto (8, try_ndq
);
5279 po_reg_or_fail (REG_TYPE_NDQ
);
5285 po_scalar_or_goto (8, try_vfd
);
5288 po_reg_or_fail (REG_TYPE_VFD
);
5293 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5294 not careful then bad things might happen. */
5295 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
5300 po_reg_or_goto (REG_TYPE_NDQ
, try_mvnimm
);
5303 /* There's a possibility of getting a 64-bit immediate here, so
5304 we need special handling. */
5305 if (parse_big_immediate (&str
, i
) == FAIL
)
5307 inst
.error
= _("immediate value is out of range");
5315 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
5318 po_imm_or_fail (0, 63, TRUE
);
5323 po_char_or_fail ('[');
5324 po_reg_or_fail (REG_TYPE_RN
);
5325 po_char_or_fail (']');
5329 po_reg_or_fail (REG_TYPE_RN
);
5330 if (skip_past_char (&str
, '!') == SUCCESS
)
5331 inst
.operands
[i
].writeback
= 1;
5335 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
5336 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
5337 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
5338 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
5339 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
5340 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
5341 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
5342 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
5343 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
5344 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
5345 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
5346 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
5348 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
5350 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
5351 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
5353 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
5354 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
5355 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
5357 /* Immediate variants */
5359 po_char_or_fail ('{');
5360 po_imm_or_fail (0, 255, TRUE
);
5361 po_char_or_fail ('}');
5365 /* The expression parser chokes on a trailing !, so we have
5366 to find it first and zap it. */
5369 while (*s
&& *s
!= ',')
5374 inst
.operands
[i
].writeback
= 1;
5376 po_imm_or_fail (0, 31, TRUE
);
5384 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5389 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5394 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5396 if (inst
.reloc
.exp
.X_op
== O_symbol
)
5398 val
= parse_reloc (&str
);
5401 inst
.error
= _("unrecognized relocation suffix");
5404 else if (val
!= BFD_RELOC_UNUSED
)
5406 inst
.operands
[i
].imm
= val
;
5407 inst
.operands
[i
].hasreloc
= 1;
5412 /* Operand for MOVW or MOVT. */
5414 po_misc_or_fail (parse_half (&str
));
5417 /* Register or expression */
5418 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
5419 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
5421 /* Register or immediate */
5422 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
5423 I0
: po_imm_or_fail (0, 0, FALSE
); break;
5425 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
5427 if (!is_immediate_prefix (*str
))
5430 val
= parse_fpa_immediate (&str
);
5433 /* FPA immediates are encoded as registers 8-15.
5434 parse_fpa_immediate has already applied the offset. */
5435 inst
.operands
[i
].reg
= val
;
5436 inst
.operands
[i
].isreg
= 1;
5439 /* Two kinds of register */
5442 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5443 if (rege
->type
!= REG_TYPE_MMXWR
5444 && rege
->type
!= REG_TYPE_MMXWC
5445 && rege
->type
!= REG_TYPE_MMXWCG
)
5447 inst
.error
= _("iWMMXt data or control register expected");
5450 inst
.operands
[i
].reg
= rege
->number
;
5451 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
5456 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
5457 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
5458 case OP_oROR
: val
= parse_ror (&str
); break;
5459 case OP_PSR
: val
= parse_psr (&str
); break;
5460 case OP_COND
: val
= parse_cond (&str
); break;
5461 case OP_oBARRIER
:val
= parse_barrier (&str
); break;
5464 po_reg_or_goto (REG_TYPE_VFC
, try_psr
);
5465 inst
.operands
[i
].isvec
= 1; /* Mark VFP control reg as vector. */
5468 val
= parse_psr (&str
);
5472 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
5475 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5477 if (strncasecmp (str
, "APSR_", 5) == 0)
5484 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
5485 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
5486 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
5487 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
5488 default: found
= 16;
5492 inst
.operands
[i
].isvec
= 1;
5499 po_misc_or_fail (parse_tb (&str
));
5502 /* Register lists */
5504 val
= parse_reg_list (&str
);
5507 inst
.operands
[1].writeback
= 1;
5513 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
5517 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
5521 /* Allow Q registers too. */
5522 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5527 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5529 inst
.operands
[i
].issingle
= 1;
5534 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5539 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
5540 &inst
.operands
[i
].vectype
);
5543 /* Addressing modes */
5545 po_misc_or_fail (parse_address (&str
, i
));
5549 po_misc_or_fail (parse_shifter_operand (&str
, i
));
5553 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
5557 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
5561 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
5565 as_fatal ("unhandled operand code %d", upat
[i
]);
5568 /* Various value-based sanity checks and shared operations. We
5569 do not signal immediate failures for the register constraints;
5570 this allows a syntax error to take precedence. */
5578 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
5579 inst
.error
= BAD_PC
;
5597 inst
.operands
[i
].imm
= val
;
5604 /* If we get here, this operand was successfully parsed. */
5605 inst
.operands
[i
].present
= 1;
5609 inst
.error
= BAD_ARGS
;
5614 /* The parse routine should already have set inst.error, but set a
5615 defaut here just in case. */
5617 inst
.error
= _("syntax error");
5621 /* Do not backtrack over a trailing optional argument that
5622 absorbed some text. We will only fail again, with the
5623 'garbage following instruction' error message, which is
5624 probably less helpful than the current one. */
5625 if (backtrack_index
== i
&& backtrack_pos
!= str
5626 && upat
[i
+1] == OP_stop
)
5629 inst
.error
= _("syntax error");
5633 /* Try again, skipping the optional argument at backtrack_pos. */
5634 str
= backtrack_pos
;
5635 inst
.error
= backtrack_error
;
5636 inst
.operands
[backtrack_index
].present
= 0;
5637 i
= backtrack_index
;
5641 /* Check that we have parsed all the arguments. */
5642 if (*str
!= '\0' && !inst
.error
)
5643 inst
.error
= _("garbage following instruction");
5645 return inst
.error
? FAIL
: SUCCESS
;
5648 #undef po_char_or_fail
5649 #undef po_reg_or_fail
5650 #undef po_reg_or_goto
5651 #undef po_imm_or_fail
5652 #undef po_scalar_or_fail
5654 /* Shorthand macro for instruction encoding functions issuing errors. */
5655 #define constraint(expr, err) do { \
5663 /* Functions for operand encoding. ARM, then Thumb. */
5665 #define rotate_left(v, n) (v << n | v >> (32 - n))
5667 /* If VAL can be encoded in the immediate field of an ARM instruction,
5668 return the encoded form. Otherwise, return FAIL. */
5671 encode_arm_immediate (unsigned int val
)
5675 for (i
= 0; i
< 32; i
+= 2)
5676 if ((a
= rotate_left (val
, i
)) <= 0xff)
5677 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
5682 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5683 return the encoded form. Otherwise, return FAIL. */
5685 encode_thumb32_immediate (unsigned int val
)
5692 for (i
= 1; i
<= 24; i
++)
5695 if ((val
& ~(0xff << i
)) == 0)
5696 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
5700 if (val
== ((a
<< 16) | a
))
5702 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
5706 if (val
== ((a
<< 16) | a
))
5707 return 0x200 | (a
>> 8);
5711 /* Encode a VFP SP or DP register number into inst.instruction. */
5714 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
5716 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
5719 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
5722 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
5725 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
5730 first_error (_("D register out of range for selected VFP version"));
5738 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
5742 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
5746 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
5750 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
5754 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
5758 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
5766 /* Encode a <shift> in an ARM-format instruction. The immediate,
5767 if any, is handled by md_apply_fix. */
5769 encode_arm_shift (int i
)
5771 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
5772 inst
.instruction
|= SHIFT_ROR
<< 5;
5775 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
5776 if (inst
.operands
[i
].immisreg
)
5778 inst
.instruction
|= SHIFT_BY_REG
;
5779 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
5782 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
5787 encode_arm_shifter_operand (int i
)
5789 if (inst
.operands
[i
].isreg
)
5791 inst
.instruction
|= inst
.operands
[i
].reg
;
5792 encode_arm_shift (i
);
5795 inst
.instruction
|= INST_IMMEDIATE
;
5798 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5800 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
5802 assert (inst
.operands
[i
].isreg
);
5803 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5805 if (inst
.operands
[i
].preind
)
5809 inst
.error
= _("instruction does not accept preindexed addressing");
5812 inst
.instruction
|= PRE_INDEX
;
5813 if (inst
.operands
[i
].writeback
)
5814 inst
.instruction
|= WRITE_BACK
;
5817 else if (inst
.operands
[i
].postind
)
5819 assert (inst
.operands
[i
].writeback
);
5821 inst
.instruction
|= WRITE_BACK
;
5823 else /* unindexed - only for coprocessor */
5825 inst
.error
= _("instruction does not accept unindexed addressing");
5829 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
5830 && (((inst
.instruction
& 0x000f0000) >> 16)
5831 == ((inst
.instruction
& 0x0000f000) >> 12)))
5832 as_warn ((inst
.instruction
& LOAD_BIT
)
5833 ? _("destination register same as write-back base")
5834 : _("source register same as write-back base"));
5837 /* inst.operands[i] was set up by parse_address. Encode it into an
5838 ARM-format mode 2 load or store instruction. If is_t is true,
5839 reject forms that cannot be used with a T instruction (i.e. not
5842 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
5844 encode_arm_addr_mode_common (i
, is_t
);
5846 if (inst
.operands
[i
].immisreg
)
5848 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
5849 inst
.instruction
|= inst
.operands
[i
].imm
;
5850 if (!inst
.operands
[i
].negative
)
5851 inst
.instruction
|= INDEX_UP
;
5852 if (inst
.operands
[i
].shifted
)
5854 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
5855 inst
.instruction
|= SHIFT_ROR
<< 5;
5858 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
5859 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
5863 else /* immediate offset in inst.reloc */
5865 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5866 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
5870 /* inst.operands[i] was set up by parse_address. Encode it into an
5871 ARM-format mode 3 load or store instruction. Reject forms that
5872 cannot be used with such instructions. If is_t is true, reject
5873 forms that cannot be used with a T instruction (i.e. not
5876 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
5878 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
5880 inst
.error
= _("instruction does not accept scaled register index");
5884 encode_arm_addr_mode_common (i
, is_t
);
5886 if (inst
.operands
[i
].immisreg
)
5888 inst
.instruction
|= inst
.operands
[i
].imm
;
5889 if (!inst
.operands
[i
].negative
)
5890 inst
.instruction
|= INDEX_UP
;
5892 else /* immediate offset in inst.reloc */
5894 inst
.instruction
|= HWOFFSET_IMM
;
5895 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5896 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
5900 /* inst.operands[i] was set up by parse_address. Encode it into an
5901 ARM-format instruction. Reject all forms which cannot be encoded
5902 into a coprocessor load/store instruction. If wb_ok is false,
5903 reject use of writeback; if unind_ok is false, reject use of
5904 unindexed addressing. If reloc_override is not 0, use it instead
5905 of BFD_ARM_CP_OFF_IMM. */
5908 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
5910 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5912 assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
5914 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
5916 assert (!inst
.operands
[i
].writeback
);
5919 inst
.error
= _("instruction does not support unindexed addressing");
5922 inst
.instruction
|= inst
.operands
[i
].imm
;
5923 inst
.instruction
|= INDEX_UP
;
5927 if (inst
.operands
[i
].preind
)
5928 inst
.instruction
|= PRE_INDEX
;
5930 if (inst
.operands
[i
].writeback
)
5932 if (inst
.operands
[i
].reg
== REG_PC
)
5934 inst
.error
= _("pc may not be used with write-back");
5939 inst
.error
= _("instruction does not support writeback");
5942 inst
.instruction
|= WRITE_BACK
;
5946 inst
.reloc
.type
= reloc_override
;
5947 else if (thumb_mode
)
5948 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
5950 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
5954 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5955 Determine whether it can be performed with a move instruction; if
5956 it can, convert inst.instruction to that move instruction and
5957 return 1; if it can't, convert inst.instruction to a literal-pool
5958 load and return 0. If this is not a valid thing to do in the
5959 current context, set inst.error and return 1.
5961 inst.operands[i] describes the destination register. */
5964 move_or_literal_pool (int i
, bfd_boolean thumb_p
, bfd_boolean mode_3
)
5969 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
5973 if ((inst
.instruction
& tbit
) == 0)
5975 inst
.error
= _("invalid pseudo operation");
5978 if (inst
.reloc
.exp
.X_op
!= O_constant
&& inst
.reloc
.exp
.X_op
!= O_symbol
)
5980 inst
.error
= _("constant expression expected");
5983 if (inst
.reloc
.exp
.X_op
== O_constant
)
5987 if (!unified_syntax
&& (inst
.reloc
.exp
.X_add_number
& ~0xFF) == 0)
5989 /* This can be done with a mov(1) instruction. */
5990 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
5991 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
;
5997 int value
= encode_arm_immediate (inst
.reloc
.exp
.X_add_number
);
6000 /* This can be done with a mov instruction. */
6001 inst
.instruction
&= LITERAL_MASK
;
6002 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
6003 inst
.instruction
|= value
& 0xfff;
6007 value
= encode_arm_immediate (~inst
.reloc
.exp
.X_add_number
);
6010 /* This can be done with a mvn instruction. */
6011 inst
.instruction
&= LITERAL_MASK
;
6012 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
6013 inst
.instruction
|= value
& 0xfff;
6019 if (add_to_lit_pool () == FAIL
)
6021 inst
.error
= _("literal pool insertion failed");
6024 inst
.operands
[1].reg
= REG_PC
;
6025 inst
.operands
[1].isreg
= 1;
6026 inst
.operands
[1].preind
= 1;
6027 inst
.reloc
.pc_rel
= 1;
6028 inst
.reloc
.type
= (thumb_p
6029 ? BFD_RELOC_ARM_THUMB_OFFSET
6031 ? BFD_RELOC_ARM_HWLITERAL
6032 : BFD_RELOC_ARM_LITERAL
));
6036 /* Functions for instruction encoding, sorted by subarchitecture.
6037 First some generics; their names are taken from the conventional
6038 bit positions for register arguments in ARM format instructions. */
6048 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6054 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6055 inst
.instruction
|= inst
.operands
[1].reg
;
6061 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6062 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6068 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6069 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6075 unsigned Rn
= inst
.operands
[2].reg
;
6076 /* Enforce restrictions on SWP instruction. */
6077 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
6078 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
6079 _("Rn must not overlap other operands"));
6080 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6081 inst
.instruction
|= inst
.operands
[1].reg
;
6082 inst
.instruction
|= Rn
<< 16;
6088 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6089 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6090 inst
.instruction
|= inst
.operands
[2].reg
;
6096 inst
.instruction
|= inst
.operands
[0].reg
;
6097 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6098 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6104 inst
.instruction
|= inst
.operands
[0].imm
;
6110 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6111 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
6114 /* ARM instructions, in alphabetical order by function name (except
6115 that wrapper functions appear immediately after the function they
6118 /* This is a pseudo-op of the form "adr rd, label" to be converted
6119 into a relative address of the form "add rd, pc, #label-.-8". */
6124 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
6126 /* Frag hacking will turn this into a sub instruction if the offset turns
6127 out to be negative. */
6128 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
6129 inst
.reloc
.pc_rel
= 1;
6130 inst
.reloc
.exp
.X_add_number
-= 8;
6133 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6134 into a relative address of the form:
6135 add rd, pc, #low(label-.-8)"
6136 add rd, rd, #high(label-.-8)" */
6141 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
6143 /* Frag hacking will turn this into a sub instruction if the offset turns
6144 out to be negative. */
6145 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
6146 inst
.reloc
.pc_rel
= 1;
6147 inst
.size
= INSN_SIZE
* 2;
6148 inst
.reloc
.exp
.X_add_number
-= 8;
6154 if (!inst
.operands
[1].present
)
6155 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
6156 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6157 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6158 encode_arm_shifter_operand (2);
6164 if (inst
.operands
[0].present
)
6166 constraint ((inst
.instruction
& 0xf0) != 0x40
6167 && inst
.operands
[0].imm
!= 0xf,
6168 "bad barrier type");
6169 inst
.instruction
|= inst
.operands
[0].imm
;
6172 inst
.instruction
|= 0xf;
6178 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
6179 constraint (msb
> 32, _("bit-field extends past end of register"));
6180 /* The instruction encoding stores the LSB and MSB,
6181 not the LSB and width. */
6182 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6183 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
6184 inst
.instruction
|= (msb
- 1) << 16;
6192 /* #0 in second position is alternative syntax for bfc, which is
6193 the same instruction but with REG_PC in the Rm field. */
6194 if (!inst
.operands
[1].isreg
)
6195 inst
.operands
[1].reg
= REG_PC
;
6197 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
6198 constraint (msb
> 32, _("bit-field extends past end of register"));
6199 /* The instruction encoding stores the LSB and MSB,
6200 not the LSB and width. */
6201 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6202 inst
.instruction
|= inst
.operands
[1].reg
;
6203 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6204 inst
.instruction
|= (msb
- 1) << 16;
6210 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
6211 _("bit-field extends past end of register"));
6212 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6213 inst
.instruction
|= inst
.operands
[1].reg
;
6214 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6215 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
6218 /* ARM V5 breakpoint instruction (argument parse)
6219 BKPT <16 bit unsigned immediate>
6220 Instruction is not conditional.
6221 The bit pattern given in insns[] has the COND_ALWAYS condition,
6222 and it is an error if the caller tried to override that. */
6227 /* Top 12 of 16 bits to bits 19:8. */
6228 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
6230 /* Bottom 4 of 16 bits to bits 3:0. */
6231 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
6235 encode_branch (int default_reloc
)
6237 if (inst
.operands
[0].hasreloc
)
6239 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
6240 _("the only suffix valid here is '(plt)'"));
6241 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
6245 inst
.reloc
.type
= default_reloc
;
6247 inst
.reloc
.pc_rel
= 1;
6254 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6255 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6258 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6265 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6267 if (inst
.cond
== COND_ALWAYS
)
6268 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6270 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6274 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6277 /* ARM V5 branch-link-exchange instruction (argument parse)
6278 BLX <target_addr> ie BLX(1)
6279 BLX{<condition>} <Rm> ie BLX(2)
6280 Unfortunately, there are two different opcodes for this mnemonic.
6281 So, the insns[].value is not used, and the code here zaps values
6282 into inst.instruction.
6283 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6288 if (inst
.operands
[0].isreg
)
6290 /* Arg is a register; the opcode provided by insns[] is correct.
6291 It is not illegal to do "blx pc", just useless. */
6292 if (inst
.operands
[0].reg
== REG_PC
)
6293 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6295 inst
.instruction
|= inst
.operands
[0].reg
;
6299 /* Arg is an address; this instruction cannot be executed
6300 conditionally, and the opcode must be adjusted. */
6301 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6302 inst
.instruction
= 0xfa000000;
6304 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6305 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6308 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
6315 if (inst
.operands
[0].reg
== REG_PC
)
6316 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6318 inst
.instruction
|= inst
.operands
[0].reg
;
6322 /* ARM v5TEJ. Jump to Jazelle code. */
6327 if (inst
.operands
[0].reg
== REG_PC
)
6328 as_tsktsk (_("use of r15 in bxj is not really useful"));
6330 inst
.instruction
|= inst
.operands
[0].reg
;
6333 /* Co-processor data operation:
6334 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6335 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6339 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6340 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
6341 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6342 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6343 inst
.instruction
|= inst
.operands
[4].reg
;
6344 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6350 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6351 encode_arm_shifter_operand (1);
6354 /* Transfer between coprocessor and ARM registers.
6355 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6360 No special properties. */
6365 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6366 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
6367 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6368 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6369 inst
.instruction
|= inst
.operands
[4].reg
;
6370 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6373 /* Transfer between coprocessor register and pair of ARM registers.
6374 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6379 Two XScale instructions are special cases of these:
6381 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6382 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6384 Result unpredicatable if Rd or Rn is R15. */
6389 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6390 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
6391 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6392 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6393 inst
.instruction
|= inst
.operands
[4].reg
;
6399 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
6400 inst
.instruction
|= inst
.operands
[1].imm
;
6406 inst
.instruction
|= inst
.operands
[0].imm
;
6412 /* There is no IT instruction in ARM mode. We
6413 process it but do not generate code for it. */
6420 int base_reg
= inst
.operands
[0].reg
;
6421 int range
= inst
.operands
[1].imm
;
6423 inst
.instruction
|= base_reg
<< 16;
6424 inst
.instruction
|= range
;
6426 if (inst
.operands
[1].writeback
)
6427 inst
.instruction
|= LDM_TYPE_2_OR_3
;
6429 if (inst
.operands
[0].writeback
)
6431 inst
.instruction
|= WRITE_BACK
;
6432 /* Check for unpredictable uses of writeback. */
6433 if (inst
.instruction
& LOAD_BIT
)
6435 /* Not allowed in LDM type 2. */
6436 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
6437 && ((range
& (1 << REG_PC
)) == 0))
6438 as_warn (_("writeback of base register is UNPREDICTABLE"));
6439 /* Only allowed if base reg not in list for other types. */
6440 else if (range
& (1 << base_reg
))
6441 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6445 /* Not allowed for type 2. */
6446 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
6447 as_warn (_("writeback of base register is UNPREDICTABLE"));
6448 /* Only allowed if base reg not in list, or first in list. */
6449 else if ((range
& (1 << base_reg
))
6450 && (range
& ((1 << base_reg
) - 1)))
6451 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6456 /* ARMv5TE load-consecutive (argument parse)
6465 constraint (inst
.operands
[0].reg
% 2 != 0,
6466 _("first destination register must be even"));
6467 constraint (inst
.operands
[1].present
6468 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6469 _("can only load two consecutive registers"));
6470 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6471 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
6473 if (!inst
.operands
[1].present
)
6474 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
6476 if (inst
.instruction
& LOAD_BIT
)
6478 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6479 register and the first register written; we have to diagnose
6480 overlap between the base and the second register written here. */
6482 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
6483 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
6484 as_warn (_("base register written back, and overlaps "
6485 "second destination register"));
6487 /* For an index-register load, the index register must not overlap the
6488 destination (even if not write-back). */
6489 else if (inst
.operands
[2].immisreg
6490 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
6491 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
6492 as_warn (_("index register overlaps destination register"));
6495 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6496 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
6502 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
6503 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
6504 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
6505 || inst
.operands
[1].negative
6506 /* This can arise if the programmer has written
6508 or if they have mistakenly used a register name as the last
6511 It is very difficult to distinguish between these two cases
6512 because "rX" might actually be a label. ie the register
6513 name has been occluded by a symbol of the same name. So we
6514 just generate a general 'bad addressing mode' type error
6515 message and leave it up to the programmer to discover the
6516 true cause and fix their mistake. */
6517 || (inst
.operands
[1].reg
== REG_PC
),
6520 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6521 || inst
.reloc
.exp
.X_add_number
!= 0,
6522 _("offset must be zero in ARM encoding"));
6524 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6525 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6526 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6532 constraint (inst
.operands
[0].reg
% 2 != 0,
6533 _("even register required"));
6534 constraint (inst
.operands
[1].present
6535 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6536 _("can only load two consecutive registers"));
6537 /* If op 1 were present and equal to PC, this function wouldn't
6538 have been called in the first place. */
6539 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6541 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6542 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6548 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6549 if (!inst
.operands
[1].isreg
)
6550 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
6552 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
6558 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6560 if (inst
.operands
[1].preind
)
6562 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6563 inst
.reloc
.exp
.X_add_number
!= 0,
6564 _("this instruction requires a post-indexed address"));
6566 inst
.operands
[1].preind
= 0;
6567 inst
.operands
[1].postind
= 1;
6568 inst
.operands
[1].writeback
= 1;
6570 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6571 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
6574 /* Halfword and signed-byte load/store operations. */
6579 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6580 if (!inst
.operands
[1].isreg
)
6581 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
6583 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
6589 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6591 if (inst
.operands
[1].preind
)
6593 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6594 inst
.reloc
.exp
.X_add_number
!= 0,
6595 _("this instruction requires a post-indexed address"));
6597 inst
.operands
[1].preind
= 0;
6598 inst
.operands
[1].postind
= 1;
6599 inst
.operands
[1].writeback
= 1;
6601 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6602 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
6605 /* Co-processor register load/store.
6606 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6610 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6611 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6612 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
6618 /* This restriction does not apply to mls (nor to mla in v6, but
6619 that's hard to detect at present). */
6620 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6621 && !(inst
.instruction
& 0x00400000))
6622 as_tsktsk (_("rd and rm should be different in mla"));
6624 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6625 inst
.instruction
|= inst
.operands
[1].reg
;
6626 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6627 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6634 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6635 encode_arm_shifter_operand (1);
6638 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6645 top
= (inst
.instruction
& 0x00400000) != 0;
6646 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
6647 _(":lower16: not allowed this instruction"));
6648 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
6649 _(":upper16: not allowed instruction"));
6650 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6651 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
6653 imm
= inst
.reloc
.exp
.X_add_number
;
6654 /* The value is in two pieces: 0:11, 16:19. */
6655 inst
.instruction
|= (imm
& 0x00000fff);
6656 inst
.instruction
|= (imm
& 0x0000f000) << 4;
6660 static void do_vfp_nsyn_opcode (const char *);
6663 do_vfp_nsyn_mrs (void)
6665 if (inst
.operands
[0].isvec
)
6667 if (inst
.operands
[1].reg
!= 1)
6668 first_error (_("operand 1 must be FPSCR"));
6669 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
6670 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
6671 do_vfp_nsyn_opcode ("fmstat");
6673 else if (inst
.operands
[1].isvec
)
6674 do_vfp_nsyn_opcode ("fmrx");
6682 do_vfp_nsyn_msr (void)
6684 if (inst
.operands
[0].isvec
)
6685 do_vfp_nsyn_opcode ("fmxr");
6695 if (do_vfp_nsyn_mrs () == SUCCESS
)
6698 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6699 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
6701 _("'CPSR' or 'SPSR' expected"));
6702 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6703 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
6706 /* Two possible forms:
6707 "{C|S}PSR_<field>, Rm",
6708 "{C|S}PSR_f, #expression". */
6713 if (do_vfp_nsyn_msr () == SUCCESS
)
6716 inst
.instruction
|= inst
.operands
[0].imm
;
6717 if (inst
.operands
[1].isreg
)
6718 inst
.instruction
|= inst
.operands
[1].reg
;
6721 inst
.instruction
|= INST_IMMEDIATE
;
6722 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
6723 inst
.reloc
.pc_rel
= 0;
6730 if (!inst
.operands
[2].present
)
6731 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
6732 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6733 inst
.instruction
|= inst
.operands
[1].reg
;
6734 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6736 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6737 as_tsktsk (_("rd and rm should be different in mul"));
6740 /* Long Multiply Parser
6741 UMULL RdLo, RdHi, Rm, Rs
6742 SMULL RdLo, RdHi, Rm, Rs
6743 UMLAL RdLo, RdHi, Rm, Rs
6744 SMLAL RdLo, RdHi, Rm, Rs. */
6749 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6750 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6751 inst
.instruction
|= inst
.operands
[2].reg
;
6752 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6754 /* rdhi, rdlo and rm must all be different. */
6755 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6756 || inst
.operands
[0].reg
== inst
.operands
[2].reg
6757 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
6758 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6764 if (inst
.operands
[0].present
)
6766 /* Architectural NOP hints are CPSR sets with no bits selected. */
6767 inst
.instruction
&= 0xf0000000;
6768 inst
.instruction
|= 0x0320f000 + inst
.operands
[0].imm
;
6772 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6773 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6774 Condition defaults to COND_ALWAYS.
6775 Error if Rd, Rn or Rm are R15. */
6780 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6781 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6782 inst
.instruction
|= inst
.operands
[2].reg
;
6783 if (inst
.operands
[3].present
)
6784 encode_arm_shift (3);
6787 /* ARM V6 PKHTB (Argument Parse). */
6792 if (!inst
.operands
[3].present
)
6794 /* If the shift specifier is omitted, turn the instruction
6795 into pkhbt rd, rm, rn. */
6796 inst
.instruction
&= 0xfff00010;
6797 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6798 inst
.instruction
|= inst
.operands
[1].reg
;
6799 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6803 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6804 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6805 inst
.instruction
|= inst
.operands
[2].reg
;
6806 encode_arm_shift (3);
6810 /* ARMv5TE: Preload-Cache
6814 Syntactically, like LDR with B=1, W=0, L=1. */
6819 constraint (!inst
.operands
[0].isreg
,
6820 _("'[' expected after PLD mnemonic"));
6821 constraint (inst
.operands
[0].postind
,
6822 _("post-indexed expression used in preload instruction"));
6823 constraint (inst
.operands
[0].writeback
,
6824 _("writeback used in preload instruction"));
6825 constraint (!inst
.operands
[0].preind
,
6826 _("unindexed addressing used in preload instruction"));
6827 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6830 /* ARMv7: PLI <addr_mode> */
6834 constraint (!inst
.operands
[0].isreg
,
6835 _("'[' expected after PLI mnemonic"));
6836 constraint (inst
.operands
[0].postind
,
6837 _("post-indexed expression used in preload instruction"));
6838 constraint (inst
.operands
[0].writeback
,
6839 _("writeback used in preload instruction"));
6840 constraint (!inst
.operands
[0].preind
,
6841 _("unindexed addressing used in preload instruction"));
6842 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6843 inst
.instruction
&= ~PRE_INDEX
;
6849 inst
.operands
[1] = inst
.operands
[0];
6850 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
6851 inst
.operands
[0].isreg
= 1;
6852 inst
.operands
[0].writeback
= 1;
6853 inst
.operands
[0].reg
= REG_SP
;
6857 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6858 word at the specified address and the following word
6860 Unconditionally executed.
6861 Error if Rn is R15. */
6866 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6867 if (inst
.operands
[0].writeback
)
6868 inst
.instruction
|= WRITE_BACK
;
6871 /* ARM V6 ssat (argument parse). */
6876 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6877 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
6878 inst
.instruction
|= inst
.operands
[2].reg
;
6880 if (inst
.operands
[3].present
)
6881 encode_arm_shift (3);
6884 /* ARM V6 usat (argument parse). */
6889 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6890 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6891 inst
.instruction
|= inst
.operands
[2].reg
;
6893 if (inst
.operands
[3].present
)
6894 encode_arm_shift (3);
6897 /* ARM V6 ssat16 (argument parse). */
6902 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6903 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
6904 inst
.instruction
|= inst
.operands
[2].reg
;
6910 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6911 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6912 inst
.instruction
|= inst
.operands
[2].reg
;
6915 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6916 preserving the other bits.
6918 setend <endian_specifier>, where <endian_specifier> is either
6924 if (inst
.operands
[0].imm
)
6925 inst
.instruction
|= 0x200;
6931 unsigned int Rm
= (inst
.operands
[1].present
6932 ? inst
.operands
[1].reg
6933 : inst
.operands
[0].reg
);
6935 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6936 inst
.instruction
|= Rm
;
6937 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
6939 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6940 inst
.instruction
|= SHIFT_BY_REG
;
6943 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6949 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
6950 inst
.reloc
.pc_rel
= 0;
6956 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
6957 inst
.reloc
.pc_rel
= 0;
6960 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6961 SMLAxy{cond} Rd,Rm,Rs,Rn
6962 SMLAWy{cond} Rd,Rm,Rs,Rn
6963 Error if any register is R15. */
6968 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6969 inst
.instruction
|= inst
.operands
[1].reg
;
6970 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6971 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6974 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6975 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6976 Error if any register is R15.
6977 Warning if Rdlo == Rdhi. */
6982 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6983 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6984 inst
.instruction
|= inst
.operands
[2].reg
;
6985 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6987 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6988 as_tsktsk (_("rdhi and rdlo must be different"));
6991 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6992 SMULxy{cond} Rd,Rm,Rs
6993 Error if any register is R15. */
6998 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6999 inst
.instruction
|= inst
.operands
[1].reg
;
7000 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7003 /* ARM V6 srs (argument parse). */
7008 inst
.instruction
|= inst
.operands
[0].imm
;
7009 if (inst
.operands
[0].writeback
)
7010 inst
.instruction
|= WRITE_BACK
;
7013 /* ARM V6 strex (argument parse). */
7018 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
7019 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
7020 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
7021 || inst
.operands
[2].negative
7022 /* See comment in do_ldrex(). */
7023 || (inst
.operands
[2].reg
== REG_PC
),
7026 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7027 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
7029 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7030 || inst
.reloc
.exp
.X_add_number
!= 0,
7031 _("offset must be zero in ARM encoding"));
7033 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7034 inst
.instruction
|= inst
.operands
[1].reg
;
7035 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7036 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7042 constraint (inst
.operands
[1].reg
% 2 != 0,
7043 _("even register required"));
7044 constraint (inst
.operands
[2].present
7045 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
7046 _("can only store two consecutive registers"));
7047 /* If op 2 were present and equal to PC, this function wouldn't
7048 have been called in the first place. */
7049 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
7051 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7052 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
7053 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
7056 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7057 inst
.instruction
|= inst
.operands
[1].reg
;
7058 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7061 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7062 extends it to 32-bits, and adds the result to a value in another
7063 register. You can specify a rotation by 0, 8, 16, or 24 bits
7064 before extracting the 16-bit value.
7065 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7066 Condition defaults to COND_ALWAYS.
7067 Error if any register uses R15. */
7072 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7073 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7074 inst
.instruction
|= inst
.operands
[2].reg
;
7075 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
7080 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7081 Condition defaults to COND_ALWAYS.
7082 Error if any register uses R15. */
7087 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7088 inst
.instruction
|= inst
.operands
[1].reg
;
7089 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
7092 /* VFP instructions. In a logical order: SP variant first, monad
7093 before dyad, arithmetic then move then load/store. */
7096 do_vfp_sp_monadic (void)
7098 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7099 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
7103 do_vfp_sp_dyadic (void)
7105 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7106 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
7107 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
7111 do_vfp_sp_compare_z (void)
7113 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7117 do_vfp_dp_sp_cvt (void)
7119 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7120 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
7124 do_vfp_sp_dp_cvt (void)
7126 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7127 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
7131 do_vfp_reg_from_sp (void)
7133 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7134 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
7138 do_vfp_reg2_from_sp2 (void)
7140 constraint (inst
.operands
[2].imm
!= 2,
7141 _("only two consecutive VFP SP registers allowed here"));
7142 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7143 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7144 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
7148 do_vfp_sp_from_reg (void)
7150 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
7151 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7155 do_vfp_sp2_from_reg2 (void)
7157 constraint (inst
.operands
[0].imm
!= 2,
7158 _("only two consecutive VFP SP registers allowed here"));
7159 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
7160 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7161 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7165 do_vfp_sp_ldst (void)
7167 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7168 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
7172 do_vfp_dp_ldst (void)
7174 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7175 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
7180 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
7182 if (inst
.operands
[0].writeback
)
7183 inst
.instruction
|= WRITE_BACK
;
7185 constraint (ldstm_type
!= VFP_LDSTMIA
,
7186 _("this addressing mode requires base-register writeback"));
7187 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7188 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
7189 inst
.instruction
|= inst
.operands
[1].imm
;
7193 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
7197 if (inst
.operands
[0].writeback
)
7198 inst
.instruction
|= WRITE_BACK
;
7200 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
7201 _("this addressing mode requires base-register writeback"));
7203 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7204 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7206 count
= inst
.operands
[1].imm
<< 1;
7207 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
7210 inst
.instruction
|= count
;
7214 do_vfp_sp_ldstmia (void)
7216 vfp_sp_ldstm (VFP_LDSTMIA
);
7220 do_vfp_sp_ldstmdb (void)
7222 vfp_sp_ldstm (VFP_LDSTMDB
);
7226 do_vfp_dp_ldstmia (void)
7228 vfp_dp_ldstm (VFP_LDSTMIA
);
7232 do_vfp_dp_ldstmdb (void)
7234 vfp_dp_ldstm (VFP_LDSTMDB
);
7238 do_vfp_xp_ldstmia (void)
7240 vfp_dp_ldstm (VFP_LDSTMIAX
);
7244 do_vfp_xp_ldstmdb (void)
7246 vfp_dp_ldstm (VFP_LDSTMDBX
);
7250 do_vfp_dp_rd_rm (void)
7252 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7253 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
7257 do_vfp_dp_rn_rd (void)
7259 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
7260 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7264 do_vfp_dp_rd_rn (void)
7266 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7267 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7271 do_vfp_dp_rd_rn_rm (void)
7273 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7274 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7275 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
7281 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7285 do_vfp_dp_rm_rd_rn (void)
7287 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
7288 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7289 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
7292 /* VFPv3 instructions. */
7294 do_vfp_sp_const (void)
7296 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7297 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7298 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7302 do_vfp_dp_const (void)
7304 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7305 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7306 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7310 vfp_conv (int srcsize
)
7312 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
7313 inst
.instruction
|= (immbits
& 1) << 5;
7314 inst
.instruction
|= (immbits
>> 1);
7318 do_vfp_sp_conv_16 (void)
7320 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7325 do_vfp_dp_conv_16 (void)
7327 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7332 do_vfp_sp_conv_32 (void)
7334 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7339 do_vfp_dp_conv_32 (void)
7341 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7346 /* FPA instructions. Also in a logical order. */
7351 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7352 inst
.instruction
|= inst
.operands
[1].reg
;
7356 do_fpa_ldmstm (void)
7358 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7359 switch (inst
.operands
[1].imm
)
7361 case 1: inst
.instruction
|= CP_T_X
; break;
7362 case 2: inst
.instruction
|= CP_T_Y
; break;
7363 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
7368 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
7370 /* The instruction specified "ea" or "fd", so we can only accept
7371 [Rn]{!}. The instruction does not really support stacking or
7372 unstacking, so we have to emulate these by setting appropriate
7373 bits and offsets. */
7374 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7375 || inst
.reloc
.exp
.X_add_number
!= 0,
7376 _("this instruction does not support indexing"));
7378 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
7379 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
7381 if (!(inst
.instruction
& INDEX_UP
))
7382 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
7384 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
7386 inst
.operands
[2].preind
= 0;
7387 inst
.operands
[2].postind
= 1;
7391 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7395 /* iWMMXt instructions: strictly in alphabetical order. */
7398 do_iwmmxt_tandorc (void)
7400 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
7404 do_iwmmxt_textrc (void)
7406 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7407 inst
.instruction
|= inst
.operands
[1].imm
;
7411 do_iwmmxt_textrm (void)
7413 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7414 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7415 inst
.instruction
|= inst
.operands
[2].imm
;
7419 do_iwmmxt_tinsr (void)
7421 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7422 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7423 inst
.instruction
|= inst
.operands
[2].imm
;
7427 do_iwmmxt_tmia (void)
7429 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7430 inst
.instruction
|= inst
.operands
[1].reg
;
7431 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7435 do_iwmmxt_waligni (void)
7437 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7438 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7439 inst
.instruction
|= inst
.operands
[2].reg
;
7440 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
7444 do_iwmmxt_wmov (void)
7446 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7447 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7448 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7449 inst
.instruction
|= inst
.operands
[1].reg
;
7453 do_iwmmxt_wldstbh (void)
7456 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7458 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
7460 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
7461 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
7465 do_iwmmxt_wldstw (void)
7467 /* RIWR_RIWC clears .isreg for a control register. */
7468 if (!inst
.operands
[0].isreg
)
7470 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
7471 inst
.instruction
|= 0xf0000000;
7474 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7475 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
7479 do_iwmmxt_wldstd (void)
7481 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7482 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
7486 do_iwmmxt_wshufh (void)
7488 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7489 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7490 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
7491 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
7495 do_iwmmxt_wzero (void)
7497 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7498 inst
.instruction
|= inst
.operands
[0].reg
;
7499 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7500 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7503 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7504 operations first, then control, shift, and load/store. */
7506 /* Insns like "foo X,Y,Z". */
7509 do_mav_triple (void)
7511 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7512 inst
.instruction
|= inst
.operands
[1].reg
;
7513 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7516 /* Insns like "foo W,X,Y,Z".
7517 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7522 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7523 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7524 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7525 inst
.instruction
|= inst
.operands
[3].reg
;
7528 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7532 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7535 /* Maverick shift immediate instructions.
7536 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7537 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7542 int imm
= inst
.operands
[2].imm
;
7544 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7545 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7547 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7548 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7549 Bit 4 should be 0. */
7550 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
7552 inst
.instruction
|= imm
;
7555 /* XScale instructions. Also sorted arithmetic before move. */
7557 /* Xscale multiply-accumulate (argument parse)
7560 MIAxycc acc0,Rm,Rs. */
7565 inst
.instruction
|= inst
.operands
[1].reg
;
7566 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7569 /* Xscale move-accumulator-register (argument parse)
7571 MARcc acc0,RdLo,RdHi. */
7576 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7577 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7580 /* Xscale move-register-accumulator (argument parse)
7582 MRAcc RdLo,RdHi,acc0. */
7587 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
7588 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7589 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7592 /* Encoding functions relevant only to Thumb. */
7594 /* inst.operands[i] is a shifted-register operand; encode
7595 it into inst.instruction in the format used by Thumb32. */
7598 encode_thumb32_shifted_operand (int i
)
7600 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
7601 unsigned int shift
= inst
.operands
[i
].shift_kind
;
7603 constraint (inst
.operands
[i
].immisreg
,
7604 _("shift by register not allowed in thumb mode"));
7605 inst
.instruction
|= inst
.operands
[i
].reg
;
7606 if (shift
== SHIFT_RRX
)
7607 inst
.instruction
|= SHIFT_ROR
<< 4;
7610 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
7611 _("expression too complex"));
7613 constraint (value
> 32
7614 || (value
== 32 && (shift
== SHIFT_LSL
7615 || shift
== SHIFT_ROR
)),
7616 _("shift expression is too large"));
7620 else if (value
== 32)
7623 inst
.instruction
|= shift
<< 4;
7624 inst
.instruction
|= (value
& 0x1c) << 10;
7625 inst
.instruction
|= (value
& 0x03) << 6;
7630 /* inst.operands[i] was set up by parse_address. Encode it into a
7631 Thumb32 format load or store instruction. Reject forms that cannot
7632 be used with such instructions. If is_t is true, reject forms that
7633 cannot be used with a T instruction; if is_d is true, reject forms
7634 that cannot be used with a D instruction. */
7637 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
7639 bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7641 constraint (!inst
.operands
[i
].isreg
,
7642 _("Instruction does not support =N addresses"));
7644 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7645 if (inst
.operands
[i
].immisreg
)
7647 constraint (is_pc
, _("cannot use register index with PC-relative addressing"));
7648 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
7649 constraint (inst
.operands
[i
].negative
,
7650 _("Thumb does not support negative register indexing"));
7651 constraint (inst
.operands
[i
].postind
,
7652 _("Thumb does not support register post-indexing"));
7653 constraint (inst
.operands
[i
].writeback
,
7654 _("Thumb does not support register indexing with writeback"));
7655 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
7656 _("Thumb supports only LSL in shifted register indexing"));
7658 inst
.instruction
|= inst
.operands
[i
].imm
;
7659 if (inst
.operands
[i
].shifted
)
7661 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
7662 _("expression too complex"));
7663 constraint (inst
.reloc
.exp
.X_add_number
< 0
7664 || inst
.reloc
.exp
.X_add_number
> 3,
7665 _("shift out of range"));
7666 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
7668 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7670 else if (inst
.operands
[i
].preind
)
7672 constraint (is_pc
&& inst
.operands
[i
].writeback
,
7673 _("cannot use writeback with PC-relative addressing"));
7674 constraint (is_t
&& inst
.operands
[i
].writeback
,
7675 _("cannot use writeback with this instruction"));
7679 inst
.instruction
|= 0x01000000;
7680 if (inst
.operands
[i
].writeback
)
7681 inst
.instruction
|= 0x00200000;
7685 inst
.instruction
|= 0x00000c00;
7686 if (inst
.operands
[i
].writeback
)
7687 inst
.instruction
|= 0x00000100;
7689 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
7691 else if (inst
.operands
[i
].postind
)
7693 assert (inst
.operands
[i
].writeback
);
7694 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
7695 constraint (is_t
, _("cannot use post-indexing with this instruction"));
7698 inst
.instruction
|= 0x00200000;
7700 inst
.instruction
|= 0x00000900;
7701 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
7703 else /* unindexed - only for coprocessor */
7704 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* was f3af9004; SEV.W is F3AF 8004 per ARM ARM */

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
7811 /* Thumb instruction encoders, in alphabetical order. */
7815 do_t_add_sub_w (void)
7819 Rd
= inst
.operands
[0].reg
;
7820 Rn
= inst
.operands
[1].reg
;
7822 constraint (Rd
== 15, _("PC not allowed as destination"));
7823 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
7824 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
7827 /* Parse an add or subtract instruction. We get here with inst.instruction
7828 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7835 Rd
= inst
.operands
[0].reg
;
7836 Rs
= (inst
.operands
[1].present
7837 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7838 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7846 flags
= (inst
.instruction
== T_MNEM_adds
7847 || inst
.instruction
== T_MNEM_subs
);
7849 narrow
= (current_it_mask
== 0);
7851 narrow
= (current_it_mask
!= 0);
7852 if (!inst
.operands
[2].isreg
)
7855 if (inst
.size_req
!= 4)
7859 add
= (inst
.instruction
== T_MNEM_add
7860 || inst
.instruction
== T_MNEM_adds
);
7861 /* Attempt to use a narrow opcode, with relaxation if
7863 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
7864 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
7865 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
7866 opcode
= T_MNEM_add_sp
;
7867 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
7868 opcode
= T_MNEM_add_pc
;
7869 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
7872 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
7874 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
7878 inst
.instruction
= THUMB_OP16(opcode
);
7879 inst
.instruction
|= (Rd
<< 4) | Rs
;
7880 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7881 if (inst
.size_req
!= 2)
7882 inst
.relax
= opcode
;
7885 constraint (inst
.size_req
== 2, BAD_HIREG
);
7887 if (inst
.size_req
== 4
7888 || (inst
.size_req
!= 2 && !opcode
))
7890 /* ??? Convert large immediates to addw/subw. */
7891 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7892 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7893 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7894 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7895 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7900 Rn
= inst
.operands
[2].reg
;
7901 /* See if we can do this with a 16-bit instruction. */
7902 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
7904 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
7909 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
7910 || inst
.instruction
== T_MNEM_add
)
7913 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
7917 if (inst
.instruction
== T_MNEM_add
)
7921 inst
.instruction
= T_OPCODE_ADD_HI
;
7922 inst
.instruction
|= (Rd
& 8) << 4;
7923 inst
.instruction
|= (Rd
& 7);
7924 inst
.instruction
|= Rn
<< 3;
7927 /* ... because addition is commutative! */
7930 inst
.instruction
= T_OPCODE_ADD_HI
;
7931 inst
.instruction
|= (Rd
& 8) << 4;
7932 inst
.instruction
|= (Rd
& 7);
7933 inst
.instruction
|= Rs
<< 3;
7938 /* If we get here, it can't be done in 16 bits. */
7939 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
7940 _("shift must be constant"));
7941 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7942 inst
.instruction
|= Rd
<< 8;
7943 inst
.instruction
|= Rs
<< 16;
7944 encode_thumb32_shifted_operand (2);
7949 constraint (inst
.instruction
== T_MNEM_adds
7950 || inst
.instruction
== T_MNEM_subs
,
7953 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
7955 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
7956 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
7959 inst
.instruction
= (inst
.instruction
== T_MNEM_add
7961 inst
.instruction
|= (Rd
<< 4) | Rs
;
7962 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7966 Rn
= inst
.operands
[2].reg
;
7967 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
7969 /* We now have Rd, Rs, and Rn set to registers. */
7970 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
7972 /* Can't do this for SUB. */
7973 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
7974 inst
.instruction
= T_OPCODE_ADD_HI
;
7975 inst
.instruction
|= (Rd
& 8) << 4;
7976 inst
.instruction
|= (Rd
& 7);
7978 inst
.instruction
|= Rn
<< 3;
7980 inst
.instruction
|= Rs
<< 3;
7982 constraint (1, _("dest must overlap one source register"));
7986 inst
.instruction
= (inst
.instruction
== T_MNEM_add
7987 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
7988 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
7996 if (unified_syntax
&& inst
.size_req
== 0 && inst
.operands
[0].reg
<= 7)
7998 /* Defer to section relaxation. */
7999 inst
.relax
= inst
.instruction
;
8000 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8001 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
8003 else if (unified_syntax
&& inst
.size_req
!= 2)
8005 /* Generate a 32-bit opcode. */
8006 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8007 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8008 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
8009 inst
.reloc
.pc_rel
= 1;
8013 /* Generate a 16-bit opcode. */
8014 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8015 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8016 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
8017 inst
.reloc
.pc_rel
= 1;
8019 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
8023 /* Arithmetic instructions for which there is just one 16-bit
8024 instruction encoding, and it allows only two low registers.
8025 For maximal compatibility with ARM syntax, we allow three register
8026 operands even when Thumb-32 instructions are not available, as long
8027 as the first two are identical. For instance, both "sbc r0,r1" and
8028 "sbc r0,r0,r1" are allowed. */
8034 Rd
= inst
.operands
[0].reg
;
8035 Rs
= (inst
.operands
[1].present
8036 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8037 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8038 Rn
= inst
.operands
[2].reg
;
8042 if (!inst
.operands
[2].isreg
)
8044 /* For an immediate, we always generate a 32-bit opcode;
8045 section relaxation will shrink it later if possible. */
8046 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8047 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8048 inst
.instruction
|= Rd
<< 8;
8049 inst
.instruction
|= Rs
<< 16;
8050 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8056 /* See if we can do this with a 16-bit instruction. */
8057 if (THUMB_SETS_FLAGS (inst
.instruction
))
8058 narrow
= current_it_mask
== 0;
8060 narrow
= current_it_mask
!= 0;
8062 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
8064 if (inst
.operands
[2].shifted
)
8066 if (inst
.size_req
== 4)
8072 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8073 inst
.instruction
|= Rd
;
8074 inst
.instruction
|= Rn
<< 3;
8078 /* If we get here, it can't be done in 16 bits. */
8079 constraint (inst
.operands
[2].shifted
8080 && inst
.operands
[2].immisreg
,
8081 _("shift must be constant"));
8082 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8083 inst
.instruction
|= Rd
<< 8;
8084 inst
.instruction
|= Rs
<< 16;
8085 encode_thumb32_shifted_operand (2);
8090 /* On its face this is a lie - the instruction does set the
8091 flags. However, the only supported mnemonic in this mode
8093 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8095 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
8096 _("unshifted register required"));
8097 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
8098 constraint (Rd
!= Rs
,
8099 _("dest and source1 must be the same register"));
8101 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8102 inst
.instruction
|= Rd
;
8103 inst
.instruction
|= Rn
<< 3;
8107 /* Similarly, but for instructions where the arithmetic operation is
8108 commutative, so we can allow either of them to be different from
8109 the destination operand in a 16-bit instruction. For instance, all
8110 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8117 Rd
= inst
.operands
[0].reg
;
8118 Rs
= (inst
.operands
[1].present
8119 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8120 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8121 Rn
= inst
.operands
[2].reg
;
8125 if (!inst
.operands
[2].isreg
)
8127 /* For an immediate, we always generate a 32-bit opcode;
8128 section relaxation will shrink it later if possible. */
8129 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8130 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8131 inst
.instruction
|= Rd
<< 8;
8132 inst
.instruction
|= Rs
<< 16;
8133 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8139 /* See if we can do this with a 16-bit instruction. */
8140 if (THUMB_SETS_FLAGS (inst
.instruction
))
8141 narrow
= current_it_mask
== 0;
8143 narrow
= current_it_mask
!= 0;
8145 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
8147 if (inst
.operands
[2].shifted
)
8149 if (inst
.size_req
== 4)
8156 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8157 inst
.instruction
|= Rd
;
8158 inst
.instruction
|= Rn
<< 3;
8163 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8164 inst
.instruction
|= Rd
;
8165 inst
.instruction
|= Rs
<< 3;
8170 /* If we get here, it can't be done in 16 bits. */
8171 constraint (inst
.operands
[2].shifted
8172 && inst
.operands
[2].immisreg
,
8173 _("shift must be constant"));
8174 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8175 inst
.instruction
|= Rd
<< 8;
8176 inst
.instruction
|= Rs
<< 16;
8177 encode_thumb32_shifted_operand (2);
8182 /* On its face this is a lie - the instruction does set the
8183 flags. However, the only supported mnemonic in this mode
8185 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8187 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
8188 _("unshifted register required"));
8189 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
8191 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8192 inst
.instruction
|= Rd
;
8195 inst
.instruction
|= Rn
<< 3;
8197 inst
.instruction
|= Rs
<< 3;
8199 constraint (1, _("dest must overlap one source register"));
8206 if (inst
.operands
[0].present
)
8208 constraint ((inst
.instruction
& 0xf0) != 0x40
8209 && inst
.operands
[0].imm
!= 0xf,
8210 "bad barrier type");
8211 inst
.instruction
|= inst
.operands
[0].imm
;
8214 inst
.instruction
|= 0xf;
8220 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8221 constraint (msb
> 32, _("bit-field extends past end of register"));
8222 /* The instruction encoding stores the LSB and MSB,
8223 not the LSB and width. */
8224 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8225 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
8226 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
8227 inst
.instruction
|= msb
- 1;
8235 /* #0 in second position is alternative syntax for bfc, which is
8236 the same instruction but with REG_PC in the Rm field. */
8237 if (!inst
.operands
[1].isreg
)
8238 inst
.operands
[1].reg
= REG_PC
;
8240 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8241 constraint (msb
> 32, _("bit-field extends past end of register"));
8242 /* The instruction encoding stores the LSB and MSB,
8243 not the LSB and width. */
8244 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8245 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8246 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
8247 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
8248 inst
.instruction
|= msb
- 1;
8254 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8255 _("bit-field extends past end of register"));
8256 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8257 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8258 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
8259 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
8260 inst
.instruction
|= inst
.operands
[3].imm
- 1;
8263 /* ARM V5 Thumb BLX (argument parse)
8264 BLX <target_addr> which is BLX(1)
8265 BLX <Rm> which is BLX(2)
8266 Unfortunately, there are two different opcodes for this mnemonic.
8267 So, the insns[].value is not used, and the code here zaps values
8268 into inst.instruction.
8270 ??? How to take advantage of the additional two bits of displacement
8271 available in Thumb32 mode? Need new relocation? */
8276 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8277 if (inst
.operands
[0].isreg
)
8278 /* We have a register, so this is BLX(2). */
8279 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8282 /* No register. This must be BLX(1). */
8283 inst
.instruction
= 0xf000e800;
8285 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8286 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8289 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
8290 inst
.reloc
.pc_rel
= 1;
8300 if (current_it_mask
)
8302 /* Conditional branches inside IT blocks are encoded as unconditional
8305 /* A branch must be the last instruction in an IT block. */
8306 constraint (current_it_mask
!= 0x10, BAD_BRANCH
);
8311 if (cond
!= COND_ALWAYS
)
8312 opcode
= T_MNEM_bcond
;
8314 opcode
= inst
.instruction
;
8316 if (unified_syntax
&& inst
.size_req
== 4)
8318 inst
.instruction
= THUMB_OP32(opcode
);
8319 if (cond
== COND_ALWAYS
)
8320 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
8323 assert (cond
!= 0xF);
8324 inst
.instruction
|= cond
<< 22;
8325 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
8330 inst
.instruction
= THUMB_OP16(opcode
);
8331 if (cond
== COND_ALWAYS
)
8332 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
8335 inst
.instruction
|= cond
<< 8;
8336 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
8338 /* Allow section relaxation. */
8339 if (unified_syntax
&& inst
.size_req
!= 2)
8340 inst
.relax
= opcode
;
8343 inst
.reloc
.pc_rel
= 1;
8349 constraint (inst
.cond
!= COND_ALWAYS
,
8350 _("instruction is always unconditional"));
8351 if (inst
.operands
[0].present
)
8353 constraint (inst
.operands
[0].imm
> 255,
8354 _("immediate value out of range"));
8355 inst
.instruction
|= inst
.operands
[0].imm
;
8360 do_t_branch23 (void)
8362 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8363 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8364 inst
.reloc
.pc_rel
= 1;
8366 /* If the destination of the branch is a defined symbol which does not have
8367 the THUMB_FUNC attribute, then we must be calling a function which has
8368 the (interfacearm) attribute. We look for the Thumb entry point to that
8369 function and change the branch to refer to that function instead. */
8370 if ( inst
.reloc
.exp
.X_op
== O_symbol
8371 && inst
.reloc
.exp
.X_add_symbol
!= NULL
8372 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
8373 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
8374 inst
.reloc
.exp
.X_add_symbol
=
8375 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
8381 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8382 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8383 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8384 should cause the alignment to be checked once it is known. This is
8385 because BX PC only works if the instruction is word aligned. */
8391 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8392 if (inst
.operands
[0].reg
== REG_PC
)
8393 as_tsktsk (_("use of r15 in bxj is not really useful"));
8395 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8401 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8402 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8403 inst
.instruction
|= inst
.operands
[1].reg
;
8409 constraint (current_it_mask
, BAD_NOT_IT
);
8410 inst
.instruction
|= inst
.operands
[0].imm
;
8416 constraint (current_it_mask
, BAD_NOT_IT
);
8418 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
8419 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
8421 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
8422 inst
.instruction
= 0xf3af8000;
8423 inst
.instruction
|= imod
<< 9;
8424 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
8425 if (inst
.operands
[1].present
)
8426 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
8430 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
8431 && (inst
.operands
[0].imm
& 4),
8432 _("selected processor does not support 'A' form "
8433 "of this instruction"));
8434 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
8435 _("Thumb does not support the 2-argument "
8436 "form of this instruction"));
8437 inst
.instruction
|= inst
.operands
[0].imm
;
8441 /* THUMB CPY instruction (argument parse). */
8446 if (inst
.size_req
== 4)
8448 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
8449 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8450 inst
.instruction
|= inst
.operands
[1].reg
;
8454 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8455 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8456 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8463 constraint (current_it_mask
, BAD_NOT_IT
);
8464 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8465 inst
.instruction
|= inst
.operands
[0].reg
;
8466 inst
.reloc
.pc_rel
= 1;
8467 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
8473 inst
.instruction
|= inst
.operands
[0].imm
;
8479 if (!inst
.operands
[1].present
)
8480 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8481 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8482 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8483 inst
.instruction
|= inst
.operands
[2].reg
;
8489 if (unified_syntax
&& inst
.size_req
== 4)
8490 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8492 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8498 unsigned int cond
= inst
.operands
[0].imm
;
8500 constraint (current_it_mask
, BAD_NOT_IT
);
8501 current_it_mask
= (inst
.instruction
& 0xf) | 0x10;
8504 /* If the condition is a negative condition, invert the mask. */
8505 if ((cond
& 0x1) == 0x0)
8507 unsigned int mask
= inst
.instruction
& 0x000f;
8509 if ((mask
& 0x7) == 0)
8510 /* no conversion needed */;
8511 else if ((mask
& 0x3) == 0)
8513 else if ((mask
& 0x1) == 0)
8518 inst
.instruction
&= 0xfff0;
8519 inst
.instruction
|= mask
;
8522 inst
.instruction
|= cond
<< 4;
8528 /* This really doesn't seem worth it. */
8529 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
8530 _("expression too complex"));
8531 constraint (inst
.operands
[1].writeback
,
8532 _("Thumb load/store multiple does not support {reglist}^"));
8536 /* See if we can use a 16-bit instruction. */
8537 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
8538 && inst
.size_req
!= 4
8539 && inst
.operands
[0].reg
<= 7
8540 && !(inst
.operands
[1].imm
& ~0xff)
8541 && (inst
.instruction
== T_MNEM_stmia
8542 ? inst
.operands
[0].writeback
8543 : (inst
.operands
[0].writeback
8544 == !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))))
8546 if (inst
.instruction
== T_MNEM_stmia
8547 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8548 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
8549 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8550 inst
.operands
[0].reg
);
8552 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8553 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8554 inst
.instruction
|= inst
.operands
[1].imm
;
8558 if (inst
.operands
[1].imm
& (1 << 13))
8559 as_warn (_("SP should not be in register list"));
8560 if (inst
.instruction
== T_MNEM_stmia
)
8562 if (inst
.operands
[1].imm
& (1 << 15))
8563 as_warn (_("PC should not be in register list"));
8564 if (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8565 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8566 inst
.operands
[0].reg
);
8570 if (inst
.operands
[1].imm
& (1 << 14)
8571 && inst
.operands
[1].imm
& (1 << 15))
8572 as_warn (_("LR and PC should not both be in register list"));
8573 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8574 && inst
.operands
[0].writeback
)
8575 as_warn (_("base register should not be in register list "
8576 "when written back"));
8578 if (inst
.instruction
< 0xffff)
8579 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8580 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8581 inst
.instruction
|= inst
.operands
[1].imm
;
8582 if (inst
.operands
[0].writeback
)
8583 inst
.instruction
|= WRITE_BACK
;
8588 constraint (inst
.operands
[0].reg
> 7
8589 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
8590 if (inst
.instruction
== T_MNEM_stmia
)
8592 if (!inst
.operands
[0].writeback
)
8593 as_warn (_("this instruction will write back the base register"));
8594 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8595 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
8596 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8597 inst
.operands
[0].reg
);
8601 if (!inst
.operands
[0].writeback
8602 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
8603 as_warn (_("this instruction will write back the base register"));
8604 else if (inst
.operands
[0].writeback
8605 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
8606 as_warn (_("this instruction will not write back the base register"));
8609 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8610 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8611 inst
.instruction
|= inst
.operands
[1].imm
;
8618 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8619 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8620 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8621 || inst
.operands
[1].negative
,
8624 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8625 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8626 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
8632 if (!inst
.operands
[1].present
)
8634 constraint (inst
.operands
[0].reg
== REG_LR
,
8635 _("r14 not allowed as first register "
8636 "when second register is omitted"));
8637 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8639 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
8642 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8643 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8644 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8650 unsigned long opcode
;
8653 opcode
= inst
.instruction
;
8656 if (!inst
.operands
[1].isreg
)
8658 if (opcode
<= 0xffff)
8659 inst
.instruction
= THUMB_OP32 (opcode
);
8660 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
8663 if (inst
.operands
[1].isreg
8664 && !inst
.operands
[1].writeback
8665 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
8666 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
8668 && inst
.size_req
!= 4)
8670 /* Insn may have a 16-bit form. */
8671 Rn
= inst
.operands
[1].reg
;
8672 if (inst
.operands
[1].immisreg
)
8674 inst
.instruction
= THUMB_OP16 (opcode
);
8676 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
8679 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
8680 && opcode
!= T_MNEM_ldrsb
)
8681 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
8682 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
8689 if (inst
.reloc
.pc_rel
)
8690 opcode
= T_MNEM_ldr_pc2
;
8692 opcode
= T_MNEM_ldr_pc
;
8696 if (opcode
== T_MNEM_ldr
)
8697 opcode
= T_MNEM_ldr_sp
;
8699 opcode
= T_MNEM_str_sp
;
8701 inst
.instruction
= inst
.operands
[0].reg
<< 8;
8705 inst
.instruction
= inst
.operands
[0].reg
;
8706 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8708 inst
.instruction
|= THUMB_OP16 (opcode
);
8709 if (inst
.size_req
== 2)
8710 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8712 inst
.relax
= opcode
;
8716 /* Definitely a 32-bit variant. */
8717 inst
.instruction
= THUMB_OP32 (opcode
);
8718 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8719 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
8723 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8725 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
8727 /* Only [Rn,Rm] is acceptable. */
8728 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
8729 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
8730 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
8731 || inst
.operands
[1].negative
,
8732 _("Thumb does not support this addressing mode"));
8733 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8737 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8738 if (!inst
.operands
[1].isreg
)
8739 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
8742 constraint (!inst
.operands
[1].preind
8743 || inst
.operands
[1].shifted
8744 || inst
.operands
[1].writeback
,
8745 _("Thumb does not support this addressing mode"));
8746 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
8748 constraint (inst
.instruction
& 0x0600,
8749 _("byte or halfword not valid for base register"));
8750 constraint (inst
.operands
[1].reg
== REG_PC
8751 && !(inst
.instruction
& THUMB_LOAD_BIT
),
8752 _("r15 based store not allowed"));
8753 constraint (inst
.operands
[1].immisreg
,
8754 _("invalid base register for register offset"));
8756 if (inst
.operands
[1].reg
== REG_PC
)
8757 inst
.instruction
= T_OPCODE_LDR_PC
;
8758 else if (inst
.instruction
& THUMB_LOAD_BIT
)
8759 inst
.instruction
= T_OPCODE_LDR_SP
;
8761 inst
.instruction
= T_OPCODE_STR_SP
;
8763 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8764 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8768 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
8769 if (!inst
.operands
[1].immisreg
)
8771 /* Immediate offset. */
8772 inst
.instruction
|= inst
.operands
[0].reg
;
8773 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8774 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8778 /* Register offset. */
8779 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
8780 constraint (inst
.operands
[1].negative
,
8781 _("Thumb does not support this addressing mode"));
8784 switch (inst
.instruction
)
8786 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
8787 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
8788 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
8789 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
8790 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
8791 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
8792 case 0x5600 /* ldrsb */:
8793 case 0x5e00 /* ldrsh */: break;
8797 inst
.instruction
|= inst
.operands
[0].reg
;
8798 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8799 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
8805 if (!inst
.operands
[1].present
)
8807 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8808 constraint (inst
.operands
[0].reg
== REG_LR
,
8809 _("r14 not allowed here"));
8811 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8812 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8813 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
8820 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8821 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
8827 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8828 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8829 inst
.instruction
|= inst
.operands
[2].reg
;
8830 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8836 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8837 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8838 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8839 inst
.instruction
|= inst
.operands
[3].reg
;
8847 int r0off
= (inst
.instruction
== T_MNEM_mov
8848 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
8849 unsigned long opcode
;
8851 bfd_boolean low_regs
;
8853 low_regs
= (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7);
8854 opcode
= inst
.instruction
;
8855 if (current_it_mask
)
8856 narrow
= opcode
!= T_MNEM_movs
;
8858 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
8859 if (inst
.size_req
== 4
8860 || inst
.operands
[1].shifted
)
8863 if (!inst
.operands
[1].isreg
)
8865 /* Immediate operand. */
8866 if (current_it_mask
== 0 && opcode
== T_MNEM_mov
)
8868 if (low_regs
&& narrow
)
8870 inst
.instruction
= THUMB_OP16 (opcode
);
8871 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8872 if (inst
.size_req
== 2)
8873 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
8875 inst
.relax
= opcode
;
8879 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8880 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8881 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8882 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8887 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8888 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8889 encode_thumb32_shifted_operand (1);
8892 switch (inst
.instruction
)
8895 inst
.instruction
= T_OPCODE_MOV_HR
;
8896 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8897 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8898 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8902 /* We know we have low registers at this point.
8903 Generate ADD Rd, Rs, #0. */
8904 inst
.instruction
= T_OPCODE_ADD_I3
;
8905 inst
.instruction
|= inst
.operands
[0].reg
;
8906 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8912 inst
.instruction
= T_OPCODE_CMP_LR
;
8913 inst
.instruction
|= inst
.operands
[0].reg
;
8914 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8918 inst
.instruction
= T_OPCODE_CMP_HR
;
8919 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8920 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8921 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8928 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8929 if (inst
.operands
[1].isreg
)
8931 if (inst
.operands
[0].reg
< 8 && inst
.operands
[1].reg
< 8)
8933 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8934 since a MOV instruction produces unpredictable results. */
8935 if (inst
.instruction
== T_OPCODE_MOV_I8
)
8936 inst
.instruction
= T_OPCODE_ADD_I3
;
8938 inst
.instruction
= T_OPCODE_CMP_LR
;
8940 inst
.instruction
|= inst
.operands
[0].reg
;
8941 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8945 if (inst
.instruction
== T_OPCODE_MOV_I8
)
8946 inst
.instruction
= T_OPCODE_MOV_HR
;
8948 inst
.instruction
= T_OPCODE_CMP_HR
;
8954 constraint (inst
.operands
[0].reg
> 7,
8955 _("only lo regs allowed with immediate"));
8956 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8957 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
8967 top
= (inst
.instruction
& 0x00800000) != 0;
8968 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
8970 constraint (top
, _(":lower16: not allowed this instruction"));
8971 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
8973 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
8975 constraint (!top
, _(":upper16: not allowed this instruction"));
8976 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
8979 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8980 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
8982 imm
= inst
.reloc
.exp
.X_add_number
;
8983 inst
.instruction
|= (imm
& 0xf000) << 4;
8984 inst
.instruction
|= (imm
& 0x0800) << 15;
8985 inst
.instruction
|= (imm
& 0x0700) << 4;
8986 inst
.instruction
|= (imm
& 0x00ff);
8995 int r0off
= (inst
.instruction
== T_MNEM_mvn
8996 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
8999 if (inst
.size_req
== 4
9000 || inst
.instruction
> 0xffff
9001 || inst
.operands
[1].shifted
9002 || inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9004 else if (inst
.instruction
== T_MNEM_cmn
)
9006 else if (THUMB_SETS_FLAGS (inst
.instruction
))
9007 narrow
= (current_it_mask
== 0);
9009 narrow
= (current_it_mask
!= 0);
9011 if (!inst
.operands
[1].isreg
)
9013 /* For an immediate, we always generate a 32-bit opcode;
9014 section relaxation will shrink it later if possible. */
9015 if (inst
.instruction
< 0xffff)
9016 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9017 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9018 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9019 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9023 /* See if we can do this with a 16-bit instruction. */
9026 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9027 inst
.instruction
|= inst
.operands
[0].reg
;
9028 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9032 constraint (inst
.operands
[1].shifted
9033 && inst
.operands
[1].immisreg
,
9034 _("shift must be constant"));
9035 if (inst
.instruction
< 0xffff)
9036 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9037 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9038 encode_thumb32_shifted_operand (1);
9044 constraint (inst
.instruction
> 0xffff
9045 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
9046 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
9047 _("unshifted register required"));
9048 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
9051 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9052 inst
.instruction
|= inst
.operands
[0].reg
;
9053 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9062 if (do_vfp_nsyn_mrs () == SUCCESS
)
9065 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
9068 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
9069 _("selected processor does not support "
9070 "requested special purpose register"));
9074 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
9075 _("selected processor does not support "
9076 "requested special purpose register %x"));
9077 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9078 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
9079 _("'CPSR' or 'SPSR' expected"));
9082 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9083 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
9084 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
9092 if (do_vfp_nsyn_msr () == SUCCESS
)
9095 constraint (!inst
.operands
[1].isreg
,
9096 _("Thumb encoding does not support an immediate here"));
9097 flags
= inst
.operands
[0].imm
;
9100 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
9101 _("selected processor does not support "
9102 "requested special purpose register"));
9106 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
9107 _("selected processor does not support "
9108 "requested special purpose register"));
9111 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
9112 inst
.instruction
|= (flags
& ~SPSR_BIT
) >> 8;
9113 inst
.instruction
|= (flags
& 0xff);
9114 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9120 if (!inst
.operands
[2].present
)
9121 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9123 /* There is no 32-bit MULS and no 16-bit MUL. */
9124 if (unified_syntax
&& inst
.instruction
== T_MNEM_mul
)
9126 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9127 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9128 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9129 inst
.instruction
|= inst
.operands
[2].reg
<< 0;
9133 constraint (!unified_syntax
9134 && inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
9135 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
9138 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9139 inst
.instruction
|= inst
.operands
[0].reg
;
9141 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9142 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9143 else if (inst
.operands
[0].reg
== inst
.operands
[2].reg
)
9144 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9146 constraint (1, _("dest must overlap one source register"));
9153 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9154 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9155 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9156 inst
.instruction
|= inst
.operands
[3].reg
;
9158 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9159 as_tsktsk (_("rdhi and rdlo must be different"));
9167 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
9169 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9170 inst
.instruction
|= inst
.operands
[0].imm
;
9174 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9175 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
9180 constraint (inst
.operands
[0].present
,
9181 _("Thumb does not support NOP with hints"));
9182 inst
.instruction
= 0x46c0;
9193 if (THUMB_SETS_FLAGS (inst
.instruction
))
9194 narrow
= (current_it_mask
== 0);
9196 narrow
= (current_it_mask
!= 0);
9197 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9199 if (inst
.size_req
== 4)
9204 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9205 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9206 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9210 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9211 inst
.instruction
|= inst
.operands
[0].reg
;
9212 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9217 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
9219 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9221 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9222 inst
.instruction
|= inst
.operands
[0].reg
;
9223 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9230 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9231 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9232 inst
.instruction
|= inst
.operands
[2].reg
;
9233 if (inst
.operands
[3].present
)
9235 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
9236 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9237 _("expression too complex"));
9238 inst
.instruction
|= (val
& 0x1c) << 10;
9239 inst
.instruction
|= (val
& 0x03) << 6;
9246 if (!inst
.operands
[3].present
)
9247 inst
.instruction
&= ~0x00000020;
9254 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
9258 do_t_push_pop (void)
9262 constraint (inst
.operands
[0].writeback
,
9263 _("push/pop do not support {reglist}^"));
9264 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9265 _("expression too complex"));
9267 mask
= inst
.operands
[0].imm
;
9268 if ((mask
& ~0xff) == 0)
9269 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9270 else if ((inst
.instruction
== T_MNEM_push
9271 && (mask
& ~0xff) == 1 << REG_LR
)
9272 || (inst
.instruction
== T_MNEM_pop
9273 && (mask
& ~0xff) == 1 << REG_PC
))
9275 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9276 inst
.instruction
|= THUMB_PP_PC_LR
;
9279 else if (unified_syntax
)
9281 if (mask
& (1 << 13))
9282 inst
.error
= _("SP not allowed in register list");
9283 if (inst
.instruction
== T_MNEM_push
)
9285 if (mask
& (1 << 15))
9286 inst
.error
= _("PC not allowed in register list");
9290 if (mask
& (1 << 14)
9291 && mask
& (1 << 15))
9292 inst
.error
= _("LR and PC should not both be in register list");
9294 if ((mask
& (mask
- 1)) == 0)
9296 /* Single register push/pop implemented as str/ldr. */
9297 if (inst
.instruction
== T_MNEM_push
)
9298 inst
.instruction
= 0xf84d0d04; /* str reg, [sp, #-4]! */
9300 inst
.instruction
= 0xf85d0b04; /* ldr reg, [sp], #4 */
9301 mask
= ffs(mask
) - 1;
9305 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9309 inst
.error
= _("invalid register list to push/pop instruction");
9313 inst
.instruction
|= mask
;
9319 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9320 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9326 if (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9327 && inst
.size_req
!= 4)
9329 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9330 inst
.instruction
|= inst
.operands
[0].reg
;
9331 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9333 else if (unified_syntax
)
9335 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9336 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9337 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9338 inst
.instruction
|= inst
.operands
[1].reg
;
9341 inst
.error
= BAD_HIREG
;
9349 Rd
= inst
.operands
[0].reg
;
9350 Rs
= (inst
.operands
[1].present
9351 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9352 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9354 inst
.instruction
|= Rd
<< 8;
9355 inst
.instruction
|= Rs
<< 16;
9356 if (!inst
.operands
[2].isreg
)
9358 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9359 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9362 encode_thumb32_shifted_operand (2);
9368 constraint (current_it_mask
, BAD_NOT_IT
);
9369 if (inst
.operands
[0].imm
)
9370 inst
.instruction
|= 0x8;
9376 if (!inst
.operands
[1].present
)
9377 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
9384 switch (inst
.instruction
)
9387 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
9389 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
9391 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
9393 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
9397 if (THUMB_SETS_FLAGS (inst
.instruction
))
9398 narrow
= (current_it_mask
== 0);
9400 narrow
= (current_it_mask
!= 0);
9401 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9403 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
9405 if (inst
.operands
[2].isreg
9406 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
9407 || inst
.operands
[2].reg
> 7))
9409 if (inst
.size_req
== 4)
9414 if (inst
.operands
[2].isreg
)
9416 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9417 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9418 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9419 inst
.instruction
|= inst
.operands
[2].reg
;
9423 inst
.operands
[1].shifted
= 1;
9424 inst
.operands
[1].shift_kind
= shift_kind
;
9425 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
9426 ? T_MNEM_movs
: T_MNEM_mov
);
9427 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9428 encode_thumb32_shifted_operand (1);
9429 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9430 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9435 if (inst
.operands
[2].isreg
)
9439 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9440 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9441 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9442 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9446 inst
.instruction
|= inst
.operands
[0].reg
;
9447 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9453 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9454 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9455 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9458 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9459 inst
.instruction
|= inst
.operands
[0].reg
;
9460 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9466 constraint (inst
.operands
[0].reg
> 7
9467 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
9468 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9470 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
9472 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
9473 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
9474 _("source1 and dest must be same register"));
9476 switch (inst
.instruction
)
9478 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9479 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9480 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9481 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9485 inst
.instruction
|= inst
.operands
[0].reg
;
9486 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9490 switch (inst
.instruction
)
9492 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9493 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9494 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9495 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
9498 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9499 inst
.instruction
|= inst
.operands
[0].reg
;
9500 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9508 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9509 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9510 inst
.instruction
|= inst
.operands
[2].reg
;
9516 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
9517 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9518 _("expression too complex"));
9519 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9520 inst
.instruction
|= (value
& 0xf000) >> 12;
9521 inst
.instruction
|= (value
& 0x0ff0);
9522 inst
.instruction
|= (value
& 0x000f) << 16;
9528 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9529 inst
.instruction
|= inst
.operands
[1].imm
- 1;
9530 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9532 if (inst
.operands
[3].present
)
9534 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9535 _("expression too complex"));
9537 if (inst
.reloc
.exp
.X_add_number
!= 0)
9539 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
9540 inst
.instruction
|= 0x00200000; /* sh bit */
9541 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
9542 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
9544 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9551 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9552 inst
.instruction
|= inst
.operands
[1].imm
- 1;
9553 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9559 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9560 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9561 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9562 || inst
.operands
[2].negative
,
9565 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9566 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9567 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9568 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9574 if (!inst
.operands
[2].present
)
9575 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
9577 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9578 || inst
.operands
[0].reg
== inst
.operands
[2].reg
9579 || inst
.operands
[0].reg
== inst
.operands
[3].reg
9580 || inst
.operands
[1].reg
== inst
.operands
[2].reg
,
9583 inst
.instruction
|= inst
.operands
[0].reg
;
9584 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9585 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9586 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9592 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9593 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9594 inst
.instruction
|= inst
.operands
[2].reg
;
9595 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
9601 if (inst
.instruction
<= 0xffff && inst
.size_req
!= 4
9602 && inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9603 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
9605 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9606 inst
.instruction
|= inst
.operands
[0].reg
;
9607 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9609 else if (unified_syntax
)
9611 if (inst
.instruction
<= 0xffff)
9612 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9613 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9614 inst
.instruction
|= inst
.operands
[1].reg
;
9615 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
9619 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
9620 _("Thumb encoding does not support rotation"));
9621 constraint (1, BAD_HIREG
);
9628 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9636 half
= (inst
.instruction
& 0x10) != 0;
9637 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
9638 constraint (inst
.operands
[0].immisreg
,
9639 _("instruction requires register index"));
9640 constraint (inst
.operands
[0].imm
== 15,
9641 _("PC is not a valid index register"));
9642 constraint (!half
&& inst
.operands
[0].shifted
,
9643 _("instruction does not allow shifted index"));
9644 inst
.instruction
|= (inst
.operands
[0].reg
<< 16) | inst
.operands
[0].imm
;
9650 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9651 inst
.instruction
|= inst
.operands
[1].imm
;
9652 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9654 if (inst
.operands
[3].present
)
9656 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9657 _("expression too complex"));
9658 if (inst
.reloc
.exp
.X_add_number
!= 0)
9660 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
9661 inst
.instruction
|= 0x00200000; /* sh bit */
9663 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
9664 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
9666 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9673 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9674 inst
.instruction
|= inst
.operands
[1].imm
;
9675 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9678 /* Neon instruction encoder helpers. */
9680 /* Encodings for the different types for various Neon opcodes. */
9682 /* An "invalid" code for the following tables. */
9685 struct neon_tab_entry
9688 unsigned float_or_poly
;
9689 unsigned scalar_or_imm
;
9692 /* Map overloaded Neon opcodes to their respective encodings. */
9693 #define NEON_ENC_TAB \
9694 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9695 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9696 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9697 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9698 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9699 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9700 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9701 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9702 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9703 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9704 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9705 /* Register variants of the following two instructions are encoded as
9706 vcge / vcgt with the operands reversed. */ \
9707 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9708 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9709 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9710 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9711 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9712 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9713 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9714 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9715 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9716 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9717 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9718 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9719 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9720 X(vshl, 0x0000400, N_INV, 0x0800510), \
9721 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9722 X(vand, 0x0000110, N_INV, 0x0800030), \
9723 X(vbic, 0x0100110, N_INV, 0x0800030), \
9724 X(veor, 0x1000110, N_INV, N_INV), \
9725 X(vorn, 0x0300110, N_INV, 0x0800010), \
9726 X(vorr, 0x0200110, N_INV, 0x0800010), \
9727 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9728 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9729 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9730 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9731 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9732 X(vst1, 0x0000000, 0x0800000, N_INV), \
9733 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9734 X(vst2, 0x0000100, 0x0800100, N_INV), \
9735 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9736 X(vst3, 0x0000200, 0x0800200, N_INV), \
9737 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9738 X(vst4, 0x0000300, 0x0800300, N_INV), \
9739 X(vmovn, 0x1b20200, N_INV, N_INV), \
9740 X(vtrn, 0x1b20080, N_INV, N_INV), \
9741 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9742 X(vqmovun, 0x1b20240, N_INV, N_INV), \
9743 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
9744 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
9745 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
9746 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
9747 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
9748 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
9749 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
9753 #define X(OPC,I,F,S) N_MNEM_##OPC
9758 static const struct neon_tab_entry neon_enc_tab
[] =
9760 #define X(OPC,I,F,S) { (I), (F), (S) }
9765 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9766 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9767 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9768 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9769 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9770 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9771 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9772 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9773 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9774 #define NEON_ENC_SINGLE(X) \
9775 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
9776 #define NEON_ENC_DOUBLE(X) \
9777 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
9779 /* Define shapes for instruction operands. The following mnemonic characters
9780 are used in this table:
9782 F - VFP S<n> register
9783 D - Neon D<n> register
9784 Q - Neon Q<n> register
9788 L - D<n> register list
9790 This table is used to generate various data:
9791 - enumerations of the form NS_DDR to be used as arguments to
9793 - a table classifying shapes into single, double, quad, mixed.
9794 - a table used to drive neon_select_shape.
9797 #define NEON_SHAPE_DEF \
9798 X(3, (D, D, D), DOUBLE), \
9799 X(3, (Q, Q, Q), QUAD), \
9800 X(3, (D, D, I), DOUBLE), \
9801 X(3, (Q, Q, I), QUAD), \
9802 X(3, (D, D, S), DOUBLE), \
9803 X(3, (Q, Q, S), QUAD), \
9804 X(2, (D, D), DOUBLE), \
9805 X(2, (Q, Q), QUAD), \
9806 X(2, (D, S), DOUBLE), \
9807 X(2, (Q, S), QUAD), \
9808 X(2, (D, R), DOUBLE), \
9809 X(2, (Q, R), QUAD), \
9810 X(2, (D, I), DOUBLE), \
9811 X(2, (Q, I), QUAD), \
9812 X(3, (D, L, D), DOUBLE), \
9813 X(2, (D, Q), MIXED), \
9814 X(2, (Q, D), MIXED), \
9815 X(3, (D, Q, I), MIXED), \
9816 X(3, (Q, D, I), MIXED), \
9817 X(3, (Q, D, D), MIXED), \
9818 X(3, (D, Q, Q), MIXED), \
9819 X(3, (Q, Q, D), MIXED), \
9820 X(3, (Q, D, S), MIXED), \
9821 X(3, (D, Q, S), MIXED), \
9822 X(4, (D, D, D, I), DOUBLE), \
9823 X(4, (Q, Q, Q, I), QUAD), \
9824 X(2, (F, F), SINGLE), \
9825 X(3, (F, F, F), SINGLE), \
9826 X(2, (F, I), SINGLE), \
9827 X(2, (F, D), MIXED), \
9828 X(2, (D, F), MIXED), \
9829 X(3, (F, F, I), MIXED), \
9830 X(4, (R, R, F, F), SINGLE), \
9831 X(4, (F, F, R, R), SINGLE), \
9832 X(3, (D, R, R), DOUBLE), \
9833 X(3, (R, R, D), DOUBLE), \
9834 X(2, (S, R), SINGLE), \
9835 X(2, (R, S), SINGLE), \
9836 X(2, (F, R), SINGLE), \
9837 X(2, (R, F), SINGLE)
9839 #define S2(A,B) NS_##A##B
9840 #define S3(A,B,C) NS_##A##B##C
9841 #define S4(A,B,C,D) NS_##A##B##C##D
9843 #define X(N, L, C) S##N L
9856 enum neon_shape_class
9864 #define X(N, L, C) SC_##C
9866 static enum neon_shape_class neon_shape_class
[] =
9884 /* Register widths of above. */
9885 static unsigned neon_shape_el_size
[] =
9896 struct neon_shape_info
9899 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
9902 #define S2(A,B) { SE_##A, SE_##B }
9903 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
9904 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
9906 #define X(N, L, C) { N, S##N L }
9908 static struct neon_shape_info neon_shape_tab
[] =
9918 /* Bit masks used in type checking given instructions.
9919 'N_EQK' means the type must be the same as (or based on in some way) the key
9920 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9921 set, various other bits can be set as well in order to modify the meaning of
9922 the type constraint. */
9946 N_KEY
= 0x100000, /* key element (main type specifier). */
9947 N_EQK
= 0x200000, /* given operand has the same type & size as the key. */
9948 N_VFP
= 0x400000, /* VFP mode: operand size must match register width. */
9949 N_DBL
= 0x000001, /* if N_EQK, this operand is twice the size. */
9950 N_HLF
= 0x000002, /* if N_EQK, this operand is half the size. */
9951 N_SGN
= 0x000004, /* if N_EQK, this operand is forced to be signed. */
9952 N_UNS
= 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9953 N_INT
= 0x000010, /* if N_EQK, this operand is forced to be integer. */
9954 N_FLT
= 0x000020, /* if N_EQK, this operand is forced to be float. */
9955 N_SIZ
= 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9957 N_MAX_NONSPECIAL
= N_F64
9960 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9962 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9963 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9964 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9965 #define N_SUF_32 (N_SU_32 | N_F32)
9966 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9967 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9969 /* Pass this as the first type argument to neon_check_type to ignore types
9971 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9973 /* Select a "shape" for the current instruction (describing register types or
9974 sizes) from a list of alternatives. Return NS_NULL if the current instruction
9975 doesn't fit. For non-polymorphic shapes, checking is usually done as a
9976 function of operand parsing, so this function doesn't need to be called.
9977 Shapes should be listed in order of decreasing length. */
9979 static enum neon_shape
9980 neon_select_shape (enum neon_shape shape
, ...)
9983 enum neon_shape first_shape
= shape
;
9985 /* Fix missing optional operands. FIXME: we don't know at this point how
9986 many arguments we should have, so this makes the assumption that we have
9987 > 1. This is true of all current Neon opcodes, I think, but may not be
9988 true in the future. */
9989 if (!inst
.operands
[1].present
)
9990 inst
.operands
[1] = inst
.operands
[0];
9992 va_start (ap
, shape
);
9994 for (; shape
!= NS_NULL
; shape
= va_arg (ap
, int))
9999 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
10001 if (!inst
.operands
[j
].present
)
10007 switch (neon_shape_tab
[shape
].el
[j
])
10010 if (!(inst
.operands
[j
].isreg
10011 && inst
.operands
[j
].isvec
10012 && inst
.operands
[j
].issingle
10013 && !inst
.operands
[j
].isquad
))
10018 if (!(inst
.operands
[j
].isreg
10019 && inst
.operands
[j
].isvec
10020 && !inst
.operands
[j
].isquad
10021 && !inst
.operands
[j
].issingle
))
10026 if (!(inst
.operands
[j
].isreg
10027 && !inst
.operands
[j
].isvec
))
10032 if (!(inst
.operands
[j
].isreg
10033 && inst
.operands
[j
].isvec
10034 && inst
.operands
[j
].isquad
10035 && !inst
.operands
[j
].issingle
))
10040 if (!(!inst
.operands
[j
].isreg
10041 && !inst
.operands
[j
].isscalar
))
10046 if (!(!inst
.operands
[j
].isreg
10047 && inst
.operands
[j
].isscalar
))
10061 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
10062 first_error (_("invalid instruction shape"));
10067 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10068 means the Q bit should be set). */
10071 neon_quad (enum neon_shape shape
)
10073 return neon_shape_class
[shape
] == SC_QUAD
;
10077 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
10080 /* Allow modification to be made to types which are constrained to be
10081 based on the key element, based on bits set alongside N_EQK. */
10082 if ((typebits
& N_EQK
) != 0)
10084 if ((typebits
& N_HLF
) != 0)
10086 else if ((typebits
& N_DBL
) != 0)
10088 if ((typebits
& N_SGN
) != 0)
10089 *g_type
= NT_signed
;
10090 else if ((typebits
& N_UNS
) != 0)
10091 *g_type
= NT_unsigned
;
10092 else if ((typebits
& N_INT
) != 0)
10093 *g_type
= NT_integer
;
10094 else if ((typebits
& N_FLT
) != 0)
10095 *g_type
= NT_float
;
10096 else if ((typebits
& N_SIZ
) != 0)
10097 *g_type
= NT_untyped
;
10101 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
10102 operand type, i.e. the single type specified in a Neon instruction when it
10103 is the only one given. */
10105 static struct neon_type_el
10106 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
10108 struct neon_type_el dest
= *key
;
10110 assert ((thisarg
& N_EQK
) != 0);
10112 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
10117 /* Convert Neon type and size into compact bitmask representation. */
10119 static enum neon_type_mask
10120 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
10127 case 8: return N_8
;
10128 case 16: return N_16
;
10129 case 32: return N_32
;
10130 case 64: return N_64
;
10138 case 8: return N_I8
;
10139 case 16: return N_I16
;
10140 case 32: return N_I32
;
10141 case 64: return N_I64
;
10149 case 32: return N_F32
;
10150 case 64: return N_F64
;
10158 case 8: return N_P8
;
10159 case 16: return N_P16
;
10167 case 8: return N_S8
;
10168 case 16: return N_S16
;
10169 case 32: return N_S32
;
10170 case 64: return N_S64
;
10178 case 8: return N_U8
;
10179 case 16: return N_U16
;
10180 case 32: return N_U32
;
10181 case 64: return N_U64
;
10192 /* Convert compact Neon bitmask type representation to a type and size. Only
10193 handles the case where a single bit is set in the mask. */
10196 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
10197 enum neon_type_mask mask
)
10199 if ((mask
& N_EQK
) != 0)
10202 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
10204 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
10206 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
10208 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
10213 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
10215 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
10216 *type
= NT_unsigned
;
10217 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
10218 *type
= NT_integer
;
10219 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
10220 *type
= NT_untyped
;
10221 else if ((mask
& (N_P8
| N_P16
)) != 0)
10223 else if ((mask
& (N_F32
| N_F64
)) != 0)
10231 /* Modify a bitmask of allowed types. This is only needed for type
10235 modify_types_allowed (unsigned allowed
, unsigned mods
)
10238 enum neon_el_type type
;
10244 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
10246 if (el_type_of_type_chk (&type
, &size
, allowed
& i
) == SUCCESS
)
10248 neon_modify_type_size (mods
, &type
, &size
);
10249 destmask
|= type_chk_of_el_type (type
, size
);
10256 /* Check type and return type classification.
10257 The manual states (paraphrase): If one datatype is given, it indicates the
10259 - the second operand, if there is one
10260 - the operand, if there is no second operand
10261 - the result, if there are no operands.
10262 This isn't quite good enough though, so we use a concept of a "key" datatype
10263 which is set on a per-instruction basis, which is the one which matters when
10264 only one data type is written.
10265 Note: this function has side-effects (e.g. filling in missing operands). All
10266 Neon instructions should call it before performing bit encoding. */
10268 static struct neon_type_el
10269 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
10272 unsigned i
, pass
, key_el
= 0;
10273 unsigned types
[NEON_MAX_TYPE_ELS
];
10274 enum neon_el_type k_type
= NT_invtype
;
10275 unsigned k_size
= -1u;
10276 struct neon_type_el badtype
= {NT_invtype
, -1};
10277 unsigned key_allowed
= 0;
10279 /* Optional registers in Neon instructions are always (not) in operand 1.
10280 Fill in the missing operand here, if it was omitted. */
10281 if (els
> 1 && !inst
.operands
[1].present
)
10282 inst
.operands
[1] = inst
.operands
[0];
10284 /* Suck up all the varargs. */
10286 for (i
= 0; i
< els
; i
++)
10288 unsigned thisarg
= va_arg (ap
, unsigned);
10289 if (thisarg
== N_IGNORE_TYPE
)
10294 types
[i
] = thisarg
;
10295 if ((thisarg
& N_KEY
) != 0)
10300 if (inst
.vectype
.elems
> 0)
10301 for (i
= 0; i
< els
; i
++)
10302 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
10304 first_error (_("types specified in both the mnemonic and operands"));
10308 /* Duplicate inst.vectype elements here as necessary.
10309 FIXME: No idea if this is exactly the same as the ARM assembler,
10310 particularly when an insn takes one register and one non-register
10312 if (inst
.vectype
.elems
== 1 && els
> 1)
10315 inst
.vectype
.elems
= els
;
10316 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
10317 for (j
= 0; j
< els
; j
++)
10319 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
10322 else if (inst
.vectype
.elems
== 0 && els
> 0)
10325 /* No types were given after the mnemonic, so look for types specified
10326 after each operand. We allow some flexibility here; as long as the
10327 "key" operand has a type, we can infer the others. */
10328 for (j
= 0; j
< els
; j
++)
10329 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
10330 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
10332 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
10334 for (j
= 0; j
< els
; j
++)
10335 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
10336 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
10341 first_error (_("operand types can't be inferred"));
10345 else if (inst
.vectype
.elems
!= els
)
10347 first_error (_("type specifier has the wrong number of parts"));
10351 for (pass
= 0; pass
< 2; pass
++)
10353 for (i
= 0; i
< els
; i
++)
10355 unsigned thisarg
= types
[i
];
10356 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
10357 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
10358 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
10359 unsigned g_size
= inst
.vectype
.el
[i
].size
;
10361 /* Decay more-specific signed & unsigned types to sign-insensitive
10362 integer types if sign-specific variants are unavailable. */
10363 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
10364 && (types_allowed
& N_SU_ALL
) == 0)
10365 g_type
= NT_integer
;
10367 /* If only untyped args are allowed, decay any more specific types to
10368 them. Some instructions only care about signs for some element
10369 sizes, so handle that properly. */
10370 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
10371 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
10372 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
10373 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
10374 g_type
= NT_untyped
;
10378 if ((thisarg
& N_KEY
) != 0)
10382 key_allowed
= thisarg
& ~N_KEY
;
10387 if ((thisarg
& N_VFP
) != 0)
10389 enum neon_shape_el regshape
= neon_shape_tab
[ns
].el
[i
];
10390 unsigned regwidth
= neon_shape_el_size
[regshape
], match
;
10392 /* In VFP mode, operands must match register widths. If we
10393 have a key operand, use its width, else use the width of
10394 the current operand. */
10400 if (regwidth
!= match
)
10402 first_error (_("operand size must match register width"));
10407 if ((thisarg
& N_EQK
) == 0)
10409 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
10411 if ((given_type
& types_allowed
) == 0)
10413 first_error (_("bad type in Neon instruction"));
10419 enum neon_el_type mod_k_type
= k_type
;
10420 unsigned mod_k_size
= k_size
;
10421 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
10422 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
10424 first_error (_("inconsistent types in Neon instruction"));
10432 return inst
.vectype
.el
[key_el
];
10435 /* Neon-style VFP instruction forwarding. */
10437 /* Thumb VFP instructions have 0xE in the condition field. */
10440 do_vfp_cond_or_thumb (void)
10443 inst
.instruction
|= 0xe0000000;
10445 inst
.instruction
|= inst
.cond
<< 28;
10448 /* Look up and encode a simple mnemonic, for use as a helper function for the
10449 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10450 etc. It is assumed that operand parsing has already been done, and that the
10451 operands are in the form expected by the given opcode (this isn't necessarily
10452 the same as the form in which they were parsed, hence some massaging must
10453 take place before this function is called).
10454 Checks current arch version against that in the looked-up opcode. */
10457 do_vfp_nsyn_opcode (const char *opname
)
10459 const struct asm_opcode
*opcode
;
10461 opcode
= hash_find (arm_ops_hsh
, opname
);
10466 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
10467 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
10472 inst
.instruction
= opcode
->tvalue
;
10473 opcode
->tencode ();
10477 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
10478 opcode
->aencode ();
10483 do_vfp_nsyn_add_sub (enum neon_shape rs
)
10485 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
10490 do_vfp_nsyn_opcode ("fadds");
10492 do_vfp_nsyn_opcode ("fsubs");
10497 do_vfp_nsyn_opcode ("faddd");
10499 do_vfp_nsyn_opcode ("fsubd");
10503 /* Check operand types to see if this is a VFP instruction, and if so call
10507 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
10509 enum neon_shape rs
;
10510 struct neon_type_el et
;
10515 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
10516 et
= neon_check_type (2, rs
,
10517 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
10521 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
10522 et
= neon_check_type (3, rs
,
10523 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
10530 if (et
.type
!= NT_invtype
)
10542 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
10544 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
10549 do_vfp_nsyn_opcode ("fmacs");
10551 do_vfp_nsyn_opcode ("fmscs");
10556 do_vfp_nsyn_opcode ("fmacd");
10558 do_vfp_nsyn_opcode ("fmscd");
10563 do_vfp_nsyn_mul (enum neon_shape rs
)
10566 do_vfp_nsyn_opcode ("fmuls");
10568 do_vfp_nsyn_opcode ("fmuld");
10572 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
10574 int is_neg
= (inst
.instruction
& 0x80) != 0;
10575 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
10580 do_vfp_nsyn_opcode ("fnegs");
10582 do_vfp_nsyn_opcode ("fabss");
10587 do_vfp_nsyn_opcode ("fnegd");
10589 do_vfp_nsyn_opcode ("fabsd");
10593 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
10594 insns belong to Neon, and are handled elsewhere. */
10597 do_vfp_nsyn_ldm_stm (int is_dbmode
)
10599 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
10603 do_vfp_nsyn_opcode ("fldmdbs");
10605 do_vfp_nsyn_opcode ("fldmias");
10610 do_vfp_nsyn_opcode ("fstmdbs");
10612 do_vfp_nsyn_opcode ("fstmias");
10617 do_vfp_nsyn_ldr_str (int is_ldr
)
10620 do_vfp_nsyn_opcode ("flds");
10622 do_vfp_nsyn_opcode ("fsts");
10626 do_vfp_nsyn_sqrt (void)
10628 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
10629 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
10632 do_vfp_nsyn_opcode ("fsqrts");
10634 do_vfp_nsyn_opcode ("fsqrtd");
10638 do_vfp_nsyn_div (void)
10640 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
10641 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
10642 N_F32
| N_F64
| N_KEY
| N_VFP
);
10645 do_vfp_nsyn_opcode ("fdivs");
10647 do_vfp_nsyn_opcode ("fdivd");
10651 do_vfp_nsyn_nmul (void)
10653 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
10654 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
10655 N_F32
| N_F64
| N_KEY
| N_VFP
);
10659 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
10660 do_vfp_sp_dyadic ();
10664 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
10665 do_vfp_dp_rd_rn_rm ();
10667 do_vfp_cond_or_thumb ();
10671 do_vfp_nsyn_cmp (void)
10673 if (inst
.operands
[1].isreg
)
10675 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
10676 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
10680 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
10681 do_vfp_sp_monadic ();
10685 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
10686 do_vfp_dp_rd_rm ();
10691 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
10692 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
10694 switch (inst
.instruction
& 0x0fffffff)
10697 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
10700 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
10708 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
10709 do_vfp_sp_compare_z ();
10713 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
10717 do_vfp_cond_or_thumb ();
10721 nsyn_insert_sp (void)
10723 inst
.operands
[1] = inst
.operands
[0];
10724 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
10725 inst
.operands
[0].reg
= 13;
10726 inst
.operands
[0].isreg
= 1;
10727 inst
.operands
[0].writeback
= 1;
10728 inst
.operands
[0].present
= 1;
10732 do_vfp_nsyn_push (void)
10735 if (inst
.operands
[1].issingle
)
10736 do_vfp_nsyn_opcode ("fstmdbs");
10738 do_vfp_nsyn_opcode ("fstmdbd");
10742 do_vfp_nsyn_pop (void)
10745 if (inst
.operands
[1].issingle
)
10746 do_vfp_nsyn_opcode ("fldmdbs");
10748 do_vfp_nsyn_opcode ("fldmdbd");
10751 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10752 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10755 neon_dp_fixup (unsigned i
)
10759 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10773 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
10777 neon_logbits (unsigned x
)
10779 return ffs (x
) - 4;
10782 #define LOW4(R) ((R) & 0xf)
10783 #define HI1(R) (((R) >> 4) & 1)
10785 /* Encode insns with bit pattern:
10787 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10788 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10790 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10791 different meaning for some instruction. */
10794 neon_three_same (int isquad
, int ubit
, int size
)
10796 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10797 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10798 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
10799 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
10800 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
10801 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
10802 inst
.instruction
|= (isquad
!= 0) << 6;
10803 inst
.instruction
|= (ubit
!= 0) << 24;
10805 inst
.instruction
|= neon_logbits (size
) << 20;
10807 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10810 /* Encode instructions of the form:
10812 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10813 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10815 Don't write size if SIZE == -1. */
10818 neon_two_same (int qbit
, int ubit
, int size
)
10820 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10821 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10822 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10823 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10824 inst
.instruction
|= (qbit
!= 0) << 6;
10825 inst
.instruction
|= (ubit
!= 0) << 24;
10828 inst
.instruction
|= neon_logbits (size
) << 18;
10830 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10833 /* Neon instruction encoders, in approximate order of appearance. */
10836 do_neon_dyadic_i_su (void)
10838 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10839 struct neon_type_el et
= neon_check_type (3, rs
,
10840 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
10841 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10845 do_neon_dyadic_i64_su (void)
10847 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10848 struct neon_type_el et
= neon_check_type (3, rs
,
10849 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
10850 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10854 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
10857 unsigned size
= et
.size
>> 3;
10858 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10859 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10860 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10861 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10862 inst
.instruction
|= (isquad
!= 0) << 6;
10863 inst
.instruction
|= immbits
<< 16;
10864 inst
.instruction
|= (size
>> 3) << 7;
10865 inst
.instruction
|= (size
& 0x7) << 19;
10867 inst
.instruction
|= (uval
!= 0) << 24;
10869 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10873 do_neon_shl_imm (void)
10875 if (!inst
.operands
[2].isreg
)
10877 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
10878 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
10879 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10880 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, inst
.operands
[2].imm
);
10884 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10885 struct neon_type_el et
= neon_check_type (3, rs
,
10886 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
10887 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10888 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10893 do_neon_qshl_imm (void)
10895 if (!inst
.operands
[2].isreg
)
10897 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
10898 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
10899 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10900 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
10901 inst
.operands
[2].imm
);
10905 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10906 struct neon_type_el et
= neon_check_type (3, rs
,
10907 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
10908 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10909 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10914 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
10916 /* Handle .I8 and .I64 as pseudo-instructions. */
10920 /* Unfortunately, this will make everything apart from zero out-of-range.
10921 FIXME is this the intended semantics? There doesn't seem much point in
10922 accepting .I8 if so. */
10923 immediate
|= immediate
<< 8;
10927 /* Similarly, anything other than zero will be replicated in bits [63:32],
10928 which probably isn't want we want if we specified .I64. */
10929 if (immediate
!= 0)
10930 goto bad_immediate
;
10936 if (immediate
== (immediate
& 0x000000ff))
10938 *immbits
= immediate
;
10939 return (size
== 16) ? 0x9 : 0x1;
10941 else if (immediate
== (immediate
& 0x0000ff00))
10943 *immbits
= immediate
>> 8;
10944 return (size
== 16) ? 0xb : 0x3;
10946 else if (immediate
== (immediate
& 0x00ff0000))
10948 *immbits
= immediate
>> 16;
10951 else if (immediate
== (immediate
& 0xff000000))
10953 *immbits
= immediate
>> 24;
10958 first_error (_("immediate value out of range"));
10962 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10966 neon_bits_same_in_bytes (unsigned imm
)
10968 return ((imm
& 0x000000ff) == 0 || (imm
& 0x000000ff) == 0x000000ff)
10969 && ((imm
& 0x0000ff00) == 0 || (imm
& 0x0000ff00) == 0x0000ff00)
10970 && ((imm
& 0x00ff0000) == 0 || (imm
& 0x00ff0000) == 0x00ff0000)
10971 && ((imm
& 0xff000000) == 0 || (imm
& 0xff000000) == 0xff000000);
10974 /* For immediate of above form, return 0bABCD. */
10977 neon_squash_bits (unsigned imm
)
10979 return (imm
& 0x01) | ((imm
& 0x0100) >> 7) | ((imm
& 0x010000) >> 14)
10980 | ((imm
& 0x01000000) >> 21);
10983 /* Compress quarter-float representation to 0b...000 abcdefgh. */
10986 neon_qfloat_bits (unsigned imm
)
10988 return ((imm
>> 19) & 0x7f) | ((imm
>> 24) & 0x80);
10991 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10992 the instruction. *OP is passed as the initial value of the op field, and
10993 may be set to a different value depending on the constant (i.e.
10994 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10998 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, unsigned *immbits
,
10999 int *op
, int size
, enum neon_el_type type
)
11001 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
11003 if (size
!= 32 || *op
== 1)
11005 *immbits
= neon_qfloat_bits (immlo
);
11008 else if (size
== 64 && neon_bits_same_in_bytes (immhi
)
11009 && neon_bits_same_in_bytes (immlo
))
11011 /* Check this one first so we don't have to bother with immhi in later
11015 *immbits
= (neon_squash_bits (immhi
) << 4) | neon_squash_bits (immlo
);
11019 else if (immhi
!= 0)
11021 else if (immlo
== (immlo
& 0x000000ff))
11023 /* 64-bit case was already handled. Don't allow MVN with 8-bit
11025 if ((size
!= 8 && size
!= 16 && size
!= 32)
11026 || (size
== 8 && *op
== 1))
11029 return (size
== 8) ? 0xe : (size
== 16) ? 0x8 : 0x0;
11031 else if (immlo
== (immlo
& 0x0000ff00))
11033 if (size
!= 16 && size
!= 32)
11035 *immbits
= immlo
>> 8;
11036 return (size
== 16) ? 0xa : 0x2;
11038 else if (immlo
== (immlo
& 0x00ff0000))
11042 *immbits
= immlo
>> 16;
11045 else if (immlo
== (immlo
& 0xff000000))
11049 *immbits
= immlo
>> 24;
11052 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
11056 *immbits
= (immlo
>> 8) & 0xff;
11059 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
11063 *immbits
= (immlo
>> 16) & 0xff;
11070 /* Write immediate bits [7:0] to the following locations:
11072 |28/24|23 19|18 16|15 4|3 0|
11073 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11075 This function is used by VMOV/VMVN/VORR/VBIC. */
11078 neon_write_immbits (unsigned immbits
)
11080 inst
.instruction
|= immbits
& 0xf;
11081 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
11082 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
11085 /* Invert low-order SIZE bits of XHI:XLO. */
11088 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
11090 unsigned immlo
= xlo
? *xlo
: 0;
11091 unsigned immhi
= xhi
? *xhi
: 0;
11096 immlo
= (~immlo
) & 0xff;
11100 immlo
= (~immlo
) & 0xffff;
11104 immhi
= (~immhi
) & 0xffffffff;
11105 /* fall through. */
11108 immlo
= (~immlo
) & 0xffffffff;
11123 do_neon_logic (void)
11125 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
11127 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11128 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11129 /* U bit and size field were set as part of the bitmask. */
11130 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11131 neon_three_same (neon_quad (rs
), 0, -1);
11135 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
11136 struct neon_type_el et
= neon_check_type (2, rs
,
11137 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
11138 enum neon_opc opcode
= inst
.instruction
& 0x0fffffff;
11142 if (et
.type
== NT_invtype
)
11145 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11150 cmode
= neon_cmode_for_logic_imm (inst
.operands
[1].imm
, &immbits
,
11155 cmode
= neon_cmode_for_logic_imm (inst
.operands
[1].imm
, &immbits
,
11160 /* Pseudo-instruction for VBIC. */
11161 immbits
= inst
.operands
[1].imm
;
11162 neon_invert_size (&immbits
, 0, et
.size
);
11163 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11167 /* Pseudo-instruction for VORR. */
11168 immbits
= inst
.operands
[1].imm
;
11169 neon_invert_size (&immbits
, 0, et
.size
);
11170 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11180 inst
.instruction
|= neon_quad (rs
) << 6;
11181 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11182 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11183 inst
.instruction
|= cmode
<< 8;
11184 neon_write_immbits (immbits
);
11186 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11191 do_neon_bitfield (void)
11193 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11194 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11195 neon_three_same (neon_quad (rs
), 0, -1);
11199 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
11202 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11203 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
11205 if (et
.type
== NT_float
)
11207 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
11208 neon_three_same (neon_quad (rs
), 0, -1);
11212 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11213 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
11218 do_neon_dyadic_if_su (void)
11220 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
11224 do_neon_dyadic_if_su_d (void)
11226 /* This version only allow D registers, but that constraint is enforced during
11227 operand parsing so we don't need to do anything extra here. */
11228 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
11232 do_neon_dyadic_if_i (void)
11234 neon_dyadic_misc (NT_unsigned
, N_IF_32
, 0);
11238 do_neon_dyadic_if_i_d (void)
11240 neon_dyadic_misc (NT_unsigned
, N_IF_32
, 0);
11243 enum vfp_or_neon_is_neon_bits
11246 NEON_CHECK_ARCH
= 2
11249 /* Call this function if an instruction which may have belonged to the VFP or
11250 Neon instruction sets, but turned out to be a Neon instruction (due to the
11251 operand types involved, etc.). We have to check and/or fix-up a couple of
11254 - Make sure the user hasn't attempted to make a Neon instruction
11256 - Alter the value in the condition code field if necessary.
11257 - Make sure that the arch supports Neon instructions.
11259 Which of these operations take place depends on bits from enum
11260 vfp_or_neon_is_neon_bits.
11262 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11263 current instruction's condition is COND_ALWAYS, the condition field is
11264 changed to inst.uncond_value. This is necessary because instructions shared
11265 between VFP and Neon may be conditional for the VFP variants only, and the
11266 unconditional Neon version must have, e.g., 0xF in the condition field. */
11269 vfp_or_neon_is_neon (unsigned check
)
11271 /* Conditions are always legal in Thumb mode (IT blocks). */
11272 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
11274 if (inst
.cond
!= COND_ALWAYS
)
11276 first_error (_(BAD_COND
));
11279 if (inst
.uncond_value
!= -1)
11280 inst
.instruction
|= inst
.uncond_value
<< 28;
11283 if ((check
& NEON_CHECK_ARCH
)
11284 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
11286 first_error (_(BAD_FPU
));
11294 do_neon_addsub_if_i (void)
11296 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
11299 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11302 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11303 affected if we specify unsigned args. */
11304 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
11307 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11309 V<op> A,B (A is operand 0, B is operand 2)
11314 so handle that case specially. */
11317 neon_exchange_operands (void)
11319 void *scratch
= alloca (sizeof (inst
.operands
[0]));
11320 if (inst
.operands
[1].present
)
11322 /* Swap operands[1] and operands[2]. */
11323 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
11324 inst
.operands
[1] = inst
.operands
[2];
11325 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
11329 inst
.operands
[1] = inst
.operands
[2];
11330 inst
.operands
[2] = inst
.operands
[0];
11335 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
11337 if (inst
.operands
[2].isreg
)
11340 neon_exchange_operands ();
11341 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
11345 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11346 struct neon_type_el et
= neon_check_type (2, rs
,
11347 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
11349 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11350 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11351 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11352 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11353 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11354 inst
.instruction
|= neon_quad (rs
) << 6;
11355 inst
.instruction
|= (et
.type
== NT_float
) << 10;
11356 inst
.instruction
|= neon_logbits (et
.size
) << 18;
11358 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11365 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
11369 do_neon_cmp_inv (void)
11371 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
11377 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
11380 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
11381 scalars, which are encoded in 5 bits, M : Rm.
11382 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
11383 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
11387 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
11389 unsigned regno
= NEON_SCALAR_REG (scalar
);
11390 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
11395 if (regno
> 7 || elno
> 3)
11397 return regno
| (elno
<< 3);
11400 if (regno
> 15 || elno
> 1)
11402 return regno
| (elno
<< 4);
11406 first_error (_("scalar out of range for multiply instruction"));
11412 /* Encode multiply / multiply-accumulate scalar instructions. */
11415 neon_mul_mac (struct neon_type_el et
, int ubit
)
11419 /* Give a more helpful error message if we have an invalid type. */
11420 if (et
.type
== NT_invtype
)
11423 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
11424 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11425 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11426 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11427 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11428 inst
.instruction
|= LOW4 (scalar
);
11429 inst
.instruction
|= HI1 (scalar
) << 5;
11430 inst
.instruction
|= (et
.type
== NT_float
) << 8;
11431 inst
.instruction
|= neon_logbits (et
.size
) << 20;
11432 inst
.instruction
|= (ubit
!= 0) << 24;
11434 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11438 do_neon_mac_maybe_scalar (void)
11440 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
11443 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11446 if (inst
.operands
[2].isscalar
)
11448 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
11449 struct neon_type_el et
= neon_check_type (3, rs
,
11450 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
11451 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11452 neon_mul_mac (et
, neon_quad (rs
));
11455 do_neon_dyadic_if_i ();
11461 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11462 struct neon_type_el et
= neon_check_type (3, rs
,
11463 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11464 neon_three_same (neon_quad (rs
), 0, et
.size
);
11467 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
11468 same types as the MAC equivalents. The polynomial type for this instruction
11469 is encoded the same as the integer type. */
11474 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
11477 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11480 if (inst
.operands
[2].isscalar
)
11481 do_neon_mac_maybe_scalar ();
11483 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
11487 do_neon_qdmulh (void)
11489 if (inst
.operands
[2].isscalar
)
11491 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
11492 struct neon_type_el et
= neon_check_type (3, rs
,
11493 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
11494 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11495 neon_mul_mac (et
, neon_quad (rs
));
11499 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11500 struct neon_type_el et
= neon_check_type (3, rs
,
11501 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
11502 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11503 /* The U bit (rounding) comes from bit mask. */
11504 neon_three_same (neon_quad (rs
), 0, et
.size
);
11509 do_neon_fcmp_absolute (void)
11511 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11512 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
11513 /* Size field comes from bit mask. */
11514 neon_three_same (neon_quad (rs
), 1, -1);
11518 do_neon_fcmp_absolute_inv (void)
11520 neon_exchange_operands ();
11521 do_neon_fcmp_absolute ();
11525 do_neon_step (void)
11527 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11528 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
11529 neon_three_same (neon_quad (rs
), 0, -1);
11533 do_neon_abs_neg (void)
11535 enum neon_shape rs
;
11536 struct neon_type_el et
;
11538 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
11541 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11544 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
11545 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
11547 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11548 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11549 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11550 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11551 inst
.instruction
|= neon_quad (rs
) << 6;
11552 inst
.instruction
|= (et
.type
== NT_float
) << 10;
11553 inst
.instruction
|= neon_logbits (et
.size
) << 18;
11555 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11561 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11562 struct neon_type_el et
= neon_check_type (2, rs
,
11563 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
11564 int imm
= inst
.operands
[2].imm
;
11565 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
11566 _("immediate out of range for insert"));
11567 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
11573 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11574 struct neon_type_el et
= neon_check_type (2, rs
,
11575 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
11576 int imm
= inst
.operands
[2].imm
;
11577 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11578 _("immediate out of range for insert"));
11579 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
11583 do_neon_qshlu_imm (void)
11585 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11586 struct neon_type_el et
= neon_check_type (2, rs
,
11587 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
11588 int imm
= inst
.operands
[2].imm
;
11589 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
11590 _("immediate out of range for shift"));
11591 /* Only encodes the 'U present' variant of the instruction.
11592 In this case, signed types have OP (bit 8) set to 0.
11593 Unsigned types have OP set to 1. */
11594 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
11595 /* The rest of the bits are the same as other immediate shifts. */
11596 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
11600 do_neon_qmovn (void)
11602 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
11603 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
11604 /* Saturating move where operands can be signed or unsigned, and the
11605 destination has the same signedness. */
11606 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11607 if (et
.type
== NT_unsigned
)
11608 inst
.instruction
|= 0xc0;
11610 inst
.instruction
|= 0x80;
11611 neon_two_same (0, 1, et
.size
/ 2);
11615 do_neon_qmovun (void)
11617 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
11618 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
11619 /* Saturating move with unsigned results. Operands must be signed. */
11620 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11621 neon_two_same (0, 1, et
.size
/ 2);
11625 do_neon_rshift_sat_narrow (void)
11627 /* FIXME: Types for narrowing. If operands are signed, results can be signed
11628 or unsigned. If operands are unsigned, results must also be unsigned. */
11629 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
11630 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
11631 int imm
= inst
.operands
[2].imm
;
11632 /* This gets the bounds check, size encoding and immediate bits calculation
11636 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
11637 VQMOVN.I<size> <Dd>, <Qm>. */
11640 inst
.operands
[2].present
= 0;
11641 inst
.instruction
= N_MNEM_vqmovn
;
11646 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11647 _("immediate out of range"));
11648 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
11652 do_neon_rshift_sat_narrow_u (void)
11654 /* FIXME: Types for narrowing. If operands are signed, results can be signed
11655 or unsigned. If operands are unsigned, results must also be unsigned. */
11656 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
11657 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
11658 int imm
= inst
.operands
[2].imm
;
11659 /* This gets the bounds check, size encoding and immediate bits calculation
11663 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
11664 VQMOVUN.I<size> <Dd>, <Qm>. */
11667 inst
.operands
[2].present
= 0;
11668 inst
.instruction
= N_MNEM_vqmovun
;
11673 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11674 _("immediate out of range"));
11675 /* FIXME: The manual is kind of unclear about what value U should have in
11676 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
11678 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
11682 do_neon_movn (void)
11684 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
11685 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
11686 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11687 neon_two_same (0, 1, et
.size
/ 2);
11691 do_neon_rshift_narrow (void)
11693 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
11694 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
11695 int imm
= inst
.operands
[2].imm
;
11696 /* This gets the bounds check, size encoding and immediate bits calculation
11700 /* If immediate is zero then we are a pseudo-instruction for
11701 VMOVN.I<size> <Dd>, <Qm> */
11704 inst
.operands
[2].present
= 0;
11705 inst
.instruction
= N_MNEM_vmovn
;
11710 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11711 _("immediate out of range for narrowing operation"));
11712 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
11716 do_neon_shll (void)
11718 /* FIXME: Type checking when lengthening. */
11719 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
11720 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
11721 unsigned imm
= inst
.operands
[2].imm
;
11723 if (imm
== et
.size
)
11725 /* Maximum shift variant. */
11726 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11727 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11728 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11729 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11730 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11731 inst
.instruction
|= neon_logbits (et
.size
) << 18;
11733 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11737 /* A more-specific type check for non-max versions. */
11738 et
= neon_check_type (2, NS_QDI
,
11739 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
11740 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11741 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
11745 /* Check the various types for the VCVT instruction, and return which version
11746 the current instruction is. */
11749 neon_cvt_flavour (enum neon_shape rs
)
11751 #define CVT_VAR(C,X,Y) \
11752 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
11753 if (et.type != NT_invtype) \
11755 inst.error = NULL; \
11758 struct neon_type_el et
;
11759 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
11760 || rs
== NS_FF
) ? N_VFP
: 0;
11761 /* The instruction versions which take an immediate take one register
11762 argument, which is extended to the width of the full register. Thus the
11763 "source" and "destination" registers must have the same width. Hack that
11764 here by making the size equal to the key (wider, in this case) operand. */
11765 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
11767 CVT_VAR (0, N_S32
, N_F32
);
11768 CVT_VAR (1, N_U32
, N_F32
);
11769 CVT_VAR (2, N_F32
, N_S32
);
11770 CVT_VAR (3, N_F32
, N_U32
);
11774 /* VFP instructions. */
11775 CVT_VAR (4, N_F32
, N_F64
);
11776 CVT_VAR (5, N_F64
, N_F32
);
11777 CVT_VAR (6, N_S32
, N_F64
| key
);
11778 CVT_VAR (7, N_U32
, N_F64
| key
);
11779 CVT_VAR (8, N_F64
| key
, N_S32
);
11780 CVT_VAR (9, N_F64
| key
, N_U32
);
11781 /* VFP instructions with bitshift. */
11782 CVT_VAR (10, N_F32
| key
, N_S16
);
11783 CVT_VAR (11, N_F32
| key
, N_U16
);
11784 CVT_VAR (12, N_F64
| key
, N_S16
);
11785 CVT_VAR (13, N_F64
| key
, N_U16
);
11786 CVT_VAR (14, N_S16
, N_F32
| key
);
11787 CVT_VAR (15, N_U16
, N_F32
| key
);
11788 CVT_VAR (16, N_S16
, N_F64
| key
);
11789 CVT_VAR (17, N_U16
, N_F64
| key
);
11795 /* Neon-syntax VFP conversions. */
11798 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
11800 const char *opname
= 0;
11802 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
11804 /* Conversions with immediate bitshift. */
11805 const char *enc
[] =
11827 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
11829 opname
= enc
[flavour
];
11830 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
11831 _("operands 0 and 1 must be the same register"));
11832 inst
.operands
[1] = inst
.operands
[2];
11833 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
11838 /* Conversions without bitshift. */
11839 const char *enc
[] =
11853 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
11854 opname
= enc
[flavour
];
11858 do_vfp_nsyn_opcode (opname
);
11862 do_vfp_nsyn_cvtz (void)
11864 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
11865 int flavour
= neon_cvt_flavour (rs
);
11866 const char *enc
[] =
11878 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
11879 do_vfp_nsyn_opcode (enc
[flavour
]);
11885 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
11886 NS_FD
, NS_DF
, NS_FF
, NS_NULL
);
11887 int flavour
= neon_cvt_flavour (rs
);
11889 /* VFP rather than Neon conversions. */
11892 do_vfp_nsyn_cvt (rs
, flavour
);
11901 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11904 /* Fixed-point conversion with #0 immediate is encoded as an
11905 integer conversion. */
11906 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
11908 unsigned immbits
= 32 - inst
.operands
[2].imm
;
11909 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
11910 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11912 inst
.instruction
|= enctab
[flavour
];
11913 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11914 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11915 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11916 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11917 inst
.instruction
|= neon_quad (rs
) << 6;
11918 inst
.instruction
|= 1 << 21;
11919 inst
.instruction
|= immbits
<< 16;
11921 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11929 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
11931 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11933 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11937 inst
.instruction
|= enctab
[flavour
];
11939 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11940 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11941 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11942 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11943 inst
.instruction
|= neon_quad (rs
) << 6;
11944 inst
.instruction
|= 2 << 18;
11946 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11951 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
11952 do_vfp_nsyn_cvt (rs
, flavour
);
11957 neon_move_immediate (void)
11959 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
11960 struct neon_type_el et
= neon_check_type (2, rs
,
11961 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
11962 unsigned immlo
, immhi
= 0, immbits
;
11965 constraint (et
.type
== NT_invtype
,
11966 _("operand size must be specified for immediate VMOV"));
11968 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11969 op
= (inst
.instruction
& (1 << 5)) != 0;
11971 immlo
= inst
.operands
[1].imm
;
11972 if (inst
.operands
[1].regisimm
)
11973 immhi
= inst
.operands
[1].reg
;
11975 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
11976 _("immediate has bits set outside the operand size"));
11978 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
11979 et
.size
, et
.type
)) == FAIL
)
11981 /* Invert relevant bits only. */
11982 neon_invert_size (&immlo
, &immhi
, et
.size
);
11983 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11984 with one or the other; those cases are caught by
11985 neon_cmode_for_move_imm. */
11987 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
11988 et
.size
, et
.type
)) == FAIL
)
11990 first_error (_("immediate out of range"));
11995 inst
.instruction
&= ~(1 << 5);
11996 inst
.instruction
|= op
<< 5;
11998 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11999 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12000 inst
.instruction
|= neon_quad (rs
) << 6;
12001 inst
.instruction
|= cmode
<< 8;
12003 neon_write_immbits (immbits
);
12009 if (inst
.operands
[1].isreg
)
12011 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12013 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12014 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12015 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12016 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12017 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12018 inst
.instruction
|= neon_quad (rs
) << 6;
12022 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12023 neon_move_immediate ();
12026 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12029 /* Encode instructions of form:
12031 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12032 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12037 neon_mixed_length (struct neon_type_el et
, unsigned size
)
12039 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12040 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12041 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12042 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12043 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12044 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12045 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
12046 inst
.instruction
|= neon_logbits (size
) << 20;
12048 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12052 do_neon_dyadic_long (void)
12054 /* FIXME: Type checking for lengthening op. */
12055 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12056 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
12057 neon_mixed_length (et
, et
.size
);
12061 do_neon_abal (void)
12063 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12064 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
12065 neon_mixed_length (et
, et
.size
);
12069 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
12071 if (inst
.operands
[2].isscalar
)
12073 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
12074 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
12075 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12076 neon_mul_mac (et
, et
.type
== NT_unsigned
);
12080 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12081 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
12082 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12083 neon_mixed_length (et
, et
.size
);
12088 do_neon_mac_maybe_scalar_long (void)
12090 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
12094 do_neon_dyadic_wide (void)
12096 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
12097 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12098 neon_mixed_length (et
, et
.size
);
12102 do_neon_dyadic_narrow (void)
12104 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12105 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
12106 neon_mixed_length (et
, et
.size
/ 2);
12110 do_neon_mul_sat_scalar_long (void)
12112 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
12116 do_neon_vmull (void)
12118 if (inst
.operands
[2].isscalar
)
12119 do_neon_mac_maybe_scalar_long ();
12122 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12123 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
12124 if (et
.type
== NT_poly
)
12125 inst
.instruction
= NEON_ENC_POLY (inst
.instruction
);
12127 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12128 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12129 zero. Should be OK as-is. */
12130 neon_mixed_length (et
, et
.size
);
12137 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
12138 struct neon_type_el et
= neon_check_type (3, rs
,
12139 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12140 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
12141 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12142 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12143 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12144 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12145 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12146 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12147 inst
.instruction
|= neon_quad (rs
) << 6;
12148 inst
.instruction
|= imm
<< 8;
12150 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12156 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12157 struct neon_type_el et
= neon_check_type (2, rs
,
12158 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12159 unsigned op
= (inst
.instruction
>> 7) & 3;
12160 /* N (width of reversed regions) is encoded as part of the bitmask. We
12161 extract it here to check the elements to be reversed are smaller.
12162 Otherwise we'd get a reserved instruction. */
12163 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
12164 assert (elsize
!= 0);
12165 constraint (et
.size
>= elsize
,
12166 _("elements must be smaller than reversal region"));
12167 neon_two_same (neon_quad (rs
), 1, et
.size
);
12173 if (inst
.operands
[1].isscalar
)
12175 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
12176 struct neon_type_el et
= neon_check_type (2, rs
,
12177 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12178 unsigned sizebits
= et
.size
>> 3;
12179 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12180 int logsize
= neon_logbits (et
.size
);
12181 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
12183 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
12186 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12187 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12188 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12189 inst
.instruction
|= LOW4 (dm
);
12190 inst
.instruction
|= HI1 (dm
) << 5;
12191 inst
.instruction
|= neon_quad (rs
) << 6;
12192 inst
.instruction
|= x
<< 17;
12193 inst
.instruction
|= sizebits
<< 16;
12195 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12199 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
12200 struct neon_type_el et
= neon_check_type (2, rs
,
12201 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
12202 /* Duplicate ARM register to lanes of vector. */
12203 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
12206 case 8: inst
.instruction
|= 0x400000; break;
12207 case 16: inst
.instruction
|= 0x000020; break;
12208 case 32: inst
.instruction
|= 0x000000; break;
12211 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
12212 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
12213 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
12214 inst
.instruction
|= neon_quad (rs
) << 21;
12215 /* The encoding for this instruction is identical for the ARM and Thumb
12216 variants, except for the condition field. */
12217 do_vfp_cond_or_thumb ();
12221 /* VMOV has particularly many variations. It can be one of:
12222 0. VMOV<c><q> <Qd>, <Qm>
12223 1. VMOV<c><q> <Dd>, <Dm>
12224 (Register operations, which are VORR with Rm = Rn.)
12225 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12226 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12228 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12229 (ARM register to scalar.)
12230 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12231 (Two ARM registers to vector.)
12232 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12233 (Scalar to ARM register.)
12234 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12235 (Vector to two ARM registers.)
12236 8. VMOV.F32 <Sd>, <Sm>
12237 9. VMOV.F64 <Dd>, <Dm>
12238 (VFP register moves.)
12239 10. VMOV.F32 <Sd>, #imm
12240 11. VMOV.F64 <Dd>, #imm
12241 (VFP float immediate load.)
12242 12. VMOV <Rd>, <Sm>
12243 (VFP single to ARM reg.)
12244 13. VMOV <Sd>, <Rm>
12245 (ARM reg to VFP single.)
12246 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12247 (Two ARM regs to two VFP singles.)
12248 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12249 (Two VFP singles to two ARM regs.)
12251 These cases can be disambiguated using neon_select_shape, except cases 1/9
12252 and 3/11 which depend on the operand type too.
12254 All the encoded bits are hardcoded by this function.
12256 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12257 Cases 5, 7 may be used with VFPv2 and above.
12259 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12260 can specify a type where it doesn't make sense to, and is ignored).
12266 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
12267 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
12269 struct neon_type_el et
;
12270 const char *ldconst
= 0;
12274 case NS_DD
: /* case 1/9. */
12275 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
12276 /* It is not an error here if no type is given. */
12278 if (et
.type
== NT_float
&& et
.size
== 64)
12280 do_vfp_nsyn_opcode ("fcpyd");
12283 /* fall through. */
12285 case NS_QQ
: /* case 0/1. */
12287 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12289 /* The architecture manual I have doesn't explicitly state which
12290 value the U bit should have for register->register moves, but
12291 the equivalent VORR instruction has U = 0, so do that. */
12292 inst
.instruction
= 0x0200110;
12293 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12294 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12295 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12296 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12297 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12298 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12299 inst
.instruction
|= neon_quad (rs
) << 6;
12301 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12305 case NS_DI
: /* case 3/11. */
12306 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
12308 if (et
.type
== NT_float
&& et
.size
== 64)
12310 /* case 11 (fconstd). */
12311 ldconst
= "fconstd";
12312 goto encode_fconstd
;
12314 /* fall through. */
12316 case NS_QI
: /* case 2/3. */
12317 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12319 inst
.instruction
= 0x0800010;
12320 neon_move_immediate ();
12321 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12324 case NS_SR
: /* case 4. */
12326 unsigned bcdebits
= 0;
12327 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
12328 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
12329 int logsize
= neon_logbits (et
.size
);
12330 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
12331 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
12333 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
12335 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
12336 && et
.size
!= 32, _(BAD_FPU
));
12337 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
12338 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
12342 case 8: bcdebits
= 0x8; break;
12343 case 16: bcdebits
= 0x1; break;
12344 case 32: bcdebits
= 0x0; break;
12348 bcdebits
|= x
<< logsize
;
12350 inst
.instruction
= 0xe000b10;
12351 do_vfp_cond_or_thumb ();
12352 inst
.instruction
|= LOW4 (dn
) << 16;
12353 inst
.instruction
|= HI1 (dn
) << 7;
12354 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12355 inst
.instruction
|= (bcdebits
& 3) << 5;
12356 inst
.instruction
|= (bcdebits
>> 2) << 21;
12360 case NS_DRR
: /* case 5 (fmdrr). */
12361 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
12364 inst
.instruction
= 0xc400b10;
12365 do_vfp_cond_or_thumb ();
12366 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
12367 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
12368 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12369 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12372 case NS_RS
: /* case 6. */
12374 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
12375 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
12376 unsigned logsize
= neon_logbits (et
.size
);
12377 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12378 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
12379 unsigned abcdebits
= 0;
12381 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
12383 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
12384 && et
.size
!= 32, _(BAD_FPU
));
12385 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
12386 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
12390 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
12391 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
12392 case 32: abcdebits
= 0x00; break;
12396 abcdebits
|= x
<< logsize
;
12397 inst
.instruction
= 0xe100b10;
12398 do_vfp_cond_or_thumb ();
12399 inst
.instruction
|= LOW4 (dn
) << 16;
12400 inst
.instruction
|= HI1 (dn
) << 7;
12401 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12402 inst
.instruction
|= (abcdebits
& 3) << 5;
12403 inst
.instruction
|= (abcdebits
>> 2) << 21;
12407 case NS_RRD
: /* case 7 (fmrrd). */
12408 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
12411 inst
.instruction
= 0xc500b10;
12412 do_vfp_cond_or_thumb ();
12413 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12414 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12415 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12416 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12419 case NS_FF
: /* case 8 (fcpys). */
12420 do_vfp_nsyn_opcode ("fcpys");
12423 case NS_FI
: /* case 10 (fconsts). */
12424 ldconst
= "fconsts";
12426 if (is_quarter_float (inst
.operands
[1].imm
))
12428 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
12429 do_vfp_nsyn_opcode (ldconst
);
12432 first_error (_("immediate out of range"));
12435 case NS_RF
: /* case 12 (fmrs). */
12436 do_vfp_nsyn_opcode ("fmrs");
12439 case NS_FR
: /* case 13 (fmsr). */
12440 do_vfp_nsyn_opcode ("fmsr");
12443 /* The encoders for the fmrrs and fmsrr instructions expect three operands
12444 (one of which is a list), but we have parsed four. Do some fiddling to
12445 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
12447 case NS_RRFF
: /* case 14 (fmrrs). */
12448 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
12449 _("VFP registers must be adjacent"));
12450 inst
.operands
[2].imm
= 2;
12451 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
12452 do_vfp_nsyn_opcode ("fmrrs");
12455 case NS_FFRR
: /* case 15 (fmsrr). */
12456 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
12457 _("VFP registers must be adjacent"));
12458 inst
.operands
[1] = inst
.operands
[2];
12459 inst
.operands
[2] = inst
.operands
[3];
12460 inst
.operands
[0].imm
= 2;
12461 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
12462 do_vfp_nsyn_opcode ("fmsrr");
/* Encode a Neon right-shift-by-immediate (V{R}SHR family, per the comment
   below).  Validates the shift count against the element size, then hands
   off to neon_imm_shift.
   NOTE(review): this text is a mangled extraction -- braces, the guard
   around the zero-immediate path and its early return appear to have been
   dropped.  Verify the control flow against the upstream file.  */
12471 do_neon_rshift_round_imm (void)
/* Accept Dd,Dm,#imm or Qd,Qm,#imm operand shapes.  */
12473 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
/* Element type: any signed/unsigned size; the keyed operand fixes et.size.  */
12474 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
12475 int imm
= inst
.operands
[2].imm
;
12477 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
/* Zero shift count: drop the immediate operand and re-encode as a move
   (presumably via the VMOV path -- the guard line is missing here).  */
12480 inst
.operands
[2].present
= 0;
/* A shift of 1..et.size is the architecturally valid range.  */
12485 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12486 _("immediate out of range for shift"));
/* TRUE = right shift; signedness comes from the parsed element type.  */
12487 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
12492 do_neon_movl (void)
12494 struct neon_type_el et
= neon_check_type (2, NS_QD
,
12495 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12496 unsigned sizebits
= et
.size
>> 3;
12497 inst
.instruction
|= sizebits
<< 19;
12498 neon_two_same (0, et
.type
== NT_unsigned
, -1);
12504 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12505 struct neon_type_el et
= neon_check_type (2, rs
,
12506 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12507 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12508 neon_two_same (neon_quad (rs
), 1, et
.size
);
/* Encode VZIP/VUZP (two registers, same size, 8/16/32-bit elements).
   NOTE(review): mangled extraction -- the body of the special-case branch
   (which re-dispatches after setting N_MNEM_vtrn) and its return appear
   to have been dropped; confirm against upstream.  */
12512 do_neon_zip_uzp (void)
12514 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12515 struct neon_type_el et
= neon_check_type (2, rs
,
12516 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
/* D-register operands with 32-bit elements cannot be encoded directly;
   see the special case below.  */
12517 if (rs
== NS_DD
&& et
.size
== 32)
12519 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
12520 inst
.instruction
= N_MNEM_vtrn
;
/* Normal path: standard two-register same-size encoding.  */
12524 neon_two_same (neon_quad (rs
), 1, et
.size
);
12528 do_neon_sat_abs_neg (void)
12530 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12531 struct neon_type_el et
= neon_check_type (2, rs
,
12532 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
12533 neon_two_same (neon_quad (rs
), 1, et
.size
);
12537 do_neon_pair_long (void)
12539 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12540 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
12541 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
12542 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
12543 neon_two_same (neon_quad (rs
), 1, et
.size
);
12547 do_neon_recip_est (void)
12549 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12550 struct neon_type_el et
= neon_check_type (2, rs
,
12551 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
12552 inst
.instruction
|= (et
.type
== NT_float
) << 8;
12553 neon_two_same (neon_quad (rs
), 1, et
.size
);
12559 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12560 struct neon_type_el et
= neon_check_type (2, rs
,
12561 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
12562 neon_two_same (neon_quad (rs
), 1, et
.size
);
12568 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12569 struct neon_type_el et
= neon_check_type (2, rs
,
12570 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
12571 neon_two_same (neon_quad (rs
), 1, et
.size
);
12577 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12578 struct neon_type_el et
= neon_check_type (2, rs
,
12579 N_EQK
| N_INT
, N_8
| N_KEY
);
12580 neon_two_same (neon_quad (rs
), 1, et
.size
);
12586 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12587 neon_two_same (neon_quad (rs
), 1, -1);
/* Encode VTBL/VTBX table lookup: validate the table list length (1..4),
   then pack the three D-register fields and the length-minus-one into the
   instruction word.
   NOTE(review): mangled extraction -- the braces and early return of the
   error path after first_error appear to have been dropped.  */
12591 do_neon_tbl_tbx (void)
12593 unsigned listlenbits
;
/* Operand 2 (list) and operand 0 (dest) must agree; elements are 8-bit.  */
12594 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
/* The table may contain one to four registers.  */
12596 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
12598 first_error (_("bad list length for table lookup"));
/* List length is encoded as length-1 in bits [9:8] (see the << 8 below).  */
12602 listlenbits
= inst
.operands
[1].imm
- 1;
/* Destination register: low 4 bits at [15:12], high bit at 22.  */
12603 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12604 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
/* Table base register: low 4 bits at [19:16], high bit at 7.  */
12605 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12606 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
/* Index register: low 4 bits at [3:0], high bit at 5.  */
12607 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12608 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12609 inst
.instruction
|= listlenbits
<< 8;
/* Apply the common Neon data-processing fixup last.  */
12611 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
/* Encode the multiple load/store forms (VLDM/VSTM-style).  Single-precision
   register lists are delegated to do_vfp_nsyn_ldm_stm; double-precision
   lists are encoded here.
   NOTE(review): mangled extraction -- the early return after the
   single-precision delegation, braces, and the tail of one error-message
   string literal appear to have been dropped.  */
12615 do_neon_ldm_stm (void)
12617 /* P, U and L bits are part of bitmask. */
/* Bit 24 (P) distinguishes decrement-before from increment-after mode.  */
12618 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
/* Each double register occupies two words in the offset field.  */
12619 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
12621 if (inst
.operands
[1].issingle
)
12623 do_vfp_nsyn_ldm_stm (is_dbmode
);
/* Decrement-before requires base-register writeback.  */
12627 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
12628 _("writeback (!) must be used for VLDMDB and VSTMDB"));
12630 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
12631 _("register list must contain at least 1 and at most 16 "
/* Base register at [19:16], writeback flag at bit 21.  */
12634 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
12635 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
/* First register of the list: low 4 bits at [15:12], high bit at 22.  */
12636 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
12637 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
12639 inst
.instruction
|= offsetbits
;
12641 do_vfp_cond_or_thumb ();
12645 do_neon_ldr_str (void)
12647 unsigned offsetbits
;
12649 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
12651 if (inst
.operands
[0].issingle
)
12653 do_vfp_nsyn_ldr_str (is_ldr
);
12657 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12658 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12660 constraint (inst
.reloc
.pc_rel
&& !is_ldr
,
12661 _("PC-relative addressing unavailable with VSTR"));
12663 constraint (!inst
.reloc
.pc_rel
&& inst
.reloc
.exp
.X_op
!= O_constant
,
12664 _("Immediate value must be a constant"));
12666 if (inst
.reloc
.exp
.X_add_number
< 0)
12669 offsetbits
= -inst
.reloc
.exp
.X_add_number
/ 4;
12672 offsetbits
= inst
.reloc
.exp
.X_add_number
/ 4;
12674 /* FIXME: Does this catch everything? */
12675 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
12676 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
12677 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
,
12679 constraint ((inst
.operands
[1].imm
& 3) != 0,
12680 _("Offset must be a multiple of 4"));
12681 constraint (offsetbits
!= (offsetbits
& 0xff),
12682 _("Immediate offset out of range"));
12684 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12685 inst
.instruction
|= offsetbits
& 0xff;
12686 inst
.instruction
|= offset_up
<< 23;
12688 do_vfp_cond_or_thumb ();
12690 if (inst
.reloc
.pc_rel
)
12693 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
12695 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
12698 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12701 /* "interleave" version also handles non-interleaving register VLD1/VST1
12705 do_neon_ld_st_interleave (void)
12707 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
12708 N_8
| N_16
| N_32
| N_64
);
12709 unsigned alignbits
= 0;
12711 /* The bits in this table go:
12712 0: register stride of one (0) or two (1)
12713 1,2: register list length, minus one (1, 2, 3, 4).
12714 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
12715 We use -1 for invalid entries. */
12716 const int typetable
[] =
12718 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
12719 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
12720 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
12721 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
12725 if (et
.type
== NT_invtype
)
12728 if (inst
.operands
[1].immisalign
)
12729 switch (inst
.operands
[1].imm
>> 8)
12731 case 64: alignbits
= 1; break;
12733 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
12734 goto bad_alignment
;
12738 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
12739 goto bad_alignment
;
12744 first_error (_("bad alignment"));
12748 inst
.instruction
|= alignbits
<< 4;
12749 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12751 /* Bits [4:6] of the immediate in a list specifier encode register stride
12752 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
12753 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
12754 up the right value for "type" in a table based on this value and the given
12755 list style, then stick it back. */
12756 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
12757 | (((inst
.instruction
>> 8) & 3) << 3);
12759 typebits
= typetable
[idx
];
12761 constraint (typebits
== -1, _("bad list type for instruction"));
12763 inst
.instruction
&= ~0xf00;
12764 inst
.instruction
|= typebits
<< 8;
12767 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
12768 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
12769 otherwise. The variable arguments are a list of pairs of legal (size, align)
12770 values, terminated with -1. */
/* NOTE(review): mangled extraction -- the return-type line, the va_list
   declaration, loop braces, the assignments that set *do_align/result,
   and va_end appear to have been dropped.  Returns SUCCESS/FAIL.  */
12773 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
12776 int result
= FAIL
, thissize
, thisalign
;
/* No alignment specifier on the operand: nothing to validate.  */
12778 if (!inst
.operands
[1].immisalign
)
12784 va_start (ap
, do_align
)
;
/* Walk the (size, align) pairs until the -1 sentinel.  */
12788 thissize
= va_arg (ap
, int);
12789 if (thissize
== -1)
12791 thisalign
= va_arg (ap
, int);
/* A matching pair means the requested alignment is legal.  */
12793 if (size
== thissize
&& align
== thisalign
)
12796 while (result
!= SUCCESS
);
12800 if (result
== SUCCESS
)
12803 first_error (_("unsupported alignment for instruction"));
12809 do_neon_ld_st_lane (void)
12811 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
12812 int align_good
, do_align
= 0;
12813 int logsize
= neon_logbits (et
.size
);
12814 int align
= inst
.operands
[1].imm
>> 8;
12815 int n
= (inst
.instruction
>> 8) & 3;
12816 int max_el
= 64 / et
.size
;
12818 if (et
.type
== NT_invtype
)
12821 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
12822 _("bad list length"));
12823 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
12824 _("scalar index out of range"));
12825 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
12827 _("stride of 2 unavailable when element size is 8"));
12831 case 0: /* VLD1 / VST1. */
12832 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
12834 if (align_good
== FAIL
)
12838 unsigned alignbits
= 0;
12841 case 16: alignbits
= 0x1; break;
12842 case 32: alignbits
= 0x3; break;
12845 inst
.instruction
|= alignbits
<< 4;
12849 case 1: /* VLD2 / VST2. */
12850 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
12852 if (align_good
== FAIL
)
12855 inst
.instruction
|= 1 << 4;
12858 case 2: /* VLD3 / VST3. */
12859 constraint (inst
.operands
[1].immisalign
,
12860 _("can't use alignment with this instruction"));
12863 case 3: /* VLD4 / VST4. */
12864 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
12865 16, 64, 32, 64, 32, 128, -1);
12866 if (align_good
== FAIL
)
12870 unsigned alignbits
= 0;
12873 case 8: alignbits
= 0x1; break;
12874 case 16: alignbits
= 0x1; break;
12875 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
12878 inst
.instruction
|= alignbits
<< 4;
12885 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
12886 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
12887 inst
.instruction
|= 1 << (4 + logsize
);
12889 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
12890 inst
.instruction
|= logsize
<< 10;
12893 /* Encode single n-element structure to all lanes VLD<n> instructions. */
12896 do_neon_ld_dup (void)
12898 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
12899 int align_good
, do_align
= 0;
12901 if (et
.type
== NT_invtype
)
12904 switch ((inst
.instruction
>> 8) & 3)
12906 case 0: /* VLD1. */
12907 assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
12908 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
12909 &do_align
, 16, 16, 32, 32, -1);
12910 if (align_good
== FAIL
)
12912 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
12915 case 2: inst
.instruction
|= 1 << 5; break;
12916 default: first_error (_("bad list length")); return;
12918 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12921 case 1: /* VLD2. */
12922 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
12923 &do_align
, 8, 16, 16, 32, 32, 64, -1);
12924 if (align_good
== FAIL
)
12926 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
12927 _("bad list length"));
12928 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
12929 inst
.instruction
|= 1 << 5;
12930 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12933 case 2: /* VLD3. */
12934 constraint (inst
.operands
[1].immisalign
,
12935 _("can't use alignment with this instruction"));
12936 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
12937 _("bad list length"));
12938 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
12939 inst
.instruction
|= 1 << 5;
12940 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12943 case 3: /* VLD4. */
12945 int align
= inst
.operands
[1].imm
>> 8;
12946 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
12947 16, 64, 32, 64, 32, 128, -1);
12948 if (align_good
== FAIL
)
12950 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
12951 _("bad list length"));
12952 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
12953 inst
.instruction
|= 1 << 5;
12954 if (et
.size
== 32 && align
== 128)
12955 inst
.instruction
|= 0x3 << 6;
12957 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12964 inst
.instruction
|= do_align
<< 4;
12967 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
12968 apart from bits [11:4]. */
12971 do_neon_ldx_stx (void)
12973 switch (NEON_LANE (inst
.operands
[0].imm
))
12975 case NEON_INTERLEAVE_LANES
:
12976 inst
.instruction
= NEON_ENC_INTERLV (inst
.instruction
);
12977 do_neon_ld_st_interleave ();
12980 case NEON_ALL_LANES
:
12981 inst
.instruction
= NEON_ENC_DUP (inst
.instruction
);
12986 inst
.instruction
= NEON_ENC_LANE (inst
.instruction
);
12987 do_neon_ld_st_lane ();
12990 /* L bit comes from bit mask. */
12991 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12992 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12993 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12995 if (inst
.operands
[1].postind
)
12997 int postreg
= inst
.operands
[1].imm
& 0xf;
12998 constraint (!inst
.operands
[1].immisreg
,
12999 _("post-index must be a register"));
13000 constraint (postreg
== 0xd || postreg
== 0xf,
13001 _("bad register for post-index"));
13002 inst
.instruction
|= postreg
;
13004 else if (inst
.operands
[1].writeback
)
13006 inst
.instruction
|= 0xd;
13009 inst
.instruction
|= 0xf;
13012 inst
.instruction
|= 0xf9000000;
13014 inst
.instruction
|= 0xf4000000;
13018 /* Overall per-instruction processing. */
13020 /* We need to be able to fix up arbitrary expressions in some statements.
13021 This is so that we can handle symbols that are an arbitrary distance from
13022 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13023 which returns part of an address in a form which will be valid for
13024 a data instruction. We do this by pushing the expression into a symbol
13025 in the expr_section, and creating a fix for that. */
13028 fix_new_arm (fragS
* frag
,
13043 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
, reloc
);
13047 new_fix
= fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
13052 /* Mark whether the fix is to a THUMB instruction, or an ARM
13054 new_fix
->tc_fix_data
= thumb_mode
;
13057 /* Create a frag for an instruction requiring relaxation. */
/* NOTE(review): mangled extraction -- local declarations (sym, offset, to),
   the switch case labels and braces appear to have been dropped.  */
13059 output_relax_insn (void)
13066 /* The size of the instruction is unknown, so tie the debug info to the
13067 start of the instruction. */
13068 dwarf2_emit_insn (0);
/* Split the relocation expression into a symbol plus constant offset;
   complex expressions are wrapped in an expression symbol below.  */
13071 switch (inst
.reloc
.exp
.X_op
)
13074 sym
= inst
.reloc
.exp
.X_add_symbol
;
13075 offset
= inst
.reloc
.exp
.X_add_number
;
13079 offset
= inst
.reloc
.exp
.X_add_number
;
13082 sym
= make_expr_symbol (&inst
.reloc
.exp
);
/* Emit a machine-dependent variable frag; relaxation state is carried in
   inst.relax.  */
13086 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
13087 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
/* Write the short (Thumb-size) form; relaxation may widen it later.  */
13088 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
13091 /* Write a 32-bit thumb instruction to buf. */
13093 put_thumb32_insn (char * buf
, unsigned long insn
)
13095 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
13096 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
/* Emit the fully-assembled instruction in `inst' into the current frag:
   report any accumulated parse/encode error, handle the relaxable case,
   write the bytes (with Thumb 32-bit and double-word ARM handling), then
   attach the fixup and DWARF line info.
   NOTE(review): mangled extraction -- local declarations, braces, and
   several `return`/`else` lines appear to have been dropped.  */
13100 output_inst (const char * str
)
/* A recorded error aborts output for this statement.  */
13106 as_bad ("%s -- `%s'", inst
.error
, str
);
/* Relaxable instructions go through the variable-frag path instead.  */
13110 output_relax_insn();
13113 if (inst
.size
== 0)
13116 to
= frag_more (inst
.size
);
/* 32-bit Thumb encodings are written as two halfwords.  */
13118 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
13120 assert (inst
.size
== (2 * THUMB_SIZE
));
13121 put_thumb32_insn (to
, inst
.instruction
);
/* Double-length ARM encodings: the same word is written twice.  */
13123 else if (inst
.size
> INSN_SIZE
)
13125 assert (inst
.size
== (2 * INSN_SIZE
));
13126 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
13127 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
/* Ordinary single-width instruction.  */
13130 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
/* Attach the pending relocation, if any, at the instruction's offset.  */
13132 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
13133 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
13134 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
13138 dwarf2_emit_insn (inst
.size
);
13142 /* Tag values used in struct asm_opcode's tag field. */
13145 OT_unconditional
, /* Instruction cannot be conditionalized.
13146 The ARM condition field is still 0xE. */
13147 OT_unconditionalF
, /* Instruction cannot be conditionalized
13148 and carries 0xF in its ARM condition field. */
13149 OT_csuffix
, /* Instruction takes a conditional suffix. */
13150 OT_csuffixF
, /* Some forms of the instruction take a conditional
13151 suffix, others place 0xF where the condition field
13153 OT_cinfix3
, /* Instruction takes a conditional infix,
13154 beginning at character index 3. (In
13155 unified mode, it becomes a suffix.) */
13156 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
13157 tsts, cmps, cmns, and teqs. */
13158 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
13159 character index 3, even in unified mode. Used for
13160 legacy instructions where suffix and infix forms
13161 may be ambiguous. */
13162 OT_csuf_or_in3
, /* Instruction takes either a conditional
13163 suffix or an infix at character index 3. */
13164 OT_odd_infix_unc
, /* This is the unconditional variant of an
13165 instruction that takes a conditional infix
13166 at an unusual position. In unified mode,
13167 this variant will accept a suffix. */
13168 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
13169 are the conditional variants of instructions that
13170 take conditional infixes in unusual positions.
13171 The infix appears at character index
13172 (tag - OT_odd_infix_0). These are not accepted
13173 in unified mode. */
13176 /* Subroutine of md_assemble, responsible for looking up the primary
13177 opcode from the mnemonic the user wrote. STR points to the
13178 beginning of the mnemonic.
13180 This is not simply a hash table lookup, because of conditional
13181 variants. Most instructions have conditional variants, which are
13182 expressed with a _conditional affix_ to the mnemonic. If we were
13183 to encode each conditional variant as a literal string in the opcode
13184 table, it would have approximately 20,000 entries.
13186 Most mnemonics take this affix as a suffix, and in unified syntax,
13187 'most' is upgraded to 'all'. However, in the divided syntax, some
13188 instructions take the affix as an infix, notably the s-variants of
13189 the arithmetic instructions. Of those instructions, all but six
13190 have the infix appear after the third character of the mnemonic.
13192 Accordingly, the algorithm for looking up primary opcodes given
13195 1. Look up the identifier in the opcode table.
13196 If we find a match, go to step U.
13198 2. Look up the last two characters of the identifier in the
13199 conditions table. If we find a match, look up the first N-2
13200 characters of the identifier in the opcode table. If we
13201 find a match, go to step CE.
13203 3. Look up the fourth and fifth characters of the identifier in
13204 the conditions table. If we find a match, extract those
13205 characters from the identifier, and look up the remaining
13206 characters in the opcode table. If we find a match, go
13211 U. Examine the tag field of the opcode structure, in case this is
13212 one of the six instructions with its conditional infix in an
13213 unusual place. If it is, the tag tells us where to find the
13214 infix; look it up in the conditions table and set inst.cond
13215 accordingly. Otherwise, this is an unconditional instruction.
13216 Again set inst.cond accordingly. Return the opcode structure.
13218 CE. Examine the tag field to make sure this is an instruction that
13219 should receive a conditional suffix. If it is not, fail.
13220 Otherwise, set inst.cond from the suffix we already looked up,
13221 and return the opcode structure.
13223 CM. Examine the tag field to make sure this is an instruction that
13224 should receive a conditional infix after the third character.
13225 If it is not, fail. Otherwise, undo the edits to the current
13226 line of input and proceed as for case CE. */
13228 static const struct asm_opcode
*
13229 opcode_lookup (char **str
)
13233 const struct asm_opcode
*opcode
;
13234 const struct asm_cond
*cond
;
13237 /* Scan up to the end of the mnemonic, which must end in white space,
13238 '.' (in unified mode only), or end of string. */
13239 for (base
= end
= *str
; *end
!= '\0'; end
++)
13240 if (*end
== ' ' || (unified_syntax
&& *end
== '.'))
13246 /* Handle a possible width suffix and/or Neon type suffix. */
13253 else if (end
[1] == 'n')
13258 inst
.vectype
.elems
= 0;
13260 *str
= end
+ offset
;
13262 if (end
[offset
] == '.')
13264 /* See if we have a Neon type suffix. */
13265 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
13268 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
13274 /* Look for unaffixed or special-case affixed mnemonic. */
13275 opcode
= hash_find_n (arm_ops_hsh
, base
, end
- base
);
13279 if (opcode
->tag
< OT_odd_infix_0
)
13281 inst
.cond
= COND_ALWAYS
;
13285 if (unified_syntax
)
13286 as_warn (_("conditional infixes are deprecated in unified syntax"));
13287 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
13288 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13291 inst
.cond
= cond
->value
;
13295 /* Cannot have a conditional suffix on a mnemonic of less than two
13297 if (end
- base
< 3)
13300 /* Look for suffixed mnemonic. */
13302 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13303 opcode
= hash_find_n (arm_ops_hsh
, base
, affix
- base
);
13304 if (opcode
&& cond
)
13307 switch (opcode
->tag
)
13309 case OT_cinfix3_legacy
:
13310 /* Ignore conditional suffixes matched on infix only mnemonics. */
13314 case OT_cinfix3_deprecated
:
13315 case OT_odd_infix_unc
:
13316 if (!unified_syntax
)
13318 /* else fall through */
13322 case OT_csuf_or_in3
:
13323 inst
.cond
= cond
->value
;
13326 case OT_unconditional
:
13327 case OT_unconditionalF
:
13330 inst
.cond
= cond
->value
;
13334 /* delayed diagnostic */
13335 inst
.error
= BAD_COND
;
13336 inst
.cond
= COND_ALWAYS
;
13345 /* Cannot have a usual-position infix on a mnemonic of less than
13346 six characters (five would be a suffix). */
13347 if (end
- base
< 6)
13350 /* Look for infixed mnemonic in the usual position. */
13352 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13356 memcpy (save
, affix
, 2);
13357 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
13358 opcode
= hash_find_n (arm_ops_hsh
, base
, (end
- base
) - 2);
13359 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
13360 memcpy (affix
, save
, 2);
13363 && (opcode
->tag
== OT_cinfix3
13364 || opcode
->tag
== OT_cinfix3_deprecated
13365 || opcode
->tag
== OT_csuf_or_in3
13366 || opcode
->tag
== OT_cinfix3_legacy
))
13370 && (opcode
->tag
== OT_cinfix3
13371 || opcode
->tag
== OT_cinfix3_deprecated
))
13372 as_warn (_("conditional infixes are deprecated in unified syntax"));
13374 inst
.cond
= cond
->value
;
/* Assemble one source line STR.  Looks the mnemonic up, then encodes it
   either as Thumb (thumb_mode path) or ARM, checking CPU feature support,
   IT-block conditions and width suffixes along the way.  If the mnemonic
   is unknown, falls back to .req / Neon .dn/.qn register-alias handling.
   NOTE(review): this chunk is a lossy extraction -- several original
   source lines are missing between the numbered fragments below, so the
   text as-is is not compilable; only the misspelled diagnostic string
   ("instrunction" -> "instruction") has been corrected.  */
13382 md_assemble (char *str
)
13385 const struct asm_opcode
* opcode
;
13387 /* Align the previous label if needed. */
13388 if (last_label_seen
!= NULL
)
13390 symbol_set_frag (last_label_seen
, frag_now
);
13391 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
13392 S_SET_SEGMENT (last_label_seen
, now_seg
);
/* Reset per-instruction state before parsing.  */
13395 memset (&inst
, '\0', sizeof (inst
));
13396 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
13398 opcode
= opcode_lookup (&p
);
13401 /* It wasn't an instruction, but it might be a register alias of
13402 the form alias .req reg, or a Neon .dn/.qn directive. */
13403 if (!create_register_alias (str
, p
)
13404 && !create_neon_reg_alias (str
, p
))
13405 as_bad (_("bad instruction `%s'"), str
);
13410 if (opcode
->tag
== OT_cinfix3_deprecated
)
13411 as_warn (_("s suffix on comparison instruction is deprecated"));
13413 /* The value which unconditional instructions should have in place of the
13414 condition field. */
13415 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
/* --- Thumb encoding path (thumb_mode) --- */
13419 arm_feature_set variant
;
13421 variant
= cpu_variant
;
13422 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13423 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
13424 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
13425 /* Check that this instruction is supported for this CPU. */
13426 if (!opcode
->tvariant
13427 || (thumb_mode
== 1
13428 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
13430 as_bad (_("selected processor does not support `%s'"), str
);
13433 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
13434 && opcode
->tencode
!= do_t_branch
)
13436 as_bad (_("Thumb does not support conditional execution"));
13440 /* Check conditional suffixes. */
13441 if (current_it_mask
)
/* Derive the condition implied by the current IT state and advance
   the IT mask one slot.  */
13444 cond
= current_cc
^ ((current_it_mask
>> 4) & 1) ^ 1;
13445 current_it_mask
<<= 1;
13446 current_it_mask
&= 0x1f;
13447 /* The BKPT instruction is unconditional even in an IT block. */
13449 && cond
!= inst
.cond
&& opcode
->tencode
!= do_t_bkpt
)
13451 as_bad (_("incorrect condition in IT block"));
13455 else if (inst
.cond
!= COND_ALWAYS
&& opcode
->tencode
!= do_t_branch
)
/* Fixed typo in diagnostic: was "instrunction".  */
13457 as_bad (_("thumb conditional instruction not in IT block"));
13461 mapping_state (MAP_THUMB
);
13462 inst
.instruction
= opcode
->tvalue
;
13464 if (!parse_operands (p
, opcode
->operands
))
13465 opcode
->tencode ();
13467 /* Clear current_it_mask at the end of an IT block. */
13468 if (current_it_mask
== 0x10)
13469 current_it_mask
= 0;
13471 if (!(inst
.error
|| inst
.relax
))
13473 assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
/* Thumb encodings >= 0x10000 are 32-bit (Thumb-2).  */
13474 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
13475 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
13477 as_bad (_("cannot honor width suffix -- `%s'"), str
);
/* Record which architecture features this instruction used.  */
13481 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
13482 *opcode
->tvariant
);
13483 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
13484 set those bits when Thumb-2 32-bit instructions are seen. ie.
13485 anything other than bl/blx.
13486 This is overly pessimistic for relaxable instructions. */
13487 if ((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
13489 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
/* --- ARM encoding path --- */
13494 /* Check that this instruction is supported for this CPU. */
13495 if (!opcode
->avariant
||
13496 !ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
))
13498 as_bad (_("selected processor does not support `%s'"), str
);
13503 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
13507 mapping_state (MAP_ARM
);
13508 inst
.instruction
= opcode
->avalue
;
13509 if (opcode
->tag
== OT_unconditionalF
)
13510 inst
.instruction
|= 0xF << 28;
13512 inst
.instruction
|= inst
.cond
<< 28;
13513 inst
.size
= INSN_SIZE
;
13514 if (!parse_operands (p
, opcode
->operands
))
13515 opcode
->aencode ();
13516 /* Arm mode bx is marked as both v4T and v5 because it's still required
13517 on a hypothetical non-thumb v5 core. */
13518 if (ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v4t
)
13519 || ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v5
))
13520 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
13522 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
13523 *opcode
->avariant
);
13528 /* Various frobbings of labels and their addresses. */
/* Called at the start of every input line: forget the label (if any)
   seen on the previous line, so md_assemble only realigns labels that
   appear on the line currently being assembled.  */
13531 arm_start_line_hook (void)
13533 last_label_seen
= NULL
;
/* Called whenever a label SYM is defined.  Remembers it (so md_assemble
   can realign it), tags it with the current Thumb/interwork state, marks
   non-local code labels as Thumb functions when requested via
   .thumb_func, and emits DWARF line info for the label.  */
13537 arm_frob_label (symbolS
* sym
)
13539 last_label_seen
= sym
;
13541 ARM_SET_THUMB (sym
, thumb_mode
);
13543 #if defined OBJ_COFF || defined OBJ_ELF
13544 ARM_SET_INTERWORK (sym
, support_interwork
);
13547 /* Note - do not allow local symbols (.Lxxx) to be labeled
13548 as Thumb functions. This is because these labels, whilst
13549 they exist inside Thumb code, are not the entry points for
13550 possible ARM->Thumb calls. Also, these labels can be used
13551 as part of a computed goto or switch statement. eg gcc
13552 can generate code that looks like this:
13554 ldr r2, [pc, .Laaa]
13564 The first instruction loads the address of the jump table.
13565 The second instruction converts a table index into a byte offset.
13566 The third instruction gets the jump address out of the table.
13567 The fourth instruction performs the jump.
13569 If the address stored at .Laaa is that of a symbol which has the
13570 Thumb_Func bit set, then the linker will arrange for this address
13571 to have the bottom bit set, which in turn would mean that the
13572 address computation performed by the third instruction would end
13573 up with the bottom bit set. Since the ARM is capable of unaligned
13574 word loads, the instruction would then load the incorrect address
13575 out of the jump table, and chaos would ensue. */
13576 if (label_is_thumb_function_name
13577 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
13578 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
13580 /* When the address of a Thumb function is taken the bottom
13581 bit of that address should be set. This will allow
13582 interworking between Arm and Thumb functions to work
13585 THUMB_SET_FUNC (sym
, 1);
/* The .thumb_func directive applies to exactly one label.  */
13587 label_is_thumb_function_name
= FALSE
;
13591 dwarf2_emit_label (sym
);
/* Recognize the "$data:" style marker after a mapping-symbol character
   in Thumb mode: rewrites the marker character to '/' and skips past
   "data:", NUL-terminating there so the symbol name ends at the marker.
   Returns nonzero/zero per caller convention (return lines are missing
   from this extraction -- NOTE(review): verify against full source).  */
13596 arm_data_in_code (void)
13598 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
13600 *input_line_pointer
= '/';
13601 input_line_pointer
+= 5;
13602 *input_line_pointer
= 0;
/* Canonicalize NAME in place: in Thumb mode, strip a trailing "/data"
   suffix (the data-in-code marker added above) by NUL-terminating five
   characters before the end.  */
13610 arm_canonicalize_symbol_name (char * name
)
13614 if (thumb_mode
&& (len
= strlen (name
)) > 5
13615 && streq (name
+ len
- 5, "/data"))
13616 *(name
+ len
- 5) = 0;
13621 /* Table of all register names defined by default. The user can
13622 define additional names with .req. Note that all register names
13623 should appear in both upper and lowercase variants. Some registers
13624 also have mixed-case names. */
/* Helper macros for building reg_names[] below.
   REGDEF(s,n,t)  - one entry: name #s, number n, type REG_TYPE_##t.
   REGNUM(p,n,t)  - entry named p##n with number n (e.g. r0..r15).
   REGNUM2(p,n,t) - entry named p##n with number 2*n (Neon Q regs map
                    onto even D register numbers).
   REGSET/REGSETH - 16 consecutive REGNUM entries (0-15 / 16-31).
   REGSET2        - 16 consecutive REGNUM2 entries.  */
13626 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
13627 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
13628 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
13629 #define REGSET(p,t) \
13630 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
13631 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
13632 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
13633 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
13634 #define REGSETH(p,t) \
13635 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
13636 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
13637 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
13638 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
13639 #define REGSET2(p,t) \
13640 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
13641 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
13642 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
13643 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Default register-name table: core registers (with ATPCS and well-known
   aliases), coprocessor numbers/registers, FPA, VFP/Neon, VFP control,
   Maverick DSP, iWMMXt, and XScale accumulator registers, each in upper
   and lower case.  NOTE(review): extraction dropped some lines here
   (e.g. the table terminator) -- compare against the full source.  */
13645 static const struct reg_entry reg_names
[] =
13647 /* ARM integer registers. */
13648 REGSET(r
, RN
), REGSET(R
, RN
),
13650 /* ATPCS synonyms. */
13651 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
13652 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
13653 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
13655 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
13656 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
13657 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
13659 /* Well-known aliases. */
13660 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
13661 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
13663 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
13664 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
13666 /* Coprocessor numbers. */
13667 REGSET(p
, CP
), REGSET(P
, CP
),
13669 /* Coprocessor register numbers. The "cr" variants are for backward
13671 REGSET(c
, CN
), REGSET(C
, CN
),
13672 REGSET(cr
, CN
), REGSET(CR
, CN
),
13674 /* FPA registers. */
13675 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
13676 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
13678 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
13679 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
13681 /* VFP SP registers. */
13682 REGSET(s
,VFS
), REGSET(S
,VFS
),
13683 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
13685 /* VFP DP Registers. */
13686 REGSET(d
,VFD
), REGSET(D
,VFD
),
13687 /* Extra Neon DP registers. */
13688 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
13690 /* Neon QP registers. */
13691 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
13693 /* VFP control registers. */
13694 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
13695 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
13697 /* Maverick DSP coprocessor registers. */
13698 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
13699 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
13701 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
13702 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
13703 REGDEF(dspsc
,0,DSPSC
),
13705 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
13706 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
13707 REGDEF(DSPSC
,0,DSPSC
),
13709 /* iWMMXt data registers - p0, c0-15. */
13710 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
13712 /* iWMMXt control registers - p1, c0-3. */
13713 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
13714 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
13715 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
13716 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
13718 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
13719 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
13720 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
13721 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
13722 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
13724 /* XScale accumulator registers. */
13725 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
13731 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
13732 within psr_required_here. */
/* PSR field-suffix table: every permutation of the f/s/x/c field letters
   maps to the OR of the corresponding PSR_* masks, so e.g. "fsxc" and
   "cxsf" select the same fields.  NOTE(review): the single-letter
   entries (original lines ~13741-13745) were dropped by the extraction. */
13733 static const struct asm_psr psrs
[] =
13735 /* Backward compatibility notation. Note that "all" is no longer
13736 truly all possible PSR bits. */
13737 {"all", PSR_c
| PSR_f
},
13741 /* Individual flags. */
13746 /* Combinations of flags. */
13747 {"fs", PSR_f
| PSR_s
},
13748 {"fx", PSR_f
| PSR_x
},
13749 {"fc", PSR_f
| PSR_c
},
13750 {"sf", PSR_s
| PSR_f
},
13751 {"sx", PSR_s
| PSR_x
},
13752 {"sc", PSR_s
| PSR_c
},
13753 {"xf", PSR_x
| PSR_f
},
13754 {"xs", PSR_x
| PSR_s
},
13755 {"xc", PSR_x
| PSR_c
},
13756 {"cf", PSR_c
| PSR_f
},
13757 {"cs", PSR_c
| PSR_s
},
13758 {"cx", PSR_c
| PSR_x
},
13759 {"fsx", PSR_f
| PSR_s
| PSR_x
},
13760 {"fsc", PSR_f
| PSR_s
| PSR_c
},
13761 {"fxs", PSR_f
| PSR_x
| PSR_s
},
13762 {"fxc", PSR_f
| PSR_x
| PSR_c
},
13763 {"fcs", PSR_f
| PSR_c
| PSR_s
},
13764 {"fcx", PSR_f
| PSR_c
| PSR_x
},
13765 {"sfx", PSR_s
| PSR_f
| PSR_x
},
13766 {"sfc", PSR_s
| PSR_f
| PSR_c
},
13767 {"sxf", PSR_s
| PSR_x
| PSR_f
},
13768 {"sxc", PSR_s
| PSR_x
| PSR_c
},
13769 {"scf", PSR_s
| PSR_c
| PSR_f
},
13770 {"scx", PSR_s
| PSR_c
| PSR_x
},
13771 {"xfs", PSR_x
| PSR_f
| PSR_s
},
13772 {"xfc", PSR_x
| PSR_f
| PSR_c
},
13773 {"xsf", PSR_x
| PSR_s
| PSR_f
},
13774 {"xsc", PSR_x
| PSR_s
| PSR_c
},
13775 {"xcf", PSR_x
| PSR_c
| PSR_f
},
13776 {"xcs", PSR_x
| PSR_c
| PSR_s
},
13777 {"cfs", PSR_c
| PSR_f
| PSR_s
},
13778 {"cfx", PSR_c
| PSR_f
| PSR_x
},
13779 {"csf", PSR_c
| PSR_s
| PSR_f
},
13780 {"csx", PSR_c
| PSR_s
| PSR_x
},
13781 {"cxf", PSR_c
| PSR_x
| PSR_f
},
13782 {"cxs", PSR_c
| PSR_x
| PSR_s
},
13783 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
13784 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
13785 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
13786 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
13787 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
13788 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
13789 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
13790 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
13791 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
13792 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
13793 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
13794 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
13795 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
13796 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
13797 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
13798 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
13799 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
13800 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
13801 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
13802 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
13803 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
13804 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
13805 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
13806 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
13809 /* Table of V7M psr names. */
/* V7-M special-register names (for mrs/msr), name -> SYSm value.
   NOTE(review): most entries (original lines 13811-13822, 13824+) were
   dropped by the extraction; only "basepri_max" is visible here.  */
13810 static const struct asm_psr v7m_psrs
[] =
13823 {"basepri_max", 18},
13828 /* Table of all shift-in-operand names. */
/* Shift-operand name table (upper and lower case); "asl" is accepted as
   a synonym for "lsl".  */
13829 static const struct asm_shift_name shift_names
[] =
13831 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
13832 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
13833 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
13834 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
13835 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
13836 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
13839 /* Table of all explicit relocation names. */
/* Explicit relocation-specifier names (e.g. "(got)", "(tlsgd)") mapped
   to BFD reloc types, in both lower and upper case.  */
13841 static struct reloc_entry reloc_names
[] =
13843 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
13844 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
13845 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
13846 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
13847 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
13848 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
13849 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
13850 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
13851 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
13852 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
13853 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
}
13857 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* Condition-code suffix table, name -> 4-bit condition value ("hs" and
   "ul"/"lo" are synonyms for cs/cc).  NOTE(review): the extraction kept
   only two rows; the full table covers all condition codes.  */
13858 static const struct asm_cond conds
[] =
13862 {"cs", 0x2}, {"hs", 0x2},
13863 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
/* Barrier option names for dsb/dmb/isb.  NOTE(review): all entries were
   dropped by the extraction; only the declaration line survives.  */
13877 static struct asm_barrier_opt barrier_opt_names
[] =
13885 /* Table of ARM-format instructions. */
13887 /* Macros for gluing together operand strings. N.B. In all cases
13888 other than OPS0, the trailing OP_stop comes from default
13889 zero-initialization of the unspecified elements of the array. */
/* Helper macros for building the insns[] mnemonic table below.  The
   OPSn macros build operand-code lists; the remaining macros build one
   (or, for the *CM families, nineteen) asm_opcode entries with the
   appropriate tag, ARM/Thumb opcode values, variant sets and encoder
   functions.  */
13890 #define OPS0() { OP_stop, }
13891 #define OPS1(a) { OP_##a, }
13892 #define OPS2(a,b) { OP_##a,OP_##b, }
13893 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
13894 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
13895 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
13896 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
13898 /* These macros abstract out the exact format of the mnemonic table and
13899 save some repeated characters. */
13901 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
13902 #define TxCE(mnem, op, top, nops, ops, ae, te) \
13903 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
13904 THUMB_VARIANT, do_##ae, do_##te }
13906 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
13907 a T_MNEM_xyz enumerator. */
13908 #define TCE(mnem, aop, top, nops, ops, ae, te) \
13909 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
13910 #define tCE(mnem, aop, top, nops, ops, ae, te) \
13911 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
13913 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
13914 infix after the third character. */
13915 #define TxC3(mnem, op, top, nops, ops, ae, te) \
13916 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
13917 THUMB_VARIANT, do_##ae, do_##te }
13918 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
13919 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
13920 THUMB_VARIANT, do_##ae, do_##te }
13921 #define TC3(mnem, aop, top, nops, ops, ae, te) \
13922 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
13923 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
13924 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
13925 #define tC3(mnem, aop, top, nops, ops, ae, te) \
13926 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
13927 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
13928 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
13930 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
13931 appear in the condition table. */
13932 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
13933 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
13934 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
13936 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
13937 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
13938 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
13939 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
13940 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
13941 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
13942 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
13943 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
13944 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
13945 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
13946 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
13947 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
13948 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
13949 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
13950 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
13951 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
13952 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
13953 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
13954 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
13955 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
13957 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
13958 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
13959 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
13960 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
13962 /* Mnemonic that cannot be conditionalized. The ARM condition-code
13963 field is still 0xE. Many of the Thumb variants can be executed
13964 conditionally, so this is checked separately. */
13965 #define TUE(mnem, op, top, nops, ops, ae, te) \
13966 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
13967 THUMB_VARIANT, do_##ae, do_##te }
13969 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
13970 condition code field. */
13971 #define TUF(mnem, op, top, nops, ops, ae, te) \
13972 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
13973 THUMB_VARIANT, do_##ae, do_##te }
13975 /* ARM-only variants of all the above. */
13976 #define CE(mnem, op, nops, ops, ae) \
13977 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13979 #define C3(mnem, op, nops, ops, ae) \
13980 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13982 /* Legacy mnemonics that always have conditional infix after the third
13984 #define CL(mnem, op, nops, ops, ae) \
13985 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13986 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13988 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
13989 #define cCE(mnem, op, nops, ops, ae) \
13990 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13992 /* Legacy coprocessor instructions where conditional infix and conditional
13993 suffix are ambiguous. For consistency this includes all FPA instructions,
13994 not just the potentially ambiguous ones. */
13995 #define cCL(mnem, op, nops, ops, ae) \
13996 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13997 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13999 /* Coprocessor, takes either a suffix or a position-3 infix
14000 (for an FPA corner case). */
14001 #define C3E(mnem, op, nops, ops, ae) \
14002 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14003 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14005 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14006 { #m1 #m2 #m3, OPS##nops ops, \
14007 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14008 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14010 #define CM(m1, m2, op, nops, ops, ae) \
14011 xCM_(m1, , m2, op, nops, ops, ae), \
14012 xCM_(m1, eq, m2, op, nops, ops, ae), \
14013 xCM_(m1, ne, m2, op, nops, ops, ae), \
14014 xCM_(m1, cs, m2, op, nops, ops, ae), \
14015 xCM_(m1, hs, m2, op, nops, ops, ae), \
14016 xCM_(m1, cc, m2, op, nops, ops, ae), \
14017 xCM_(m1, ul, m2, op, nops, ops, ae), \
14018 xCM_(m1, lo, m2, op, nops, ops, ae), \
14019 xCM_(m1, mi, m2, op, nops, ops, ae), \
14020 xCM_(m1, pl, m2, op, nops, ops, ae), \
14021 xCM_(m1, vs, m2, op, nops, ops, ae), \
14022 xCM_(m1, vc, m2, op, nops, ops, ae), \
14023 xCM_(m1, hi, m2, op, nops, ops, ae), \
14024 xCM_(m1, ls, m2, op, nops, ops, ae), \
14025 xCM_(m1, ge, m2, op, nops, ops, ae), \
14026 xCM_(m1, lt, m2, op, nops, ops, ae), \
14027 xCM_(m1, gt, m2, op, nops, ops, ae), \
14028 xCM_(m1, le, m2, op, nops, ops, ae), \
14029 xCM_(m1, al, m2, op, nops, ops, ae)
14031 #define UE(mnem, op, nops, ops, ae) \
14032 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14034 #define UF(mnem, op, nops, ops, ae) \
14035 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14037 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14038 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14039 use the same encoding function for each. */
14040 #define NUF(mnem, op, nops, ops, enc) \
14041 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14042 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14044 /* Neon data processing, version which indirects through neon_enc_tab for
14045 the various overloaded versions of opcodes. */
14046 #define nUF(mnem, op, nops, ops, enc) \
14047 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14048 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14050 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14052 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14053 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14054 THUMB_VARIANT, do_##enc, do_##enc }
14056 #define NCE(mnem, op, nops, ops, enc) \
14057 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14059 #define NCEF(mnem, op, nops, ops, enc) \
14060 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14062 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14063 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14064 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14065 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14067 #define nCE(mnem, op, nops, ops, enc) \
14068 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14070 #define nCEF(mnem, op, nops, ops, enc) \
14071 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14075 /* Thumb-only, unconditional. */
14076 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14078 static const struct asm_opcode insns
[] =
14080 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14081 #define THUMB_VARIANT &arm_ext_v4t
14082 tCE(and, 0000000, and, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14083 tC3(ands
, 0100000, ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14084 tCE(eor
, 0200000, eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14085 tC3(eors
, 0300000, eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14086 tCE(sub
, 0400000, sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14087 tC3(subs
, 0500000, subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14088 tCE(add
, 0800000, add
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14089 tC3(adds
, 0900000, adds
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14090 tCE(adc
, 0a00000
, adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14091 tC3(adcs
, 0b00000, adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14092 tCE(sbc
, 0c00000
, sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14093 tC3(sbcs
, 0d00000
, sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14094 tCE(orr
, 1800000, orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14095 tC3(orrs
, 1900000, orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14096 tCE(bic
, 1c00000
, bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14097 tC3(bics
, 1d00000
, bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14099 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14100 for setting PSR flag bits. They are obsolete in V6 and do not
14101 have Thumb equivalents. */
14102 tCE(tst
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14103 tC3w(tsts
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14104 CL(tstp
, 110f000
, 2, (RR
, SH
), cmp
),
14105 tCE(cmp
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
14106 tC3w(cmps
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
14107 CL(cmpp
, 150f000
, 2, (RR
, SH
), cmp
),
14108 tCE(cmn
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14109 tC3w(cmns
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14110 CL(cmnp
, 170f000
, 2, (RR
, SH
), cmp
),
14112 tCE(mov
, 1a00000
, mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
14113 tC3(movs
, 1b00000
, movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
14114 tCE(mvn
, 1e00000
, mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
14115 tC3(mvns
, 1f00000
, mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
14117 tCE(ldr
, 4100000, ldr
, 2, (RR
, ADDR
), ldst
, t_ldst
),
14118 tC3(ldrb
, 4500000, ldrb
, 2, (RR
, ADDR
), ldst
, t_ldst
),
14119 tCE(str
, 4000000, str
, 2, (RR
, ADDR
), ldst
, t_ldst
),
14120 tC3(strb
, 4400000, strb
, 2, (RR
, ADDR
), ldst
, t_ldst
),
14122 tCE(stm
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14123 tC3(stmia
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14124 tC3(stmea
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14125 tCE(ldm
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14126 tC3(ldmia
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14127 tC3(ldmfd
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14129 TCE(swi
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
14130 TCE(svc
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
14131 tCE(b
, a000000
, b
, 1, (EXPr
), branch
, t_branch
),
14132 TCE(bl
, b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
14135 tCE(adr
, 28f0000
, adr
, 2, (RR
, EXP
), adr
, t_adr
),
14136 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
14137 tCE(nop
, 1a00000
, nop
, 1, (oI255c
), nop
, t_nop
),
14139 /* Thumb-compatibility pseudo ops. */
14140 tCE(lsl
, 1a00000
, lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14141 tC3(lsls
, 1b00000
, lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14142 tCE(lsr
, 1a00020
, lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14143 tC3(lsrs
, 1b00020
, lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14144 tCE(asr
, 1a00040
, asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14145 tC3(asrs
, 1b00040
, asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14146 tCE(ror
, 1a00060
, ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14147 tC3(rors
, 1b00060
, rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14148 tCE(neg
, 2600000, neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
14149 tC3(negs
, 2700000, negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
14150 tCE(push
, 92d0000
, push
, 1, (REGLST
), push_pop
, t_push_pop
),
14151 tCE(pop
, 8bd0000
, pop
, 1, (REGLST
), push_pop
, t_push_pop
),
14153 #undef THUMB_VARIANT
14154 #define THUMB_VARIANT &arm_ext_v6
14155 TCE(cpy
, 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
14157 /* V1 instructions with no Thumb analogue prior to V6T2. */
14158 #undef THUMB_VARIANT
14159 #define THUMB_VARIANT &arm_ext_v6t2
14160 TCE(rsb
, 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
14161 TC3(rsbs
, 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
14162 TCE(teq
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14163 TC3w(teqs
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14164 CL(teqp
, 130f000
, 2, (RR
, SH
), cmp
),
14166 TC3(ldrt
, 4300000, f8500e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14167 TC3(ldrbt
, 4700000, f8100e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14168 TC3(strt
, 4200000, f8400e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14169 TC3(strbt
, 4600000, f8000e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14171 TC3(stmdb
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14172 TC3(stmfd
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14174 TC3(ldmdb
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14175 TC3(ldmea
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14177 /* V1 instructions with no Thumb analogue at all. */
14178 CE(rsc
, 0e00000
, 3, (RR
, oRR
, SH
), arit
),
14179 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
14181 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
14182 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
14183 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
14184 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
14185 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
14186 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
14187 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
14188 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
14191 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14192 #undef THUMB_VARIANT
14193 #define THUMB_VARIANT &arm_ext_v4t
14194 tCE(mul
, 0000090, mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
14195 tC3(muls
, 0100090, muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
14197 #undef THUMB_VARIANT
14198 #define THUMB_VARIANT &arm_ext_v6t2
14199 TCE(mla
, 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
14200 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
14202 /* Generic coprocessor instructions. */
14203 TCE(cdp
, e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
14204 TCE(ldc
, c100000
, ec100000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
14205 TC3(ldcl
, c500000
, ec500000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
14206 TCE(stc
, c000000
, ec000000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
14207 TC3(stcl
, c400000
, ec400000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
14208 TCE(mcr
, e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14209 TCE(mrc
, e100010
, ee100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14212 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14213 CE(swp
, 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
14214 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
14217 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14218 TCE(mrs
, 10f0000
, f3ef8000
, 2, (APSR_RR
, RVC_PSR
), mrs
, t_mrs
),
14219 TCE(msr
, 120f000
, f3808000
, 2, (RVC_PSR
, RR_EXi
), msr
, t_msr
),
14222 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14223 TCE(smull
, 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14224 CM(smull
,s
, 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14225 TCE(umull
, 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14226 CM(umull
,s
, 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14227 TCE(smlal
, 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14228 CM(smlal
,s
, 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14229 TCE(umlal
, 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14230 CM(umlal
,s
, 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14233 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14234 #undef THUMB_VARIANT
14235 #define THUMB_VARIANT &arm_ext_v4t
14236 tC3(ldrh
, 01000b0
, ldrh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
14237 tC3(strh
, 00000b0
, strh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
14238 tC3(ldrsh
, 01000f0
, ldrsh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
14239 tC3(ldrsb
, 01000d0
, ldrsb
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
14240 tCM(ld
,sh
, 01000f0
, ldrsh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
14241 tCM(ld
,sb
, 01000d0
, ldrsb
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
14244 #define ARM_VARIANT &arm_ext_v4t_5
14245 /* ARM Architecture 4T. */
14246 /* Note: bx (and blx) are required on V5, even if the processor does
14247 not support Thumb. */
14248 TCE(bx
, 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
14251 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14252 #undef THUMB_VARIANT
14253 #define THUMB_VARIANT &arm_ext_v5t
14254 /* Note: blx has 2 variants; the .value coded here is for
14255 BLX(2). Only this variant has conditional execution. */
14256 TCE(blx
, 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
14257 TUE(bkpt
, 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
14259 #undef THUMB_VARIANT
14260 #define THUMB_VARIANT &arm_ext_v6t2
14261 TCE(clz
, 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
14262 TUF(ldc2
, c100000
, fc100000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
14263 TUF(ldc2l
, c500000
, fc500000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
14264 TUF(stc2
, c000000
, fc000000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
14265 TUF(stc2l
, c400000
, fc400000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
14266 TUF(cdp2
, e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
14267 TUF(mcr2
, e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14268 TUF(mrc2
, e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14271 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14272 TCE(smlabb
, 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14273 TCE(smlatb
, 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14274 TCE(smlabt
, 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14275 TCE(smlatt
, 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14277 TCE(smlawb
, 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14278 TCE(smlawt
, 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14280 TCE(smlalbb
, 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14281 TCE(smlaltb
, 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14282 TCE(smlalbt
, 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14283 TCE(smlaltt
, 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14285 TCE(smulbb
, 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14286 TCE(smultb
, 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14287 TCE(smulbt
, 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14288 TCE(smultt
, 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14290 TCE(smulwb
, 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14291 TCE(smulwt
, 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14293 TCE(qadd
, 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
14294 TCE(qdadd
, 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
14295 TCE(qsub
, 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
14296 TCE(qdsub
, 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
14299 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14300 TUF(pld
, 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
14301 TC3(ldrd
, 00000d0
, e9500000
, 3, (RRnpc
, oRRnpc
, ADDR
), ldrd
, t_ldstd
),
14302 TC3(strd
, 00000f0
, e9400000
, 3, (RRnpc
, oRRnpc
, ADDR
), ldrd
, t_ldstd
),
14304 TCE(mcrr
, c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14305 TCE(mrrc
, c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14308 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14309 TCE(bxj
, 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
14312 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14313 #undef THUMB_VARIANT
14314 #define THUMB_VARIANT &arm_ext_v6
14315 TUF(cpsie
, 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
14316 TUF(cpsid
, 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
14317 tCE(rev
, 6bf0f30
, rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14318 tCE(rev16
, 6bf0fb0
, rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14319 tCE(revsh
, 6ff0fb0
, revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14320 tCE(sxth
, 6bf0070
, sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14321 tCE(uxth
, 6ff0070
, uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14322 tCE(sxtb
, 6af0070
, sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14323 tCE(uxtb
, 6ef0070
, uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14324 TUF(setend
, 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
14326 #undef THUMB_VARIANT
14327 #define THUMB_VARIANT &arm_ext_v6t2
14328 TCE(ldrex
, 1900f9f
, e8500f00
, 2, (RRnpc
, ADDR
), ldrex
, t_ldrex
),
14329 TUF(mcrr2
, c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14330 TUF(mrrc2
, c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14332 TCE(ssat
, 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
14333 TCE(usat
, 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
14335 /* ARM V6 not included in V7M (eg. integer SIMD). */
14336 #undef THUMB_VARIANT
14337 #define THUMB_VARIANT &arm_ext_v6_notm
14338 TUF(cps
, 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
14339 TCE(pkhbt
, 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
14340 TCE(pkhtb
, 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
14341 TCE(qadd16
, 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14342 TCE(qadd8
, 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14343 TCE(qaddsubx
, 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14344 TCE(qsub16
, 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14345 TCE(qsub8
, 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14346 TCE(qsubaddx
, 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14347 TCE(sadd16
, 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14348 TCE(sadd8
, 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14349 TCE(saddsubx
, 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14350 TCE(shadd16
, 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14351 TCE(shadd8
, 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14352 TCE(shaddsubx
, 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14353 TCE(shsub16
, 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14354 TCE(shsub8
, 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14355 TCE(shsubaddx
, 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14356 TCE(ssub16
, 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14357 TCE(ssub8
, 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14358 TCE(ssubaddx
, 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14359 TCE(uadd16
, 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14360 TCE(uadd8
, 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14361 TCE(uaddsubx
, 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14362 TCE(uhadd16
, 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14363 TCE(uhadd8
, 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14364 TCE(uhaddsubx
, 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14365 TCE(uhsub16
, 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14366 TCE(uhsub8
, 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14367 TCE(uhsubaddx
, 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14368 TCE(uqadd16
, 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14369 TCE(uqadd8
, 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14370 TCE(uqaddsubx
, 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14371 TCE(uqsub16
, 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14372 TCE(uqsub8
, 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14373 TCE(uqsubaddx
, 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14374 TCE(usub16
, 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14375 TCE(usub8
, 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14376 TCE(usubaddx
, 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14377 TUF(rfeia
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
14378 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
14379 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
14380 TUF(rfedb
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
14381 TUF(rfefd
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
14382 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
14383 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
14384 TUF(rfeed
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
14385 TCE(sxtah
, 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14386 TCE(sxtab16
, 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14387 TCE(sxtab
, 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14388 TCE(sxtb16
, 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14389 TCE(uxtah
, 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14390 TCE(uxtab16
, 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14391 TCE(uxtab
, 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14392 TCE(uxtb16
, 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14393 TCE(sel
, 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14394 TCE(smlad
, 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14395 TCE(smladx
, 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14396 TCE(smlald
, 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14397 TCE(smlaldx
, 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14398 TCE(smlsd
, 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14399 TCE(smlsdx
, 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14400 TCE(smlsld
, 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14401 TCE(smlsldx
, 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14402 TCE(smmla
, 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14403 TCE(smmlar
, 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14404 TCE(smmls
, 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14405 TCE(smmlsr
, 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14406 TCE(smmul
, 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14407 TCE(smmulr
, 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14408 TCE(smuad
, 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14409 TCE(smuadx
, 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14410 TCE(smusd
, 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14411 TCE(smusdx
, 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14412 TUF(srsia
, 8cd0500
, e980c000
, 1, (I31w
), srs
, srs
),
14413 UF(srsib
, 9cd0500
, 1, (I31w
), srs
),
14414 UF(srsda
, 84d0500
, 1, (I31w
), srs
),
14415 TUF(srsdb
, 94d0500
, e800c000
, 1, (I31w
), srs
, srs
),
14416 TCE(ssat16
, 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
14417 TCE(strex
, 1800f90
, e8400000
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, t_strex
),
14418 TCE(umaal
, 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
14419 TCE(usad8
, 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14420 TCE(usada8
, 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14421 TCE(usat16
, 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
14424 #define ARM_VARIANT &arm_ext_v6k
14425 #undef THUMB_VARIANT
14426 #define THUMB_VARIANT &arm_ext_v6k
14427 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
14428 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
14429 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
14430 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
14432 #undef THUMB_VARIANT
14433 #define THUMB_VARIANT &arm_ext_v6_notm
14434 TCE(ldrexd
, 1b00f9f
, e8d0007f
, 3, (RRnpc
, oRRnpc
, RRnpcb
), ldrexd
, t_ldrexd
),
14435 TCE(strexd
, 1a00f90
, e8c00070
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
), strexd
, t_strexd
),
14437 #undef THUMB_VARIANT
14438 #define THUMB_VARIANT &arm_ext_v6t2
14439 TCE(ldrexb
, 1d00f9f
, e8d00f4f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
14440 TCE(ldrexh
, 1f00f9f
, e8d00f5f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
14441 TCE(strexb
, 1c00f90
, e8c00f40
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
14442 TCE(strexh
, 1e00f90
, e8c00f50
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
14443 TUF(clrex
, 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
14446 #define ARM_VARIANT &arm_ext_v6z
14447 TCE(smc
, 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
14450 #define ARM_VARIANT &arm_ext_v6t2
14451 TCE(bfc
, 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
14452 TCE(bfi
, 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
14453 TCE(sbfx
, 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
14454 TCE(ubfx
, 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
14456 TCE(mls
, 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
14457 TCE(movw
, 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
14458 TCE(movt
, 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
14459 TCE(rbit
, 3ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
14461 TC3(ldrht
, 03000b0
, f8300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14462 TC3(ldrsht
, 03000f0
, f9300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14463 TC3(ldrsbt
, 03000d0
, f9100e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14464 TC3(strht
, 02000b0
, f8200e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14466 UT(cbnz
, b900
, 2, (RR
, EXP
), t_czb
),
14467 UT(cbz
, b100
, 2, (RR
, EXP
), t_czb
),
14468 /* ARM does not really have an IT instruction. */
14469 TUE(it
, 0, bf08
, 1, (COND
), it
, t_it
),
14470 TUE(itt
, 0, bf0c
, 1, (COND
), it
, t_it
),
14471 TUE(ite
, 0, bf04
, 1, (COND
), it
, t_it
),
14472 TUE(ittt
, 0, bf0e
, 1, (COND
), it
, t_it
),
14473 TUE(itet
, 0, bf06
, 1, (COND
), it
, t_it
),
14474 TUE(itte
, 0, bf0a
, 1, (COND
), it
, t_it
),
14475 TUE(itee
, 0, bf02
, 1, (COND
), it
, t_it
),
14476 TUE(itttt
, 0, bf0f
, 1, (COND
), it
, t_it
),
14477 TUE(itett
, 0, bf07
, 1, (COND
), it
, t_it
),
14478 TUE(ittet
, 0, bf0b
, 1, (COND
), it
, t_it
),
14479 TUE(iteet
, 0, bf03
, 1, (COND
), it
, t_it
),
14480 TUE(ittte
, 0, bf0d
, 1, (COND
), it
, t_it
),
14481 TUE(itete
, 0, bf05
, 1, (COND
), it
, t_it
),
14482 TUE(ittee
, 0, bf09
, 1, (COND
), it
, t_it
),
14483 TUE(iteee
, 0, bf01
, 1, (COND
), it
, t_it
),
14485 /* Thumb2 only instructions. */
14487 #define ARM_VARIANT NULL
14489 TCE(addw
, 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
14490 TCE(subw
, 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
14491 TCE(tbb
, 0, e8d0f000
, 1, (TB
), 0, t_tb
),
14492 TCE(tbh
, 0, e8d0f010
, 1, (TB
), 0, t_tb
),
14494 /* Thumb-2 hardware division instructions (R and M profiles only). */
14495 #undef THUMB_VARIANT
14496 #define THUMB_VARIANT &arm_ext_div
14497 TCE(sdiv
, 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
14498 TCE(udiv
, 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
14500 /* ARM V7 instructions. */
14502 #define ARM_VARIANT &arm_ext_v7
14503 #undef THUMB_VARIANT
14504 #define THUMB_VARIANT &arm_ext_v7
14505 TUF(pli
, 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
14506 TCE(dbg
, 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
14507 TUF(dmb
, 57ff050
, f3bf8f50
, 1, (oBARRIER
), barrier
, t_barrier
),
14508 TUF(dsb
, 57ff040
, f3bf8f40
, 1, (oBARRIER
), barrier
, t_barrier
),
14509 TUF(isb
, 57ff060
, f3bf8f60
, 1, (oBARRIER
), barrier
, t_barrier
),
14512 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
14513 cCE(wfs
, e200110
, 1, (RR
), rd
),
14514 cCE(rfs
, e300110
, 1, (RR
), rd
),
14515 cCE(wfc
, e400110
, 1, (RR
), rd
),
14516 cCE(rfc
, e500110
, 1, (RR
), rd
),
14518 cCL(ldfs
, c100100
, 2, (RF
, ADDR
), rd_cpaddr
),
14519 cCL(ldfd
, c108100
, 2, (RF
, ADDR
), rd_cpaddr
),
14520 cCL(ldfe
, c500100
, 2, (RF
, ADDR
), rd_cpaddr
),
14521 cCL(ldfp
, c508100
, 2, (RF
, ADDR
), rd_cpaddr
),
14523 cCL(stfs
, c000100
, 2, (RF
, ADDR
), rd_cpaddr
),
14524 cCL(stfd
, c008100
, 2, (RF
, ADDR
), rd_cpaddr
),
14525 cCL(stfe
, c400100
, 2, (RF
, ADDR
), rd_cpaddr
),
14526 cCL(stfp
, c408100
, 2, (RF
, ADDR
), rd_cpaddr
),
14528 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
14529 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
14530 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
14531 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
14532 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
14533 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
14534 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
14535 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
14536 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
14537 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
14538 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
14539 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
14541 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
14542 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
14543 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
14544 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
14545 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
14546 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
14547 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
14548 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
14549 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
14550 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
14551 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
14552 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
14554 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
14555 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
14556 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
14557 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
14558 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
14559 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
14560 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
14561 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
14562 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
14563 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
14564 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
14565 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
14567 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
14568 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
14569 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
14570 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
14571 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
14572 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
14573 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
14574 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
14575 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
14576 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
14577 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
14578 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
14580 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
14581 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
14582 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
14583 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
14584 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
14585 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
14586 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
14587 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
14588 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
14589 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
14590 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
14591 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
14593 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
14594 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
14595 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
14596 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
14597 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
14598 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
14599 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
14600 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
14601 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
14602 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
14603 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
14604 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
14606 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
14607 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
14608 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
14609 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
14610 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
14611 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
14612 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
14613 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
14614 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
14615 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
14616 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
14617 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
14619 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
14620 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
14621 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
14622 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
14623 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
14624 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
14625 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
14626 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
14627 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
14628 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
14629 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
14630 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
14632 cCL(sins
, e808100
, 2, (RF
, RF_IF
), rd_rm
),
14633 cCL(sinsp
, e808120
, 2, (RF
, RF_IF
), rd_rm
),
14634 cCL(sinsm
, e808140
, 2, (RF
, RF_IF
), rd_rm
),
14635 cCL(sinsz
, e808160
, 2, (RF
, RF_IF
), rd_rm
),
14636 cCL(sind
, e808180
, 2, (RF
, RF_IF
), rd_rm
),
14637 cCL(sindp
, e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
14638 cCL(sindm
, e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
14639 cCL(sindz
, e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
14640 cCL(sine
, e888100
, 2, (RF
, RF_IF
), rd_rm
),
14641 cCL(sinep
, e888120
, 2, (RF
, RF_IF
), rd_rm
),
14642 cCL(sinem
, e888140
, 2, (RF
, RF_IF
), rd_rm
),
14643 cCL(sinez
, e888160
, 2, (RF
, RF_IF
), rd_rm
),
14645 cCL(coss
, e908100
, 2, (RF
, RF_IF
), rd_rm
),
14646 cCL(cossp
, e908120
, 2, (RF
, RF_IF
), rd_rm
),
14647 cCL(cossm
, e908140
, 2, (RF
, RF_IF
), rd_rm
),
14648 cCL(cossz
, e908160
, 2, (RF
, RF_IF
), rd_rm
),
14649 cCL(cosd
, e908180
, 2, (RF
, RF_IF
), rd_rm
),
14650 cCL(cosdp
, e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
14651 cCL(cosdm
, e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
14652 cCL(cosdz
, e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
14653 cCL(cose
, e988100
, 2, (RF
, RF_IF
), rd_rm
),
14654 cCL(cosep
, e988120
, 2, (RF
, RF_IF
), rd_rm
),
14655 cCL(cosem
, e988140
, 2, (RF
, RF_IF
), rd_rm
),
14656 cCL(cosez
, e988160
, 2, (RF
, RF_IF
), rd_rm
),
14658 cCL(tans
, ea08100
, 2, (RF
, RF_IF
), rd_rm
),
14659 cCL(tansp
, ea08120
, 2, (RF
, RF_IF
), rd_rm
),
14660 cCL(tansm
, ea08140
, 2, (RF
, RF_IF
), rd_rm
),
14661 cCL(tansz
, ea08160
, 2, (RF
, RF_IF
), rd_rm
),
14662 cCL(tand
, ea08180
, 2, (RF
, RF_IF
), rd_rm
),
14663 cCL(tandp
, ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
14664 cCL(tandm
, ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
14665 cCL(tandz
, ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
14666 cCL(tane
, ea88100
, 2, (RF
, RF_IF
), rd_rm
),
14667 cCL(tanep
, ea88120
, 2, (RF
, RF_IF
), rd_rm
),
14668 cCL(tanem
, ea88140
, 2, (RF
, RF_IF
), rd_rm
),
14669 cCL(tanez
, ea88160
, 2, (RF
, RF_IF
), rd_rm
),
14671 cCL(asns
, eb08100
, 2, (RF
, RF_IF
), rd_rm
),
14672 cCL(asnsp
, eb08120
, 2, (RF
, RF_IF
), rd_rm
),
14673 cCL(asnsm
, eb08140
, 2, (RF
, RF_IF
), rd_rm
),
14674 cCL(asnsz
, eb08160
, 2, (RF
, RF_IF
), rd_rm
),
14675 cCL(asnd
, eb08180
, 2, (RF
, RF_IF
), rd_rm
),
14676 cCL(asndp
, eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
14677 cCL(asndm
, eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
14678 cCL(asndz
, eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
14679 cCL(asne
, eb88100
, 2, (RF
, RF_IF
), rd_rm
),
14680 cCL(asnep
, eb88120
, 2, (RF
, RF_IF
), rd_rm
),
14681 cCL(asnem
, eb88140
, 2, (RF
, RF_IF
), rd_rm
),
14682 cCL(asnez
, eb88160
, 2, (RF
, RF_IF
), rd_rm
),
14684 cCL(acss
, ec08100
, 2, (RF
, RF_IF
), rd_rm
),
14685 cCL(acssp
, ec08120
, 2, (RF
, RF_IF
), rd_rm
),
14686 cCL(acssm
, ec08140
, 2, (RF
, RF_IF
), rd_rm
),
14687 cCL(acssz
, ec08160
, 2, (RF
, RF_IF
), rd_rm
),
14688 cCL(acsd
, ec08180
, 2, (RF
, RF_IF
), rd_rm
),
14689 cCL(acsdp
, ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
14690 cCL(acsdm
, ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
14691 cCL(acsdz
, ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
14692 cCL(acse
, ec88100
, 2, (RF
, RF_IF
), rd_rm
),
14693 cCL(acsep
, ec88120
, 2, (RF
, RF_IF
), rd_rm
),
14694 cCL(acsem
, ec88140
, 2, (RF
, RF_IF
), rd_rm
),
14695 cCL(acsez
, ec88160
, 2, (RF
, RF_IF
), rd_rm
),
14697 cCL(atns
, ed08100
, 2, (RF
, RF_IF
), rd_rm
),
14698 cCL(atnsp
, ed08120
, 2, (RF
, RF_IF
), rd_rm
),
14699 cCL(atnsm
, ed08140
, 2, (RF
, RF_IF
), rd_rm
),
14700 cCL(atnsz
, ed08160
, 2, (RF
, RF_IF
), rd_rm
),
14701 cCL(atnd
, ed08180
, 2, (RF
, RF_IF
), rd_rm
),
14702 cCL(atndp
, ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
14703 cCL(atndm
, ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
14704 cCL(atndz
, ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
14705 cCL(atne
, ed88100
, 2, (RF
, RF_IF
), rd_rm
),
14706 cCL(atnep
, ed88120
, 2, (RF
, RF_IF
), rd_rm
),
14707 cCL(atnem
, ed88140
, 2, (RF
, RF_IF
), rd_rm
),
14708 cCL(atnez
, ed88160
, 2, (RF
, RF_IF
), rd_rm
),
14710 cCL(urds
, ee08100
, 2, (RF
, RF_IF
), rd_rm
),
14711 cCL(urdsp
, ee08120
, 2, (RF
, RF_IF
), rd_rm
),
14712 cCL(urdsm
, ee08140
, 2, (RF
, RF_IF
), rd_rm
),
14713 cCL(urdsz
, ee08160
, 2, (RF
, RF_IF
), rd_rm
),
14714 cCL(urdd
, ee08180
, 2, (RF
, RF_IF
), rd_rm
),
14715 cCL(urddp
, ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
14716 cCL(urddm
, ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
14717 cCL(urddz
, ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
14718 cCL(urde
, ee88100
, 2, (RF
, RF_IF
), rd_rm
),
14719 cCL(urdep
, ee88120
, 2, (RF
, RF_IF
), rd_rm
),
14720 cCL(urdem
, ee88140
, 2, (RF
, RF_IF
), rd_rm
),
14721 cCL(urdez
, ee88160
, 2, (RF
, RF_IF
), rd_rm
),
14723 cCL(nrms
, ef08100
, 2, (RF
, RF_IF
), rd_rm
),
14724 cCL(nrmsp
, ef08120
, 2, (RF
, RF_IF
), rd_rm
),
14725 cCL(nrmsm
, ef08140
, 2, (RF
, RF_IF
), rd_rm
),
14726 cCL(nrmsz
, ef08160
, 2, (RF
, RF_IF
), rd_rm
),
14727 cCL(nrmd
, ef08180
, 2, (RF
, RF_IF
), rd_rm
),
14728 cCL(nrmdp
, ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
14729 cCL(nrmdm
, ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
14730 cCL(nrmdz
, ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
14731 cCL(nrme
, ef88100
, 2, (RF
, RF_IF
), rd_rm
),
14732 cCL(nrmep
, ef88120
, 2, (RF
, RF_IF
), rd_rm
),
14733 cCL(nrmem
, ef88140
, 2, (RF
, RF_IF
), rd_rm
),
14734 cCL(nrmez
, ef88160
, 2, (RF
, RF_IF
), rd_rm
),
14736 cCL(adfs
, e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14737 cCL(adfsp
, e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14738 cCL(adfsm
, e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14739 cCL(adfsz
, e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14740 cCL(adfd
, e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14741 cCL(adfdp
, e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14742 cCL(adfdm
, e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14743 cCL(adfdz
, e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14744 cCL(adfe
, e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14745 cCL(adfep
, e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14746 cCL(adfem
, e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14747 cCL(adfez
, e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14749 cCL(sufs
, e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14750 cCL(sufsp
, e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14751 cCL(sufsm
, e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14752 cCL(sufsz
, e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14753 cCL(sufd
, e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14754 cCL(sufdp
, e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14755 cCL(sufdm
, e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14756 cCL(sufdz
, e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14757 cCL(sufe
, e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14758 cCL(sufep
, e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14759 cCL(sufem
, e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14760 cCL(sufez
, e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14762 cCL(rsfs
, e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14763 cCL(rsfsp
, e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14764 cCL(rsfsm
, e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14765 cCL(rsfsz
, e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14766 cCL(rsfd
, e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14767 cCL(rsfdp
, e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14768 cCL(rsfdm
, e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14769 cCL(rsfdz
, e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14770 cCL(rsfe
, e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14771 cCL(rsfep
, e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14772 cCL(rsfem
, e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14773 cCL(rsfez
, e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14775 cCL(mufs
, e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14776 cCL(mufsp
, e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14777 cCL(mufsm
, e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14778 cCL(mufsz
, e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14779 cCL(mufd
, e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14780 cCL(mufdp
, e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14781 cCL(mufdm
, e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14782 cCL(mufdz
, e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14783 cCL(mufe
, e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14784 cCL(mufep
, e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14785 cCL(mufem
, e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14786 cCL(mufez
, e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14788 cCL(dvfs
, e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14789 cCL(dvfsp
, e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14790 cCL(dvfsm
, e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14791 cCL(dvfsz
, e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14792 cCL(dvfd
, e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14793 cCL(dvfdp
, e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14794 cCL(dvfdm
, e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14795 cCL(dvfdz
, e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14796 cCL(dvfe
, e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14797 cCL(dvfep
, e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14798 cCL(dvfem
, e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14799 cCL(dvfez
, e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14801 cCL(rdfs
, e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14802 cCL(rdfsp
, e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14803 cCL(rdfsm
, e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14804 cCL(rdfsz
, e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14805 cCL(rdfd
, e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14806 cCL(rdfdp
, e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14807 cCL(rdfdm
, e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14808 cCL(rdfdz
, e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14809 cCL(rdfe
, e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14810 cCL(rdfep
, e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14811 cCL(rdfem
, e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14812 cCL(rdfez
, e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14814 cCL(pows
, e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14815 cCL(powsp
, e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14816 cCL(powsm
, e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14817 cCL(powsz
, e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14818 cCL(powd
, e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14819 cCL(powdp
, e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14820 cCL(powdm
, e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14821 cCL(powdz
, e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14822 cCL(powe
, e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14823 cCL(powep
, e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14824 cCL(powem
, e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14825 cCL(powez
, e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14827 cCL(rpws
, e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14828 cCL(rpwsp
, e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14829 cCL(rpwsm
, e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14830 cCL(rpwsz
, e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14831 cCL(rpwd
, e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14832 cCL(rpwdp
, e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14833 cCL(rpwdm
, e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14834 cCL(rpwdz
, e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14835 cCL(rpwe
, e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14836 cCL(rpwep
, e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14837 cCL(rpwem
, e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14838 cCL(rpwez
, e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14840 cCL(rmfs
, e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14841 cCL(rmfsp
, e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14842 cCL(rmfsm
, e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14843 cCL(rmfsz
, e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14844 cCL(rmfd
, e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14845 cCL(rmfdp
, e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14846 cCL(rmfdm
, e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14847 cCL(rmfdz
, e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14848 cCL(rmfe
, e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14849 cCL(rmfep
, e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14850 cCL(rmfem
, e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14851 cCL(rmfez
, e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14853 cCL(fmls
, e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14854 cCL(fmlsp
, e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14855 cCL(fmlsm
, e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14856 cCL(fmlsz
, e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14857 cCL(fmld
, e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14858 cCL(fmldp
, e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14859 cCL(fmldm
, e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14860 cCL(fmldz
, e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14861 cCL(fmle
, e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14862 cCL(fmlep
, e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14863 cCL(fmlem
, e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14864 cCL(fmlez
, e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14866 cCL(fdvs
, ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14867 cCL(fdvsp
, ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14868 cCL(fdvsm
, ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14869 cCL(fdvsz
, ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14870 cCL(fdvd
, ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14871 cCL(fdvdp
, ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14872 cCL(fdvdm
, ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14873 cCL(fdvdz
, ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14874 cCL(fdve
, ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14875 cCL(fdvep
, ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14876 cCL(fdvem
, ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14877 cCL(fdvez
, ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14879 cCL(frds
, eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14880 cCL(frdsp
, eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14881 cCL(frdsm
, eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14882 cCL(frdsz
, eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14883 cCL(frdd
, eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14884 cCL(frddp
, eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14885 cCL(frddm
, eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14886 cCL(frddz
, eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14887 cCL(frde
, eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14888 cCL(frdep
, eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14889 cCL(frdem
, eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14890 cCL(frdez
, eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14892 cCL(pols
, ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14893 cCL(polsp
, ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14894 cCL(polsm
, ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14895 cCL(polsz
, ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14896 cCL(pold
, ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14897 cCL(poldp
, ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14898 cCL(poldm
, ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14899 cCL(poldz
, ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14900 cCL(pole
, ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14901 cCL(polep
, ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14902 cCL(polem
, ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14903 cCL(polez
, ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14905 cCE(cmf
, e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
14906 C3E(cmfe
, ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
14907 cCE(cnf
, eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
14908 C3E(cnfe
, ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
14910 cCL(flts
, e000110
, 2, (RF
, RR
), rn_rd
),
14911 cCL(fltsp
, e000130
, 2, (RF
, RR
), rn_rd
),
14912 cCL(fltsm
, e000150
, 2, (RF
, RR
), rn_rd
),
14913 cCL(fltsz
, e000170
, 2, (RF
, RR
), rn_rd
),
14914 cCL(fltd
, e000190
, 2, (RF
, RR
), rn_rd
),
14915 cCL(fltdp
, e0001b0
, 2, (RF
, RR
), rn_rd
),
14916 cCL(fltdm
, e0001d0
, 2, (RF
, RR
), rn_rd
),
14917 cCL(fltdz
, e0001f0
, 2, (RF
, RR
), rn_rd
),
14918 cCL(flte
, e080110
, 2, (RF
, RR
), rn_rd
),
14919 cCL(fltep
, e080130
, 2, (RF
, RR
), rn_rd
),
14920 cCL(fltem
, e080150
, 2, (RF
, RR
), rn_rd
),
14921 cCL(fltez
, e080170
, 2, (RF
, RR
), rn_rd
),
14923 /* The implementation of the FIX instruction is broken on some
14924 assemblers, in that it accepts a precision specifier as well as a
14925 rounding specifier, despite the fact that this is meaningless.
14926 To be more compatible, we accept it as well, though of course it
14927 does not set any bits. */
14928 cCE(fix
, e100110
, 2, (RR
, RF
), rd_rm
),
14929 cCL(fixp
, e100130
, 2, (RR
, RF
), rd_rm
),
14930 cCL(fixm
, e100150
, 2, (RR
, RF
), rd_rm
),
14931 cCL(fixz
, e100170
, 2, (RR
, RF
), rd_rm
),
14932 cCL(fixsp
, e100130
, 2, (RR
, RF
), rd_rm
),
14933 cCL(fixsm
, e100150
, 2, (RR
, RF
), rd_rm
),
14934 cCL(fixsz
, e100170
, 2, (RR
, RF
), rd_rm
),
14935 cCL(fixdp
, e100130
, 2, (RR
, RF
), rd_rm
),
14936 cCL(fixdm
, e100150
, 2, (RR
, RF
), rd_rm
),
14937 cCL(fixdz
, e100170
, 2, (RR
, RF
), rd_rm
),
14938 cCL(fixep
, e100130
, 2, (RR
, RF
), rd_rm
),
14939 cCL(fixem
, e100150
, 2, (RR
, RF
), rd_rm
),
14940 cCL(fixez
, e100170
, 2, (RR
, RF
), rd_rm
),
14942 /* Instructions that were new with the real FPA, call them V2. */
14944 #define ARM_VARIANT &fpu_fpa_ext_v2
14945 cCE(lfm
, c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14946 cCL(lfmfd
, c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14947 cCL(lfmea
, d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14948 cCE(sfm
, c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14949 cCL(sfmfd
, d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14950 cCL(sfmea
, c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14953 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
14954 /* Moves and type conversions. */
14955 cCE(fcpys
, eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14956 cCE(fmrs
, e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
14957 cCE(fmsr
, e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
14958 cCE(fmstat
, ef1fa10
, 0, (), noargs
),
14959 cCE(fsitos
, eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14960 cCE(fuitos
, eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14961 cCE(ftosis
, ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14962 cCE(ftosizs
, ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14963 cCE(ftouis
, ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14964 cCE(ftouizs
, ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14965 cCE(fmrx
, ef00a10
, 2, (RR
, RVC
), rd_rn
),
14966 cCE(fmxr
, ee00a10
, 2, (RVC
, RR
), rn_rd
),
14968 /* Memory operations. */
14969 cCE(flds
, d100a00
, 2, (RVS
, ADDR
), vfp_sp_ldst
),
14970 cCE(fsts
, d000a00
, 2, (RVS
, ADDR
), vfp_sp_ldst
),
14971 cCE(fldmias
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
14972 cCE(fldmfds
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
14973 cCE(fldmdbs
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
14974 cCE(fldmeas
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
14975 cCE(fldmiax
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
14976 cCE(fldmfdx
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
14977 cCE(fldmdbx
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
14978 cCE(fldmeax
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
14979 cCE(fstmias
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
14980 cCE(fstmeas
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
14981 cCE(fstmdbs
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
14982 cCE(fstmfds
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
14983 cCE(fstmiax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
14984 cCE(fstmeax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
14985 cCE(fstmdbx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
14986 cCE(fstmfdx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
14988 /* Monadic operations. */
14989 cCE(fabss
, eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14990 cCE(fnegs
, eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14991 cCE(fsqrts
, eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14993 /* Dyadic operations. */
14994 cCE(fadds
, e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14995 cCE(fsubs
, e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14996 cCE(fmuls
, e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14997 cCE(fdivs
, e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14998 cCE(fmacs
, e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14999 cCE(fmscs
, e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15000 cCE(fnmuls
, e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15001 cCE(fnmacs
, e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15002 cCE(fnmscs
, e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15005 cCE(fcmps
, eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15006 cCE(fcmpzs
, eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
15007 cCE(fcmpes
, eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15008 cCE(fcmpezs
, eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
15011 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15012 /* Moves and type conversions. */
15013 cCE(fcpyd
, eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15014 cCE(fcvtds
, eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15015 cCE(fcvtsd
, eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15016 cCE(fmdhr
, e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
15017 cCE(fmdlr
, e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
15018 cCE(fmrdh
, e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
15019 cCE(fmrdl
, e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
15020 cCE(fsitod
, eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15021 cCE(fuitod
, eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15022 cCE(ftosid
, ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15023 cCE(ftosizd
, ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15024 cCE(ftouid
, ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15025 cCE(ftouizd
, ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15027 /* Memory operations. */
15028 cCE(fldd
, d100b00
, 2, (RVD
, ADDR
), vfp_dp_ldst
),
15029 cCE(fstd
, d000b00
, 2, (RVD
, ADDR
), vfp_dp_ldst
),
15030 cCE(fldmiad
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15031 cCE(fldmfdd
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15032 cCE(fldmdbd
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15033 cCE(fldmead
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15034 cCE(fstmiad
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15035 cCE(fstmead
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15036 cCE(fstmdbd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15037 cCE(fstmfdd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15039 /* Monadic operations. */
15040 cCE(fabsd
, eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15041 cCE(fnegd
, eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15042 cCE(fsqrtd
, eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15044 /* Dyadic operations. */
15045 cCE(faddd
, e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15046 cCE(fsubd
, e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15047 cCE(fmuld
, e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15048 cCE(fdivd
, e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15049 cCE(fmacd
, e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15050 cCE(fmscd
, e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15051 cCE(fnmuld
, e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15052 cCE(fnmacd
, e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15053 cCE(fnmscd
, e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15056 cCE(fcmpd
, eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15057 cCE(fcmpzd
, eb50b40
, 1, (RVD
), vfp_dp_rd
),
15058 cCE(fcmped
, eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15059 cCE(fcmpezd
, eb50bc0
, 1, (RVD
), vfp_dp_rd
),
15062 #define ARM_VARIANT &fpu_vfp_ext_v2
15063 cCE(fmsrr
, c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
15064 cCE(fmrrs
, c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
15065 cCE(fmdrr
, c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
15066 cCE(fmrrd
, c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
15068 /* Instructions which may belong to either the Neon or VFP instruction sets.
15069 Individual encoder functions perform additional architecture checks. */
15071 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15072 #undef THUMB_VARIANT
15073 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15074 /* These mnemonics are unique to VFP. */
15075 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
15076 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
15077 nCE(vnmul
, vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15078 nCE(vnmla
, vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15079 nCE(vnmls
, vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15080 nCE(vcmp
, vcmp
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
15081 nCE(vcmpe
, vcmpe
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
15082 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
15083 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
15084 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
15086 /* Mnemonics shared by Neon and VFP. */
15087 nCEF(vmul
, vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
15088 nCEF(vmla
, vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
15089 nCEF(vmls
, vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
15091 nCEF(vadd
, vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
15092 nCEF(vsub
, vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
15094 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
15095 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
15097 NCE(vldm
, c900b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15098 NCE(vldmia
, c900b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15099 NCE(vldmdb
, d100b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15100 NCE(vstm
, c800b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15101 NCE(vstmia
, c800b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15102 NCE(vstmdb
, d000b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15103 NCE(vldr
, d100b00
, 2, (RVSD
, ADDR
), neon_ldr_str
),
15104 NCE(vstr
, d000b00
, 2, (RVSD
, ADDR
), neon_ldr_str
),
15106 nCEF(vcvt
, vcvt
, 3, (RNSDQ
, RNSDQ
, oI32b
), neon_cvt
),
15108 /* NOTE: All VMOV encoding is special-cased! */
15109 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
15110 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
15112 #undef THUMB_VARIANT
15113 #define THUMB_VARIANT &fpu_neon_ext_v1
15115 #define ARM_VARIANT &fpu_neon_ext_v1
15116 /* Data processing with three registers of the same length. */
15117 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
15118 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
15119 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
15120 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15121 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15122 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15123 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15124 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15125 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15126 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15127 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15128 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15129 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15130 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15131 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15132 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15133 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15134 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15135 /* If not immediate, fall back to neon_dyadic_i64_su.
15136 shl_imm should accept I8 I16 I32 I64,
15137 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15138 nUF(vshl
, vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
15139 nUF(vshlq
, vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
15140 nUF(vqshl
, vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
15141 nUF(vqshlq
, vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
15142 /* Logic ops, types optional & ignored. */
15143 nUF(vand
, vand
, 2, (RNDQ
, NILO
), neon_logic
),
15144 nUF(vandq
, vand
, 2, (RNQ
, NILO
), neon_logic
),
15145 nUF(vbic
, vbic
, 2, (RNDQ
, NILO
), neon_logic
),
15146 nUF(vbicq
, vbic
, 2, (RNQ
, NILO
), neon_logic
),
15147 nUF(vorr
, vorr
, 2, (RNDQ
, NILO
), neon_logic
),
15148 nUF(vorrq
, vorr
, 2, (RNQ
, NILO
), neon_logic
),
15149 nUF(vorn
, vorn
, 2, (RNDQ
, NILO
), neon_logic
),
15150 nUF(vornq
, vorn
, 2, (RNQ
, NILO
), neon_logic
),
15151 nUF(veor
, veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
15152 nUF(veorq
, veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
15153 /* Bitfield ops, untyped. */
15154 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15155 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15156 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15157 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15158 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15159 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15160 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15161 nUF(vabd
, vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15162 nUF(vabdq
, vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15163 nUF(vmax
, vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15164 nUF(vmaxq
, vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15165 nUF(vmin
, vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15166 nUF(vminq
, vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15167 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15168 back to neon_dyadic_if_su. */
15169 nUF(vcge
, vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
15170 nUF(vcgeq
, vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
15171 nUF(vcgt
, vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
15172 nUF(vcgtq
, vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
15173 nUF(vclt
, vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
15174 nUF(vcltq
, vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
15175 nUF(vcle
, vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
15176 nUF(vcleq
, vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
15177 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
15178 nUF(vceq
, vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
15179 nUF(vceqq
, vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
15180 /* As above, D registers only. */
15181 nUF(vpmax
, vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
15182 nUF(vpmin
, vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
15183 /* Int and float variants, signedness unimportant. */
15184 /* If not scalar, fall back to neon_dyadic_if_i. */
15185 nUF(vmlaq
, vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
15186 nUF(vmlsq
, vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
15187 nUF(vpadd
, vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
15188 /* Add/sub take types I8 I16 I32 I64 F32. */
15189 nUF(vaddq
, vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
15190 nUF(vsubq
, vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
15191 /* vtst takes sizes 8, 16, 32. */
15192 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
15193 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
15194 /* VMUL takes I8 I16 I32 F32 P8. */
15195 nUF(vmulq
, vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
15196 /* VQD{R}MULH takes S16 S32. */
15197 nUF(vqdmulh
, vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
15198 nUF(vqdmulhq
, vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
15199 nUF(vqrdmulh
, vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
15200 nUF(vqrdmulhq
, vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
15201 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
15202 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
15203 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
15204 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
15205 NUF(vaclt
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
15206 NUF(vacltq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
15207 NUF(vacle
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
15208 NUF(vacleq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
15209 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
15210 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
15211 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
15212 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
15214 /* Two address, int/float. Types S8 S16 S32 F32. */
15215 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
15216 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
15218 /* Data processing with two registers and a shift amount. */
15219 /* Right shifts, and variants with rounding.
15220 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15221 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
15222 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
15223 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
15224 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
15225 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
15226 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
15227 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
15228 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
15229 /* Shift and insert. Sizes accepted 8 16 32 64. */
15230 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
15231 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
15232 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
15233 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
15234 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15235 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
15236 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
15237 /* Right shift immediate, saturating & narrowing, with rounding variants.
15238 Types accepted S16 S32 S64 U16 U32 U64. */
15239 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
15240 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
15241 /* As above, unsigned. Types accepted S16 S32 S64. */
15242 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
15243 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
15244 /* Right shift narrowing. Types accepted I16 I32 I64. */
15245 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
15246 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
15247 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15248 nUF(vshll
, vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
15249 /* CVT with optional immediate for fixed-point variant. */
15250 nUF(vcvtq
, vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
15252 nUF(vmvn
, vmvn
, 2, (RNDQ
, RNDQ_IMVNb
), neon_mvn
),
15253 nUF(vmvnq
, vmvn
, 2, (RNQ
, RNDQ_IMVNb
), neon_mvn
),
15255 /* Data processing, three registers of different lengths. */
15256 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15257 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
15258 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15259 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15260 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15261 /* If not scalar, fall back to neon_dyadic_long.
15262 Vector types as above, scalar types S16 S32 U16 U32. */
15263 nUF(vmlal
, vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
15264 nUF(vmlsl
, vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
15265 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15266 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
15267 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
15268 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15269 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15270 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15271 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15272 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15273 /* Saturating doubling multiplies. Types S16 S32. */
15274 nUF(vqdmlal
, vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15275 nUF(vqdmlsl
, vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15276 nUF(vqdmull
, vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15277 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15278 S16 S32 U16 U32. */
15279 nUF(vmull
, vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
15281 /* Extract. Size 8. */
15282 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I7
), neon_ext
),
15283 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I7
), neon_ext
),
15285 /* Two registers, miscellaneous. */
15286 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15287 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
15288 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
15289 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
15290 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
15291 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
15292 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
15293 /* Vector replicate. Sizes 8 16 32. */
15294 nCE(vdup
, vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
15295 nCE(vdupq
, vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
15296 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15297 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
15298 /* VMOVN. Types I16 I32 I64. */
15299 nUF(vmovn
, vmovn
, 2, (RND
, RNQ
), neon_movn
),
15300 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15301 nUF(vqmovn
, vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
15302 /* VQMOVUN. Types S16 S32 S64. */
15303 nUF(vqmovun
, vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
15304 /* VZIP / VUZP. Sizes 8 16 32. */
15305 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
15306 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
15307 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
15308 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
15309 /* VQABS / VQNEG. Types S8 S16 S32. */
15310 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
15311 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
15312 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
15313 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
15314 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15315 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
15316 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
15317 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
15318 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
15319 /* Reciprocal estimates. Types U32 F32. */
15320 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
15321 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
15322 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
15323 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
15324 /* VCLS. Types S8 S16 S32. */
15325 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
15326 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
15327 /* VCLZ. Types I8 I16 I32. */
15328 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
15329 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
15330 /* VCNT. Size 8. */
15331 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
15332 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
15333 /* Two address, untyped. */
15334 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
15335 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
15336 /* VTRN. Sizes 8 16 32. */
15337 nUF(vtrn
, vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
15338 nUF(vtrnq
, vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
15340 /* Table lookup. Size 8. */
15341 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
15342 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
15344 #undef THUMB_VARIANT
15345 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15347 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15348 /* Neon element/structure load/store. */
15349 nUF(vld1
, vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15350 nUF(vst1
, vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15351 nUF(vld2
, vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15352 nUF(vst2
, vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15353 nUF(vld3
, vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15354 nUF(vst3
, vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15355 nUF(vld4
, vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15356 nUF(vst4
, vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15358 #undef THUMB_VARIANT
15359 #define THUMB_VARIANT &fpu_vfp_ext_v3
15361 #define ARM_VARIANT &fpu_vfp_ext_v3
15362 cCE(fconsts
, eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
15363 cCE(fconstd
, eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
15364 cCE(fshtos
, eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15365 cCE(fshtod
, eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15366 cCE(fsltos
, eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15367 cCE(fsltod
, eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15368 cCE(fuhtos
, ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15369 cCE(fuhtod
, ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15370 cCE(fultos
, ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15371 cCE(fultod
, ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15372 cCE(ftoshs
, ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15373 cCE(ftoshd
, ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15374 cCE(ftosls
, ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15375 cCE(ftosld
, ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15376 cCE(ftouhs
, ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15377 cCE(ftouhd
, ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15378 cCE(ftouls
, ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15379 cCE(ftould
, ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15381 #undef THUMB_VARIANT
15383 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15384 cCE(mia
, e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15385 cCE(miaph
, e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15386 cCE(miabb
, e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15387 cCE(miabt
, e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15388 cCE(miatb
, e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15389 cCE(miatt
, e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15390 cCE(mar
, c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
15391 cCE(mra
, c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
15394 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
15395 cCE(tandcb
, e13f130
, 1, (RR
), iwmmxt_tandorc
),
15396 cCE(tandch
, e53f130
, 1, (RR
), iwmmxt_tandorc
),
15397 cCE(tandcw
, e93f130
, 1, (RR
), iwmmxt_tandorc
),
15398 cCE(tbcstb
, e400010
, 2, (RIWR
, RR
), rn_rd
),
15399 cCE(tbcsth
, e400050
, 2, (RIWR
, RR
), rn_rd
),
15400 cCE(tbcstw
, e400090
, 2, (RIWR
, RR
), rn_rd
),
15401 cCE(textrcb
, e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
15402 cCE(textrch
, e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
15403 cCE(textrcw
, e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
15404 cCE(textrmub
, e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15405 cCE(textrmuh
, e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15406 cCE(textrmuw
, e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15407 cCE(textrmsb
, e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15408 cCE(textrmsh
, e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15409 cCE(textrmsw
, e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15410 cCE(tinsrb
, e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15411 cCE(tinsrh
, e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15412 cCE(tinsrw
, e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15413 cCE(tmcr
, e000110
, 2, (RIWC
, RR
), rn_rd
),
15414 cCE(tmcrr
, c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
15415 cCE(tmia
, e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15416 cCE(tmiaph
, e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15417 cCE(tmiabb
, e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15418 cCE(tmiabt
, e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15419 cCE(tmiatb
, e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15420 cCE(tmiatt
, e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15421 cCE(tmovmskb
, e100030
, 2, (RR
, RIWR
), rd_rn
),
15422 cCE(tmovmskh
, e500030
, 2, (RR
, RIWR
), rd_rn
),
15423 cCE(tmovmskw
, e900030
, 2, (RR
, RIWR
), rd_rn
),
15424 cCE(tmrc
, e100110
, 2, (RR
, RIWC
), rd_rn
),
15425 cCE(tmrrc
, c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
15426 cCE(torcb
, e13f150
, 1, (RR
), iwmmxt_tandorc
),
15427 cCE(torch
, e53f150
, 1, (RR
), iwmmxt_tandorc
),
15428 cCE(torcw
, e93f150
, 1, (RR
), iwmmxt_tandorc
),
15429 cCE(waccb
, e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15430 cCE(wacch
, e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15431 cCE(waccw
, e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15432 cCE(waddbss
, e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15433 cCE(waddb
, e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15434 cCE(waddbus
, e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15435 cCE(waddhss
, e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15436 cCE(waddh
, e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15437 cCE(waddhus
, e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15438 cCE(waddwss
, eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15439 cCE(waddw
, e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15440 cCE(waddwus
, e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15441 cCE(waligni
, e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
15442 cCE(walignr0
, e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15443 cCE(walignr1
, e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15444 cCE(walignr2
, ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15445 cCE(walignr3
, eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15446 cCE(wand
, e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15447 cCE(wandn
, e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15448 cCE(wavg2b
, e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15449 cCE(wavg2br
, e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15450 cCE(wavg2h
, ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15451 cCE(wavg2hr
, ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15452 cCE(wcmpeqb
, e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15453 cCE(wcmpeqh
, e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15454 cCE(wcmpeqw
, e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15455 cCE(wcmpgtub
, e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15456 cCE(wcmpgtuh
, e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15457 cCE(wcmpgtuw
, e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15458 cCE(wcmpgtsb
, e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15459 cCE(wcmpgtsh
, e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15460 cCE(wcmpgtsw
, eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15461 cCE(wldrb
, c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15462 cCE(wldrh
, c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15463 cCE(wldrw
, c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
15464 cCE(wldrd
, c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
15465 cCE(wmacs
, e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15466 cCE(wmacsz
, e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15467 cCE(wmacu
, e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15468 cCE(wmacuz
, e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15469 cCE(wmadds
, ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15470 cCE(wmaddu
, e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15471 cCE(wmaxsb
, e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15472 cCE(wmaxsh
, e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15473 cCE(wmaxsw
, ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15474 cCE(wmaxub
, e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15475 cCE(wmaxuh
, e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15476 cCE(wmaxuw
, e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15477 cCE(wminsb
, e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15478 cCE(wminsh
, e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15479 cCE(wminsw
, eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15480 cCE(wminub
, e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15481 cCE(wminuh
, e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15482 cCE(wminuw
, e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15483 cCE(wmov
, e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
15484 cCE(wmulsm
, e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15485 cCE(wmulsl
, e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15486 cCE(wmulum
, e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15487 cCE(wmulul
, e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15488 cCE(wor
, e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15489 cCE(wpackhss
, e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15490 cCE(wpackhus
, e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15491 cCE(wpackwss
, eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15492 cCE(wpackwus
, e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15493 cCE(wpackdss
, ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15494 cCE(wpackdus
, ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15495 cCE(wrorh
, e700040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15496 cCE(wrorhg
, e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15497 cCE(wrorw
, eb00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15498 cCE(wrorwg
, eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15499 cCE(wrord
, ef00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15500 cCE(wrordg
, ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15501 cCE(wsadb
, e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15502 cCE(wsadbz
, e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15503 cCE(wsadh
, e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15504 cCE(wsadhz
, e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15505 cCE(wshufh
, e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
15506 cCE(wsllh
, e500040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15507 cCE(wsllhg
, e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15508 cCE(wsllw
, e900040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15509 cCE(wsllwg
, e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15510 cCE(wslld
, ed00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15511 cCE(wslldg
, ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15512 cCE(wsrah
, e400040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15513 cCE(wsrahg
, e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15514 cCE(wsraw
, e800040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15515 cCE(wsrawg
, e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15516 cCE(wsrad
, ec00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15517 cCE(wsradg
, ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15518 cCE(wsrlh
, e600040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15519 cCE(wsrlhg
, e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15520 cCE(wsrlw
, ea00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15521 cCE(wsrlwg
, ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15522 cCE(wsrld
, ee00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15523 cCE(wsrldg
, ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15524 cCE(wstrb
, c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15525 cCE(wstrh
, c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15526 cCE(wstrw
, c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
15527 cCE(wstrd
, c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
15528 cCE(wsubbss
, e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15529 cCE(wsubb
, e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15530 cCE(wsubbus
, e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15531 cCE(wsubhss
, e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15532 cCE(wsubh
, e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15533 cCE(wsubhus
, e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15534 cCE(wsubwss
, eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15535 cCE(wsubw
, e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15536 cCE(wsubwus
, e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15537 cCE(wunpckehub
,e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15538 cCE(wunpckehuh
,e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15539 cCE(wunpckehuw
,e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15540 cCE(wunpckehsb
,e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15541 cCE(wunpckehsh
,e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15542 cCE(wunpckehsw
,ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15543 cCE(wunpckihb
, e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15544 cCE(wunpckihh
, e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15545 cCE(wunpckihw
, e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15546 cCE(wunpckelub
,e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15547 cCE(wunpckeluh
,e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15548 cCE(wunpckeluw
,e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15549 cCE(wunpckelsb
,e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15550 cCE(wunpckelsh
,e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15551 cCE(wunpckelsw
,ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15552 cCE(wunpckilb
, e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15553 cCE(wunpckilh
, e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15554 cCE(wunpckilw
, e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15555 cCE(wxor
, e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15556 cCE(wzero
, e300000
, 1, (RIWR
), iwmmxt_wzero
),
15559 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
15560 cCE(cfldrs
, c100400
, 2, (RMF
, ADDR
), rd_cpaddr
),
15561 cCE(cfldrd
, c500400
, 2, (RMD
, ADDR
), rd_cpaddr
),
15562 cCE(cfldr32
, c100500
, 2, (RMFX
, ADDR
), rd_cpaddr
),
15563 cCE(cfldr64
, c500500
, 2, (RMDX
, ADDR
), rd_cpaddr
),
15564 cCE(cfstrs
, c000400
, 2, (RMF
, ADDR
), rd_cpaddr
),
15565 cCE(cfstrd
, c400400
, 2, (RMD
, ADDR
), rd_cpaddr
),
15566 cCE(cfstr32
, c000500
, 2, (RMFX
, ADDR
), rd_cpaddr
),
15567 cCE(cfstr64
, c400500
, 2, (RMDX
, ADDR
), rd_cpaddr
),
15568 cCE(cfmvsr
, e000450
, 2, (RMF
, RR
), rn_rd
),
15569 cCE(cfmvrs
, e100450
, 2, (RR
, RMF
), rd_rn
),
15570 cCE(cfmvdlr
, e000410
, 2, (RMD
, RR
), rn_rd
),
15571 cCE(cfmvrdl
, e100410
, 2, (RR
, RMD
), rd_rn
),
15572 cCE(cfmvdhr
, e000430
, 2, (RMD
, RR
), rn_rd
),
15573 cCE(cfmvrdh
, e100430
, 2, (RR
, RMD
), rd_rn
),
15574 cCE(cfmv64lr
, e000510
, 2, (RMDX
, RR
), rn_rd
),
15575 cCE(cfmvr64l
, e100510
, 2, (RR
, RMDX
), rd_rn
),
15576 cCE(cfmv64hr
, e000530
, 2, (RMDX
, RR
), rn_rd
),
15577 cCE(cfmvr64h
, e100530
, 2, (RR
, RMDX
), rd_rn
),
15578 cCE(cfmval32
, e200440
, 2, (RMAX
, RMFX
), rd_rn
),
15579 cCE(cfmv32al
, e100440
, 2, (RMFX
, RMAX
), rd_rn
),
15580 cCE(cfmvam32
, e200460
, 2, (RMAX
, RMFX
), rd_rn
),
15581 cCE(cfmv32am
, e100460
, 2, (RMFX
, RMAX
), rd_rn
),
15582 cCE(cfmvah32
, e200480
, 2, (RMAX
, RMFX
), rd_rn
),
15583 cCE(cfmv32ah
, e100480
, 2, (RMFX
, RMAX
), rd_rn
),
15584 cCE(cfmva32
, e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
15585 cCE(cfmv32a
, e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
15586 cCE(cfmva64
, e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
15587 cCE(cfmv64a
, e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
15588 cCE(cfmvsc32
, e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
15589 cCE(cfmv32sc
, e1004e0
, 2, (RMDX
, RMDS
), rd
),
15590 cCE(cfcpys
, e000400
, 2, (RMF
, RMF
), rd_rn
),
15591 cCE(cfcpyd
, e000420
, 2, (RMD
, RMD
), rd_rn
),
15592 cCE(cfcvtsd
, e000460
, 2, (RMD
, RMF
), rd_rn
),
15593 cCE(cfcvtds
, e000440
, 2, (RMF
, RMD
), rd_rn
),
15594 cCE(cfcvt32s
, e000480
, 2, (RMF
, RMFX
), rd_rn
),
15595 cCE(cfcvt32d
, e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
15596 cCE(cfcvt64s
, e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
15597 cCE(cfcvt64d
, e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
15598 cCE(cfcvts32
, e100580
, 2, (RMFX
, RMF
), rd_rn
),
15599 cCE(cfcvtd32
, e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
15600 cCE(cftruncs32
,e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
15601 cCE(cftruncd32
,e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
15602 cCE(cfrshl32
, e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
15603 cCE(cfrshl64
, e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
15604 cCE(cfsh32
, e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
15605 cCE(cfsh64
, e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
15606 cCE(cfcmps
, e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
15607 cCE(cfcmpd
, e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
15608 cCE(cfcmp32
, e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
15609 cCE(cfcmp64
, e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
15610 cCE(cfabss
, e300400
, 2, (RMF
, RMF
), rd_rn
),
15611 cCE(cfabsd
, e300420
, 2, (RMD
, RMD
), rd_rn
),
15612 cCE(cfnegs
, e300440
, 2, (RMF
, RMF
), rd_rn
),
15613 cCE(cfnegd
, e300460
, 2, (RMD
, RMD
), rd_rn
),
15614 cCE(cfadds
, e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
15615 cCE(cfaddd
, e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
15616 cCE(cfsubs
, e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
15617 cCE(cfsubd
, e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
15618 cCE(cfmuls
, e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
15619 cCE(cfmuld
, e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
15620 cCE(cfabs32
, e300500
, 2, (RMFX
, RMFX
), rd_rn
),
15621 cCE(cfabs64
, e300520
, 2, (RMDX
, RMDX
), rd_rn
),
15622 cCE(cfneg32
, e300540
, 2, (RMFX
, RMFX
), rd_rn
),
15623 cCE(cfneg64
, e300560
, 2, (RMDX
, RMDX
), rd_rn
),
15624 cCE(cfadd32
, e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15625 cCE(cfadd64
, e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
15626 cCE(cfsub32
, e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15627 cCE(cfsub64
, e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
15628 cCE(cfmul32
, e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15629 cCE(cfmul64
, e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
15630 cCE(cfmac32
, e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15631 cCE(cfmsc32
, e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15632 cCE(cfmadd32
, e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
15633 cCE(cfmsub32
, e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
15634 cCE(cfmadda32
, e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
15635 cCE(cfmsuba32
, e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
15638 #undef THUMB_VARIANT
15665 /* MD interface: bits in the object file. */
15667 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
15668 for use in the a.out file, and stores them in the array pointed to by buf.
15669 This knows about the endian-ness of the target machine and does
15670 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
15671 2 (short) and 4 (long) Floating numbers are put out as a series of
15672 LITTLENUMS (shorts, here at least). */
15675 md_number_to_chars (char * buf
, valueT val
, int n
)
15677 if (target_big_endian
)
15678 number_to_chars_bigendian (buf
, val
, n
);
15680 number_to_chars_littleendian (buf
, val
, n
);
15684 md_chars_to_number (char * buf
, int n
)
15687 unsigned char * where
= (unsigned char *) buf
;
15689 if (target_big_endian
)
15694 result
|= (*where
++ & 255);
15702 result
|= (where
[n
] & 255);
15709 /* MD interface: Sections. */
15711 /* Estimate the size of a frag before relaxing. Assume everything fits in
15715 md_estimate_size_before_relax (fragS
* fragp
,
15716 segT segtype ATTRIBUTE_UNUSED
)
15722 /* Convert a machine dependent frag. */
15725 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
15727 unsigned long insn
;
15728 unsigned long old_op
;
15736 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
15738 old_op
= bfd_get_16(abfd
, buf
);
15739 if (fragp
->fr_symbol
) {
15740 exp
.X_op
= O_symbol
;
15741 exp
.X_add_symbol
= fragp
->fr_symbol
;
15743 exp
.X_op
= O_constant
;
15745 exp
.X_add_number
= fragp
->fr_offset
;
15746 opcode
= fragp
->fr_subtype
;
15749 case T_MNEM_ldr_pc
:
15750 case T_MNEM_ldr_pc2
:
15751 case T_MNEM_ldr_sp
:
15752 case T_MNEM_str_sp
:
15759 if (fragp
->fr_var
== 4)
15761 insn
= THUMB_OP32(opcode
);
15762 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
15764 insn
|= (old_op
& 0x700) << 4;
15768 insn
|= (old_op
& 7) << 12;
15769 insn
|= (old_op
& 0x38) << 13;
15771 insn
|= 0x00000c00;
15772 put_thumb32_insn (buf
, insn
);
15773 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
15777 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
15779 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
15782 if (fragp
->fr_var
== 4)
15784 insn
= THUMB_OP32 (opcode
);
15785 insn
|= (old_op
& 0xf0) << 4;
15786 put_thumb32_insn (buf
, insn
);
15787 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
15791 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
15792 exp
.X_add_number
-= 4;
15800 if (fragp
->fr_var
== 4)
15802 int r0off
= (opcode
== T_MNEM_mov
15803 || opcode
== T_MNEM_movs
) ? 0 : 8;
15804 insn
= THUMB_OP32 (opcode
);
15805 insn
= (insn
& 0xe1ffffff) | 0x10000000;
15806 insn
|= (old_op
& 0x700) << r0off
;
15807 put_thumb32_insn (buf
, insn
);
15808 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
15812 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
15817 if (fragp
->fr_var
== 4)
15819 insn
= THUMB_OP32(opcode
);
15820 put_thumb32_insn (buf
, insn
);
15821 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
15824 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
15828 if (fragp
->fr_var
== 4)
15830 insn
= THUMB_OP32(opcode
);
15831 insn
|= (old_op
& 0xf00) << 14;
15832 put_thumb32_insn (buf
, insn
);
15833 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
15836 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
15839 case T_MNEM_add_sp
:
15840 case T_MNEM_add_pc
:
15841 case T_MNEM_inc_sp
:
15842 case T_MNEM_dec_sp
:
15843 if (fragp
->fr_var
== 4)
15845 /* ??? Choose between add and addw. */
15846 insn
= THUMB_OP32 (opcode
);
15847 insn
|= (old_op
& 0xf0) << 4;
15848 put_thumb32_insn (buf
, insn
);
15849 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
15852 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
15860 if (fragp
->fr_var
== 4)
15862 insn
= THUMB_OP32 (opcode
);
15863 insn
|= (old_op
& 0xf0) << 4;
15864 insn
|= (old_op
& 0xf) << 16;
15865 put_thumb32_insn (buf
, insn
);
15866 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
15869 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
15875 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
15877 fixp
->fx_file
= fragp
->fr_file
;
15878 fixp
->fx_line
= fragp
->fr_line
;
15879 fragp
->fr_fix
+= fragp
->fr_var
;
15882 /* Return the size of a relaxable immediate operand instruction.
15883 SHIFT and SIZE specify the form of the allowable immediate. */
15885 relax_immediate (fragS
*fragp
, int size
, int shift
)
15891 /* ??? Should be able to do better than this. */
15892 if (fragp
->fr_symbol
)
15895 low
= (1 << shift
) - 1;
15896 mask
= (1 << (shift
+ size
)) - (1 << shift
);
15897 offset
= fragp
->fr_offset
;
15898 /* Force misaligned offsets to 32-bit variant. */
15901 if (offset
& ~mask
)
15906 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
15909 relax_adr (fragS
*fragp
, asection
*sec
)
15914 /* Assume worst case for symbols not known to be in the same section. */
15915 if (!S_IS_DEFINED(fragp
->fr_symbol
)
15916 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
15919 val
= S_GET_VALUE(fragp
->fr_symbol
) + fragp
->fr_offset
;
15920 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
15921 addr
= (addr
+ 4) & ~3;
15922 /* Fix the insn as the 4-byte version if the target address is not
15923 sufficiently aligned. This prevents an infinite loop when two
15924 instructions have contradictory range/alignment requirements. */
15928 if (val
< 0 || val
> 1020)
15933 /* Return the size of a relaxable add/sub immediate instruction. */
15935 relax_addsub (fragS
*fragp
, asection
*sec
)
15940 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
15941 op
= bfd_get_16(sec
->owner
, buf
);
15942 if ((op
& 0xf) == ((op
>> 4) & 0xf))
15943 return relax_immediate (fragp
, 8, 0);
15945 return relax_immediate (fragp
, 3, 0);
15949 /* Return the size of a relaxable branch instruction. BITS is the
15950 size of the offset field in the narrow instruction. */
15953 relax_branch (fragS
*fragp
, asection
*sec
, int bits
)
15959 /* Assume worst case for symbols not known to be in the same section. */
15960 if (!S_IS_DEFINED(fragp
->fr_symbol
)
15961 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
15964 val
= S_GET_VALUE(fragp
->fr_symbol
) + fragp
->fr_offset
;
15965 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
15968 /* Offset is a signed value *2 */
15970 if (val
>= limit
|| val
< -limit
)
15976 /* Relax a machine dependent frag. This returns the amount by which
15977 the current size of the frag should change. */
15980 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch ATTRIBUTE_UNUSED
)
15985 oldsize
= fragp
->fr_var
;
15986 switch (fragp
->fr_subtype
)
15988 case T_MNEM_ldr_pc2
:
15989 newsize
= relax_adr(fragp
, sec
);
15991 case T_MNEM_ldr_pc
:
15992 case T_MNEM_ldr_sp
:
15993 case T_MNEM_str_sp
:
15994 newsize
= relax_immediate(fragp
, 8, 2);
15998 newsize
= relax_immediate(fragp
, 5, 2);
16002 newsize
= relax_immediate(fragp
, 5, 1);
16006 newsize
= relax_immediate(fragp
, 5, 0);
16009 newsize
= relax_adr(fragp
, sec
);
16015 newsize
= relax_immediate(fragp
, 8, 0);
16018 newsize
= relax_branch(fragp
, sec
, 11);
16021 newsize
= relax_branch(fragp
, sec
, 8);
16023 case T_MNEM_add_sp
:
16024 case T_MNEM_add_pc
:
16025 newsize
= relax_immediate (fragp
, 8, 2);
16027 case T_MNEM_inc_sp
:
16028 case T_MNEM_dec_sp
:
16029 newsize
= relax_immediate (fragp
, 7, 2);
16035 newsize
= relax_addsub (fragp
, sec
);
16042 fragp
->fr_var
= -newsize
;
16043 md_convert_frag (sec
->owner
, sec
, fragp
);
16045 return -(newsize
+ oldsize
);
16047 fragp
->fr_var
= newsize
;
16048 return newsize
- oldsize
;
16051 /* Round up a section size to the appropriate boundary. */
16054 md_section_align (segT segment ATTRIBUTE_UNUSED
,
16060 /* Round all sects to multiple of 4. */
16061 return (size
+ 3) & ~3;
16065 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16066 of an rs_align_code fragment. */
16069 arm_handle_align (fragS
* fragP
)
16071 static char const arm_noop
[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16072 static char const thumb_noop
[2] = { 0xc0, 0x46 };
16073 static char const arm_bigend_noop
[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16074 static char const thumb_bigend_noop
[2] = { 0x46, 0xc0 };
16076 int bytes
, fix
, noop_size
;
16080 if (fragP
->fr_type
!= rs_align_code
)
16083 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
16084 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
16087 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
16088 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
16090 if (fragP
->tc_frag_data
)
16092 if (target_big_endian
)
16093 noop
= thumb_bigend_noop
;
16096 noop_size
= sizeof (thumb_noop
);
16100 if (target_big_endian
)
16101 noop
= arm_bigend_noop
;
16104 noop_size
= sizeof (arm_noop
);
16107 if (bytes
& (noop_size
- 1))
16109 fix
= bytes
& (noop_size
- 1);
16110 memset (p
, 0, fix
);
16115 while (bytes
>= noop_size
)
16117 memcpy (p
, noop
, noop_size
);
16119 bytes
-= noop_size
;
16123 fragP
->fr_fix
+= fix
;
16124 fragP
->fr_var
= noop_size
;
16127 /* Called from md_do_align. Used to create an alignment
16128 frag in a code section. */
16131 arm_frag_align_code (int n
, int max
)
16135 /* We assume that there will never be a requirement
16136 to support alignments greater than 32 bytes. */
16137 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
16138 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16140 p
= frag_var (rs_align_code
,
16141 MAX_MEM_FOR_RS_ALIGN_CODE
,
16143 (relax_substateT
) max
,
16150 /* Perform target specific initialisation of a frag. */
16153 arm_init_frag (fragS
* fragP
)
16155 /* Record whether this frag is in an ARM or a THUMB area. */
16156 fragP
->tc_frag_data
= thumb_mode
;
16160 /* When we change sections we need to issue a new mapping symbol. */
16163 arm_elf_change_section (void)
16166 segment_info_type
*seginfo
;
16168 /* Link an unlinked unwind index table section to the .text section. */
16169 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
16170 && elf_linked_to_section (now_seg
) == NULL
)
16171 elf_linked_to_section (now_seg
) = text_section
;
16173 if (!SEG_NORMAL (now_seg
))
16176 flags
= bfd_get_section_flags (stdoutput
, now_seg
);
16178 /* We can ignore sections that only contain debug info. */
16179 if ((flags
& SEC_ALLOC
) == 0)
16182 seginfo
= seg_info (now_seg
);
16183 mapstate
= seginfo
->tc_segment_info_data
.mapstate
;
16184 marked_pr_dependency
= seginfo
->tc_segment_info_data
.marked_pr_dependency
;
16188 arm_elf_section_type (const char * str
, size_t len
)
16190 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
16191 return SHT_ARM_EXIDX
;
16196 /* Code to deal with unwinding tables. */
16198 static void add_unwind_adjustsp (offsetT
);
16200 /* Generate any deferred unwind frame offset. */
16203 flush_pending_unwind (void)
16207 offset
= unwind
.pending_offset
;
16208 unwind
.pending_offset
= 0;
16210 add_unwind_adjustsp (offset
);
16213 /* Add an opcode to this list for this function. Two-byte opcodes should
16214 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16218 add_unwind_opcode (valueT op
, int length
)
16220 /* Add any deferred stack adjustment. */
16221 if (unwind
.pending_offset
)
16222 flush_pending_unwind ();
16224 unwind
.sp_restored
= 0;
16226 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
16228 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
16229 if (unwind
.opcodes
)
16230 unwind
.opcodes
= xrealloc (unwind
.opcodes
,
16231 unwind
.opcode_alloc
);
16233 unwind
.opcodes
= xmalloc (unwind
.opcode_alloc
);
16238 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
16240 unwind
.opcode_count
++;
16244 /* Add unwind opcodes to adjust the stack pointer. */
16247 add_unwind_adjustsp (offsetT offset
)
16251 if (offset
> 0x200)
16253 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
16258 /* Long form: 0xb2, uleb128. */
16259 /* This might not fit in a word so add the individual bytes,
16260 remembering the list is built in reverse order. */
16261 o
= (valueT
) ((offset
- 0x204) >> 2);
16263 add_unwind_opcode (0, 1);
16265 /* Calculate the uleb128 encoding of the offset. */
16269 bytes
[n
] = o
& 0x7f;
16275 /* Add the insn. */
16277 add_unwind_opcode (bytes
[n
- 1], 1);
16278 add_unwind_opcode (0xb2, 1);
16280 else if (offset
> 0x100)
16282 /* Two short opcodes. */
16283 add_unwind_opcode (0x3f, 1);
16284 op
= (offset
- 0x104) >> 2;
16285 add_unwind_opcode (op
, 1);
16287 else if (offset
> 0)
16289 /* Short opcode. */
16290 op
= (offset
- 4) >> 2;
16291 add_unwind_opcode (op
, 1);
16293 else if (offset
< 0)
16296 while (offset
> 0x100)
16298 add_unwind_opcode (0x7f, 1);
16301 op
= ((offset
- 4) >> 2) | 0x40;
16302 add_unwind_opcode (op
, 1);
16306 /* Finish the list of unwind opcodes for this function. */
16308 finish_unwind_opcodes (void)
16312 if (unwind
.fp_used
)
16314 /* Adjust sp as necessary. */
16315 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
16316 flush_pending_unwind ();
16318 /* After restoring sp from the frame pointer. */
16319 op
= 0x90 | unwind
.fp_reg
;
16320 add_unwind_opcode (op
, 1);
16323 flush_pending_unwind ();
16327 /* Start an exception table entry. If idx is nonzero this is an index table
16331 start_unwind_section (const segT text_seg
, int idx
)
16333 const char * text_name
;
16334 const char * prefix
;
16335 const char * prefix_once
;
16336 const char * group_name
;
16340 size_t sec_name_len
;
16347 prefix
= ELF_STRING_ARM_unwind
;
16348 prefix_once
= ELF_STRING_ARM_unwind_once
;
16349 type
= SHT_ARM_EXIDX
;
16353 prefix
= ELF_STRING_ARM_unwind_info
;
16354 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
16355 type
= SHT_PROGBITS
;
16358 text_name
= segment_name (text_seg
);
16359 if (streq (text_name
, ".text"))
16362 if (strncmp (text_name
, ".gnu.linkonce.t.",
16363 strlen (".gnu.linkonce.t.")) == 0)
16365 prefix
= prefix_once
;
16366 text_name
+= strlen (".gnu.linkonce.t.");
16369 prefix_len
= strlen (prefix
);
16370 text_len
= strlen (text_name
);
16371 sec_name_len
= prefix_len
+ text_len
;
16372 sec_name
= xmalloc (sec_name_len
+ 1);
16373 memcpy (sec_name
, prefix
, prefix_len
);
16374 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
16375 sec_name
[prefix_len
+ text_len
] = '\0';
16381 /* Handle COMDAT group. */
16382 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
16384 group_name
= elf_group_name (text_seg
);
16385 if (group_name
== NULL
)
16387 as_bad ("Group section `%s' has no group signature",
16388 segment_name (text_seg
));
16389 ignore_rest_of_line ();
16392 flags
|= SHF_GROUP
;
16396 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
16398 /* Set the setion link for index tables. */
16400 elf_linked_to_section (now_seg
) = text_seg
;
16404 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
16405 personality routine data. Returns zero, or the index table value for
16406 and inline entry. */
16409 create_unwind_entry (int have_data
)
16414 /* The current word of data. */
16416 /* The number of bytes left in this word. */
16419 finish_unwind_opcodes ();
16421 /* Remember the current text section. */
16422 unwind
.saved_seg
= now_seg
;
16423 unwind
.saved_subseg
= now_subseg
;
16425 start_unwind_section (now_seg
, 0);
16427 if (unwind
.personality_routine
== NULL
)
16429 if (unwind
.personality_index
== -2)
16432 as_bad (_("handerdata in cantunwind frame"));
16433 return 1; /* EXIDX_CANTUNWIND. */
16436 /* Use a default personality routine if none is specified. */
16437 if (unwind
.personality_index
== -1)
16439 if (unwind
.opcode_count
> 3)
16440 unwind
.personality_index
= 1;
16442 unwind
.personality_index
= 0;
16445 /* Space for the personality routine entry. */
16446 if (unwind
.personality_index
== 0)
16448 if (unwind
.opcode_count
> 3)
16449 as_bad (_("too many unwind opcodes for personality routine 0"));
16453 /* All the data is inline in the index table. */
16456 while (unwind
.opcode_count
> 0)
16458 unwind
.opcode_count
--;
16459 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
16463 /* Pad with "finish" opcodes. */
16465 data
= (data
<< 8) | 0xb0;
16472 /* We get two opcodes "free" in the first word. */
16473 size
= unwind
.opcode_count
- 2;
16476 /* An extra byte is required for the opcode count. */
16477 size
= unwind
.opcode_count
+ 1;
16479 size
= (size
+ 3) >> 2;
16481 as_bad (_("too many unwind opcodes"));
16483 frag_align (2, 0, 0);
16484 record_alignment (now_seg
, 2);
16485 unwind
.table_entry
= expr_build_dot ();
16487 /* Allocate the table entry. */
16488 ptr
= frag_more ((size
<< 2) + 4);
16489 where
= frag_now_fix () - ((size
<< 2) + 4);
16491 switch (unwind
.personality_index
)
16494 /* ??? Should this be a PLT generating relocation? */
16495 /* Custom personality routine. */
16496 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
16497 BFD_RELOC_ARM_PREL31
);
16502 /* Set the first byte to the number of additional words. */
16507 /* ABI defined personality routines. */
16509 /* Three opcodes bytes are packed into the first word. */
16516 /* The size and first two opcode bytes go in the first word. */
16517 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
16522 /* Should never happen. */
16526 /* Pack the opcodes into words (MSB first), reversing the list at the same
16528 while (unwind
.opcode_count
> 0)
16532 md_number_to_chars (ptr
, data
, 4);
16537 unwind
.opcode_count
--;
16539 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
16542 /* Finish off the last word. */
16545 /* Pad with "finish" opcodes. */
16547 data
= (data
<< 8) | 0xb0;
16549 md_number_to_chars (ptr
, data
, 4);
16554 /* Add an empty descriptor if there is no user-specified data. */
16555 ptr
= frag_more (4);
16556 md_number_to_chars (ptr
, 0, 4);
16562 /* Convert REGNAME to a DWARF-2 register number. */
16565 tc_arm_regname_to_dw2regnum (char *regname
)
16567 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
16575 /* Initialize the DWARF-2 unwind information for this procedure. */
16578 tc_arm_frame_initial_instructions (void)
16580 cfi_add_CFA_def_cfa (REG_SP
, 0);
16582 #endif /* OBJ_ELF */
16585 /* MD interface: Symbol and relocation handling. */
16587 /* Return the address within the segment that a PC-relative fixup is
16588 relative to. For ARM, PC-relative fixups applied to instructions
16589 are generally relative to the location of the fixup plus 8 bytes.
16590 Thumb branches are offset by 4, and Thumb loads relative to PC
16591 require special handling. */
16594 md_pcrel_from_section (fixS
* fixP
, segT seg
)
16596 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
16598 /* If this is pc-relative and we are going to emit a relocation
16599 then we just want to put out any pipeline compensation that the linker
16600 will need. Otherwise we want to use the calculated base.
16601 For WinCE we skip the bias for externals as well, since this
16602 is how the MS ARM-CE assembler behaves and we want to be compatible. */
16604 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
16605 || (arm_force_relocation (fixP
)
16607 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
16612 switch (fixP
->fx_r_type
)
16614 /* PC relative addressing on the Thumb is slightly odd as the
16615 bottom two bits of the PC are forced to zero for the
16616 calculation. This happens *after* application of the
16617 pipeline offset. However, Thumb adrl already adjusts for
16618 this, so we need not do it again. */
16619 case BFD_RELOC_ARM_THUMB_ADD
:
16622 case BFD_RELOC_ARM_THUMB_OFFSET
:
16623 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
16624 case BFD_RELOC_ARM_T32_ADD_PC12
:
16625 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
16626 return (base
+ 4) & ~3;
16628 /* Thumb branches are simply offset by +4. */
16629 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
16630 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
16631 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
16632 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
16633 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
16634 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
16635 case BFD_RELOC_THUMB_PCREL_BLX
:
16638 /* ARM mode branches are offset by +8. However, the Windows CE
16639 loader expects the relocation not to take this into account. */
16640 case BFD_RELOC_ARM_PCREL_BRANCH
:
16641 case BFD_RELOC_ARM_PCREL_CALL
:
16642 case BFD_RELOC_ARM_PCREL_JUMP
:
16643 case BFD_RELOC_ARM_PCREL_BLX
:
16644 case BFD_RELOC_ARM_PLT32
:
16646 /* When handling fixups immediately, because we have already
16647 discovered the value of a symbol, or the address of the frag involved
16648 we must account for the offset by +8, as the OS loader will never see the reloc.
16649 see fixup_segment() in write.c
16650 The S_IS_EXTERNAL test handles the case of global symbols.
16651 Those need the calculated base, not just the pipe compensation the linker will need. */
16653 && fixP
->fx_addsy
!= NULL
16654 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
16655 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
16662 /* ARM mode loads relative to PC are also offset by +8. Unlike
16663 branches, the Windows CE loader *does* expect the relocation
16664 to take this into account. */
16665 case BFD_RELOC_ARM_OFFSET_IMM
:
16666 case BFD_RELOC_ARM_OFFSET_IMM8
:
16667 case BFD_RELOC_ARM_HWLITERAL
:
16668 case BFD_RELOC_ARM_LITERAL
:
16669 case BFD_RELOC_ARM_CP_OFF_IMM
:
16673 /* Other PC-relative relocations are un-offset. */
16679 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
16680 Otherwise we have no need to default values of symbols. */
16683 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
16686 if (name
[0] == '_' && name
[1] == 'G'
16687 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
16691 if (symbol_find (name
))
16692 as_bad ("GOT already in the symbol table");
16694 GOT_symbol
= symbol_new (name
, undefined_section
,
16695 (valueT
) 0, & zero_address_frag
);
16705 /* Subroutine of md_apply_fix. Check to see if an immediate can be
16706 computed as two separate immediate values, added together. We
16707 already know that this value cannot be computed by just one ARM
16710 static unsigned int
16711 validate_immediate_twopart (unsigned int val
,
16712 unsigned int * highpart
)
16717 for (i
= 0; i
< 32; i
+= 2)
16718 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
16724 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
16726 else if (a
& 0xff0000)
16728 if (a
& 0xff000000)
16730 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
16734 assert (a
& 0xff000000);
16735 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
16738 return (a
& 0xff) | (i
<< 7);
16745 validate_offset_imm (unsigned int val
, int hwse
)
16747 if ((hwse
&& val
> 255) || val
> 4095)
16752 /* Subroutine of md_apply_fix. Do those data_ops which can take a
16753 negative immediate constant by altering the instruction. A bit of
16758 by inverting the second operand, and
16761 by negating the second operand. */
16764 negate_data_op (unsigned long * instruction
,
16765 unsigned long value
)
16768 unsigned long negated
, inverted
;
16770 negated
= encode_arm_immediate (-value
);
16771 inverted
= encode_arm_immediate (~value
);
16773 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
16776 /* First negates. */
16777 case OPCODE_SUB
: /* ADD <-> SUB */
16778 new_inst
= OPCODE_ADD
;
16783 new_inst
= OPCODE_SUB
;
16787 case OPCODE_CMP
: /* CMP <-> CMN */
16788 new_inst
= OPCODE_CMN
;
16793 new_inst
= OPCODE_CMP
;
16797 /* Now Inverted ops. */
16798 case OPCODE_MOV
: /* MOV <-> MVN */
16799 new_inst
= OPCODE_MVN
;
16804 new_inst
= OPCODE_MOV
;
16808 case OPCODE_AND
: /* AND <-> BIC */
16809 new_inst
= OPCODE_BIC
;
16814 new_inst
= OPCODE_AND
;
16818 case OPCODE_ADC
: /* ADC <-> SBC */
16819 new_inst
= OPCODE_SBC
;
16824 new_inst
= OPCODE_ADC
;
16828 /* We cannot do anything. */
16833 if (value
== (unsigned) FAIL
)
16836 *instruction
&= OPCODE_MASK
;
16837 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
16841 /* Like negate_data_op, but for Thumb-2. */
16843 static unsigned int
16844 thumb32_negate_data_op (offsetT
*instruction
, offsetT value
)
16848 offsetT negated
, inverted
;
16850 negated
= encode_thumb32_immediate (-value
);
16851 inverted
= encode_thumb32_immediate (~value
);
16853 rd
= (*instruction
>> 8) & 0xf;
16854 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
16857 /* ADD <-> SUB. Includes CMP <-> CMN. */
16858 case T2_OPCODE_SUB
:
16859 new_inst
= T2_OPCODE_ADD
;
16863 case T2_OPCODE_ADD
:
16864 new_inst
= T2_OPCODE_SUB
;
16868 /* ORR <-> ORN. Includes MOV <-> MVN. */
16869 case T2_OPCODE_ORR
:
16870 new_inst
= T2_OPCODE_ORN
;
16874 case T2_OPCODE_ORN
:
16875 new_inst
= T2_OPCODE_ORR
;
16879 /* AND <-> BIC. TST has no inverted equivalent. */
16880 case T2_OPCODE_AND
:
16881 new_inst
= T2_OPCODE_BIC
;
16888 case T2_OPCODE_BIC
:
16889 new_inst
= T2_OPCODE_AND
;
16894 case T2_OPCODE_ADC
:
16895 new_inst
= T2_OPCODE_SBC
;
16899 case T2_OPCODE_SBC
:
16900 new_inst
= T2_OPCODE_ADC
;
16904 /* We cannot do anything. */
16912 *instruction
&= T2_OPCODE_MASK
;
16913 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
16917 /* Read a 32-bit thumb instruction from buf. */
16918 static unsigned long
16919 get_thumb32_insn (char * buf
)
16921 unsigned long insn
;
16922 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
16923 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
16929 /* We usually want to set the low bit on the address of thumb function
16930 symbols. In particular .word foo - . should have the low bit set.
16931 Generic code tries to fold the difference of two symbols to
16932 a constant. Prevent this and force a relocation when the first symbols
16933 is a thumb function. */
16935 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
16937 if (op
== O_subtract
16938 && l
->X_op
== O_symbol
16939 && r
->X_op
== O_symbol
16940 && THUMB_IS_FUNC (l
->X_add_symbol
))
16942 l
->X_op
= O_subtract
;
16943 l
->X_op_symbol
= r
->X_add_symbol
;
16944 l
->X_add_number
-= r
->X_add_number
;
16947 /* Process as normal. */
16952 md_apply_fix (fixS
* fixP
,
16956 offsetT value
= * valP
;
16958 unsigned int newimm
;
16959 unsigned long temp
;
16961 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
16963 assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
16965 /* Note whether this will delete the relocation. */
16966 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
16969 /* On a 64-bit host, silently truncate 'value' to 32 bits for
16970 consistency with the behavior on 32-bit hosts. Remember value
16972 value
&= 0xffffffff;
16973 value
^= 0x80000000;
16974 value
-= 0x80000000;
16977 fixP
->fx_addnumber
= value
;
16979 /* Same treatment for fixP->fx_offset. */
16980 fixP
->fx_offset
&= 0xffffffff;
16981 fixP
->fx_offset
^= 0x80000000;
16982 fixP
->fx_offset
-= 0x80000000;
16984 switch (fixP
->fx_r_type
)
16986 case BFD_RELOC_NONE
:
16987 /* This will need to go in the object file. */
16991 case BFD_RELOC_ARM_IMMEDIATE
:
16992 /* We claim that this fixup has been processed here,
16993 even if in fact we generate an error because we do
16994 not have a reloc for it, so tc_gen_reloc will reject it. */
16998 && ! S_IS_DEFINED (fixP
->fx_addsy
))
17000 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17001 _("undefined symbol %s used as an immediate value"),
17002 S_GET_NAME (fixP
->fx_addsy
));
17006 newimm
= encode_arm_immediate (value
);
17007 temp
= md_chars_to_number (buf
, INSN_SIZE
);
17009 /* If the instruction will fail, see if we can fix things up by
17010 changing the opcode. */
17011 if (newimm
== (unsigned int) FAIL
17012 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
17014 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17015 _("invalid constant (%lx) after fixup"),
17016 (unsigned long) value
);
17020 newimm
|= (temp
& 0xfffff000);
17021 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
17024 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
17026 unsigned int highpart
= 0;
17027 unsigned int newinsn
= 0xe1a00000; /* nop. */
17029 newimm
= encode_arm_immediate (value
);
17030 temp
= md_chars_to_number (buf
, INSN_SIZE
);
17032 /* If the instruction will fail, see if we can fix things up by
17033 changing the opcode. */
17034 if (newimm
== (unsigned int) FAIL
17035 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
17037 /* No ? OK - try using two ADD instructions to generate
17039 newimm
= validate_immediate_twopart (value
, & highpart
);
17041 /* Yes - then make sure that the second instruction is
17043 if (newimm
!= (unsigned int) FAIL
)
17045 /* Still No ? Try using a negated value. */
17046 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
17047 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
17048 /* Otherwise - give up. */
17051 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17052 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17057 /* Replace the first operand in the 2nd instruction (which
17058 is the PC) with the destination register. We have
17059 already added in the PC in the first instruction and we
17060 do not want to do it again. */
17061 newinsn
&= ~ 0xf0000;
17062 newinsn
|= ((newinsn
& 0x0f000) << 4);
17065 newimm
|= (temp
& 0xfffff000);
17066 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
17068 highpart
|= (newinsn
& 0xfffff000);
17069 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
17073 case BFD_RELOC_ARM_OFFSET_IMM
:
17074 if (!fixP
->fx_done
&& seg
->use_rela_p
)
17077 case BFD_RELOC_ARM_LITERAL
:
17083 if (validate_offset_imm (value
, 0) == FAIL
)
17085 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
17086 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17087 _("invalid literal constant: pool needs to be closer"));
17089 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17090 _("bad immediate value for offset (%ld)"),
17095 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17096 newval
&= 0xff7ff000;
17097 newval
|= value
| (sign
? INDEX_UP
: 0);
17098 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17101 case BFD_RELOC_ARM_OFFSET_IMM8
:
17102 case BFD_RELOC_ARM_HWLITERAL
:
17108 if (validate_offset_imm (value
, 1) == FAIL
)
17110 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
17111 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17112 _("invalid literal constant: pool needs to be closer"));
17114 as_bad (_("bad immediate value for half-word offset (%ld)"),
17119 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17120 newval
&= 0xff7ff0f0;
17121 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
17122 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17125 case BFD_RELOC_ARM_T32_OFFSET_U8
:
17126 if (value
< 0 || value
> 1020 || value
% 4 != 0)
17127 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17128 _("bad immediate value for offset (%ld)"), (long) value
);
17131 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
17133 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
17136 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
17137 /* This is a complicated relocation used for all varieties of Thumb32
17138 load/store instruction with immediate offset:
17140 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17141 *4, optional writeback(W)
17142 (doubleword load/store)
17144 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17145 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17146 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17147 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17148 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17150 Uppercase letters indicate bits that are already encoded at
17151 this point. Lowercase letters are our problem. For the
17152 second block of instructions, the secondary opcode nybble
17153 (bits 8..11) is present, and bit 23 is zero, even if this is
17154 a PC-relative operation. */
17155 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17157 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
17159 if ((newval
& 0xf0000000) == 0xe0000000)
17161 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17163 newval
|= (1 << 23);
17166 if (value
% 4 != 0)
17168 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17169 _("offset not a multiple of 4"));
17175 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17176 _("offset out of range"));
17181 else if ((newval
& 0x000f0000) == 0x000f0000)
17183 /* PC-relative, 12-bit offset. */
17185 newval
|= (1 << 23);
17190 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17191 _("offset out of range"));
17196 else if ((newval
& 0x00000100) == 0x00000100)
17198 /* Writeback: 8-bit, +/- offset. */
17200 newval
|= (1 << 9);
17205 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17206 _("offset out of range"));
17211 else if ((newval
& 0x00000f00) == 0x00000e00)
17213 /* T-instruction: positive 8-bit offset. */
17214 if (value
< 0 || value
> 0xff)
17216 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17217 _("offset out of range"));
17225 /* Positive 12-bit or negative 8-bit offset. */
17229 newval
|= (1 << 23);
17239 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17240 _("offset out of range"));
17247 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
17248 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
17251 case BFD_RELOC_ARM_SHIFT_IMM
:
17252 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17253 if (((unsigned long) value
) > 32
17255 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
17257 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17258 _("shift expression is too large"));
17263 /* Shifts of zero must be done as lsl. */
17265 else if (value
== 32)
17267 newval
&= 0xfffff07f;
17268 newval
|= (value
& 0x1f) << 7;
17269 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17272 case BFD_RELOC_ARM_T32_IMMEDIATE
:
17273 case BFD_RELOC_ARM_T32_IMM12
:
17274 case BFD_RELOC_ARM_T32_ADD_PC12
:
17275 /* We claim that this fixup has been processed here,
17276 even if in fact we generate an error because we do
17277 not have a reloc for it, so tc_gen_reloc will reject it. */
17281 && ! S_IS_DEFINED (fixP
->fx_addsy
))
17283 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17284 _("undefined symbol %s used as an immediate value"),
17285 S_GET_NAME (fixP
->fx_addsy
));
17289 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17291 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
17293 /* FUTURE: Implement analogue of negate_data_op for T32. */
17294 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
)
17296 newimm
= encode_thumb32_immediate (value
);
17297 if (newimm
== (unsigned int) FAIL
)
17298 newimm
= thumb32_negate_data_op (&newval
, value
);
17302 /* 12 bit immediate for addw/subw. */
17306 newval
^= 0x00a00000;
17309 newimm
= (unsigned int) FAIL
;
17314 if (newimm
== (unsigned int)FAIL
)
17316 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17317 _("invalid constant (%lx) after fixup"),
17318 (unsigned long) value
);
17322 newval
|= (newimm
& 0x800) << 15;
17323 newval
|= (newimm
& 0x700) << 4;
17324 newval
|= (newimm
& 0x0ff);
17326 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
17327 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
17330 case BFD_RELOC_ARM_SMC
:
17331 if (((unsigned long) value
) > 0xffff)
17332 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17333 _("invalid smc expression"));
17334 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17335 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
17336 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17339 case BFD_RELOC_ARM_SWI
:
17340 if (fixP
->tc_fix_data
!= 0)
17342 if (((unsigned long) value
) > 0xff)
17343 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17344 _("invalid swi expression"));
17345 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17347 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17351 if (((unsigned long) value
) > 0x00ffffff)
17352 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17353 _("invalid swi expression"));
17354 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17356 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17360 case BFD_RELOC_ARM_MULTI
:
17361 if (((unsigned long) value
) > 0xffff)
17362 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17363 _("invalid expression in load/store multiple"));
17364 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
17365 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17369 case BFD_RELOC_ARM_PCREL_CALL
:
17370 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17371 if ((newval
& 0xf0000000) == 0xf0000000)
17375 goto arm_branch_common
;
17377 case BFD_RELOC_ARM_PCREL_JUMP
:
17378 case BFD_RELOC_ARM_PLT32
:
17380 case BFD_RELOC_ARM_PCREL_BRANCH
:
17382 goto arm_branch_common
;
17384 case BFD_RELOC_ARM_PCREL_BLX
:
17387 /* We are going to store value (shifted right by two) in the
17388 instruction, in a 24 bit, signed field. Bits 26 through 32 either
17389 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
17390 also be be clear. */
17392 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17393 _("misaligned branch destination"));
17394 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
17395 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
17396 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17397 _("branch out of range"));
17399 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17401 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17402 newval
|= (value
>> 2) & 0x00ffffff;
17403 /* Set the H bit on BLX instructions. */
17407 newval
|= 0x01000000;
17409 newval
&= ~0x01000000;
17411 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17415 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CZB */
17416 /* CZB can only branch forward. */
17418 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17419 _("branch out of range"));
17421 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17423 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17424 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
17425 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17429 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
17430 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
17431 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17432 _("branch out of range"));
17434 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17436 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17437 newval
|= (value
& 0x1ff) >> 1;
17438 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17442 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
17443 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
17444 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17445 _("branch out of range"));
17447 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17449 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17450 newval
|= (value
& 0xfff) >> 1;
17451 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17455 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
17456 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
17457 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17458 _("conditional branch out of range"));
17460 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17463 addressT S
, J1
, J2
, lo
, hi
;
17465 S
= (value
& 0x00100000) >> 20;
17466 J2
= (value
& 0x00080000) >> 19;
17467 J1
= (value
& 0x00040000) >> 18;
17468 hi
= (value
& 0x0003f000) >> 12;
17469 lo
= (value
& 0x00000ffe) >> 1;
17471 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17472 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
17473 newval
|= (S
<< 10) | hi
;
17474 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
17475 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17476 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
17480 case BFD_RELOC_THUMB_PCREL_BLX
:
17481 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
17482 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
17483 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17484 _("branch out of range"));
17486 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
17487 /* For a BLX instruction, make sure that the relocation is rounded up
17488 to a word boundary. This follows the semantics of the instruction
17489 which specifies that bit 1 of the target address will come from bit
17490 1 of the base address. */
17491 value
= (value
+ 1) & ~ 1;
17493 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17497 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17498 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
17499 newval
|= (value
& 0x7fffff) >> 12;
17500 newval2
|= (value
& 0xfff) >> 1;
17501 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17502 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
17506 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
17507 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
17508 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17509 _("branch out of range"));
17511 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17514 addressT S
, I1
, I2
, lo
, hi
;
17516 S
= (value
& 0x01000000) >> 24;
17517 I1
= (value
& 0x00800000) >> 23;
17518 I2
= (value
& 0x00400000) >> 22;
17519 hi
= (value
& 0x003ff000) >> 12;
17520 lo
= (value
& 0x00000ffe) >> 1;
17525 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17526 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
17527 newval
|= (S
<< 10) | hi
;
17528 newval2
|= (I1
<< 13) | (I2
<< 11) | lo
;
17529 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17530 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
17535 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17536 md_number_to_chars (buf
, value
, 1);
17540 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17541 md_number_to_chars (buf
, value
, 2);
17545 case BFD_RELOC_ARM_TLS_GD32
:
17546 case BFD_RELOC_ARM_TLS_LE32
:
17547 case BFD_RELOC_ARM_TLS_IE32
:
17548 case BFD_RELOC_ARM_TLS_LDM32
:
17549 case BFD_RELOC_ARM_TLS_LDO32
:
17550 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
17553 case BFD_RELOC_ARM_GOT32
:
17554 case BFD_RELOC_ARM_GOTOFF
:
17555 case BFD_RELOC_ARM_TARGET2
:
17556 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17557 md_number_to_chars (buf
, 0, 4);
17561 case BFD_RELOC_RVA
:
17563 case BFD_RELOC_ARM_TARGET1
:
17564 case BFD_RELOC_ARM_ROSEGREL32
:
17565 case BFD_RELOC_ARM_SBREL32
:
17566 case BFD_RELOC_32_PCREL
:
17567 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17569 /* For WinCE we only do this for pcrel fixups. */
17570 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
17572 md_number_to_chars (buf
, value
, 4);
17576 case BFD_RELOC_ARM_PREL31
:
17577 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17579 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
17580 if ((value
^ (value
>> 1)) & 0x40000000)
17582 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17583 _("rel31 relocation overflow"));
17585 newval
|= value
& 0x7fffffff;
17586 md_number_to_chars (buf
, newval
, 4);
17591 case BFD_RELOC_ARM_CP_OFF_IMM
:
17592 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
17593 if (value
< -1023 || value
> 1023 || (value
& 3))
17594 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17595 _("co-processor offset out of range"));
17600 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
17601 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
17602 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17604 newval
= get_thumb32_insn (buf
);
17605 newval
&= 0xff7fff00;
17606 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
17608 newval
&= ~WRITE_BACK
;
17609 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
17610 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
17611 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17613 put_thumb32_insn (buf
, newval
);
17616 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
17617 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
17618 if (value
< -255 || value
> 255)
17619 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17620 _("co-processor offset out of range"));
17622 goto cp_off_common
;
17624 case BFD_RELOC_ARM_THUMB_OFFSET
:
17625 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17626 /* Exactly what ranges, and where the offset is inserted depends
17627 on the type of instruction, we can establish this from the
17629 switch (newval
>> 12)
17631 case 4: /* PC load. */
17632 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
17633 forced to zero for these loads; md_pcrel_from has already
17634 compensated for this. */
17636 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17637 _("invalid offset, target not word aligned (0x%08lX)"),
17638 (((unsigned long) fixP
->fx_frag
->fr_address
17639 + (unsigned long) fixP
->fx_where
) & ~3)
17640 + (unsigned long) value
);
17642 if (value
& ~0x3fc)
17643 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17644 _("invalid offset, value too big (0x%08lX)"),
17647 newval
|= value
>> 2;
17650 case 9: /* SP load/store. */
17651 if (value
& ~0x3fc)
17652 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17653 _("invalid offset, value too big (0x%08lX)"),
17655 newval
|= value
>> 2;
17658 case 6: /* Word load/store. */
17660 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17661 _("invalid offset, value too big (0x%08lX)"),
17663 newval
|= value
<< 4; /* 6 - 2. */
17666 case 7: /* Byte load/store. */
17668 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17669 _("invalid offset, value too big (0x%08lX)"),
17671 newval
|= value
<< 6;
17674 case 8: /* Halfword load/store. */
17676 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17677 _("invalid offset, value too big (0x%08lX)"),
17679 newval
|= value
<< 5; /* 6 - 1. */
17683 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17684 "Unable to process relocation for thumb opcode: %lx",
17685 (unsigned long) newval
);
17688 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17691 case BFD_RELOC_ARM_THUMB_ADD
:
17692 /* This is a complicated relocation, since we use it for all of
17693 the following immediate relocations:
17697 9bit ADD/SUB SP word-aligned
17698 10bit ADD PC/SP word-aligned
17700 The type of instruction being processed is encoded in the
17707 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17709 int rd
= (newval
>> 4) & 0xf;
17710 int rs
= newval
& 0xf;
17711 int subtract
= !!(newval
& 0x8000);
17713 /* Check for HI regs, only very restricted cases allowed:
17714 Adjusting SP, and using PC or SP to get an address. */
17715 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
17716 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
17717 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17718 _("invalid Hi register with immediate"));
17720 /* If value is negative, choose the opposite instruction. */
17724 subtract
= !subtract
;
17726 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17727 _("immediate value out of range"));
17732 if (value
& ~0x1fc)
17733 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17734 _("invalid immediate for stack address calculation"));
17735 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
17736 newval
|= value
>> 2;
17738 else if (rs
== REG_PC
|| rs
== REG_SP
)
17740 if (subtract
|| value
& ~0x3fc)
17741 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17742 _("invalid immediate for address calculation (value = 0x%08lX)"),
17743 (unsigned long) value
);
17744 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
17746 newval
|= value
>> 2;
17751 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17752 _("immediate value out of range"));
17753 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
17754 newval
|= (rd
<< 8) | value
;
17759 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17760 _("immediate value out of range"));
17761 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
17762 newval
|= rd
| (rs
<< 3) | (value
<< 6);
17765 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17768 case BFD_RELOC_ARM_THUMB_IMM
:
17769 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17770 if (value
< 0 || value
> 255)
17771 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17772 _("invalid immediate: %ld is too large"),
17775 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17778 case BFD_RELOC_ARM_THUMB_SHIFT
:
17779 /* 5bit shift value (0..32). LSL cannot take 32. */
17780 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
17781 temp
= newval
& 0xf800;
17782 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
17783 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17784 _("invalid shift value: %ld"), (long) value
);
17785 /* Shifts of zero must be encoded as LSL. */
17787 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
17788 /* Shifts of 32 are encoded as zero. */
17789 else if (value
== 32)
17791 newval
|= value
<< 6;
17792 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17795 case BFD_RELOC_VTABLE_INHERIT
:
17796 case BFD_RELOC_VTABLE_ENTRY
:
17800 case BFD_RELOC_ARM_MOVW
:
17801 case BFD_RELOC_ARM_MOVT
:
17802 case BFD_RELOC_ARM_THUMB_MOVW
:
17803 case BFD_RELOC_ARM_THUMB_MOVT
:
17804 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17806 /* REL format relocations are limited to a 16-bit addend. */
17807 if (!fixP
->fx_done
)
17809 if (value
< -0x1000 || value
> 0xffff)
17810 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17811 _("offset too big"));
17813 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
17814 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
17819 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
17820 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
17822 newval
= get_thumb32_insn (buf
);
17823 newval
&= 0xfbf08f00;
17824 newval
|= (value
& 0xf000) << 4;
17825 newval
|= (value
& 0x0800) << 15;
17826 newval
|= (value
& 0x0700) << 4;
17827 newval
|= (value
& 0x00ff);
17828 put_thumb32_insn (buf
, newval
);
17832 newval
= md_chars_to_number (buf
, 4);
17833 newval
&= 0xfff0f000;
17834 newval
|= value
& 0x0fff;
17835 newval
|= (value
& 0xf000) << 4;
17836 md_number_to_chars (buf
, newval
, 4);
17841 case BFD_RELOC_UNUSED
:
17843 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17844 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
17848 /* Translate internal representation of relocation info to BFD target
17852 tc_gen_reloc (asection
*section
, fixS
*fixp
)
17855 bfd_reloc_code_real_type code
;
17857 reloc
= xmalloc (sizeof (arelent
));
17859 reloc
->sym_ptr_ptr
= xmalloc (sizeof (asymbol
*));
17860 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
17861 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
17863 if (fixp
->fx_pcrel
)
17865 if (section
->use_rela_p
)
17866 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
17868 fixp
->fx_offset
= reloc
->address
;
17870 reloc
->addend
= fixp
->fx_offset
;
17872 switch (fixp
->fx_r_type
)
17875 if (fixp
->fx_pcrel
)
17877 code
= BFD_RELOC_8_PCREL
;
17882 if (fixp
->fx_pcrel
)
17884 code
= BFD_RELOC_16_PCREL
;
17889 if (fixp
->fx_pcrel
)
17891 code
= BFD_RELOC_32_PCREL
;
17895 case BFD_RELOC_ARM_MOVW
:
17896 if (fixp
->fx_pcrel
)
17898 code
= BFD_RELOC_ARM_MOVW_PCREL
;
17902 case BFD_RELOC_ARM_MOVT
:
17903 if (fixp
->fx_pcrel
)
17905 code
= BFD_RELOC_ARM_MOVT_PCREL
;
17909 case BFD_RELOC_ARM_THUMB_MOVW
:
17910 if (fixp
->fx_pcrel
)
17912 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
17916 case BFD_RELOC_ARM_THUMB_MOVT
:
17917 if (fixp
->fx_pcrel
)
17919 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
17923 case BFD_RELOC_NONE
:
17924 case BFD_RELOC_ARM_PCREL_BRANCH
:
17925 case BFD_RELOC_ARM_PCREL_BLX
:
17926 case BFD_RELOC_RVA
:
17927 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
17928 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
17929 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
17930 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
17931 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
17932 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
17933 case BFD_RELOC_THUMB_PCREL_BLX
:
17934 case BFD_RELOC_VTABLE_ENTRY
:
17935 case BFD_RELOC_VTABLE_INHERIT
:
17936 code
= fixp
->fx_r_type
;
17939 case BFD_RELOC_ARM_LITERAL
:
17940 case BFD_RELOC_ARM_HWLITERAL
:
17941 /* If this is called then the a literal has
17942 been referenced across a section boundary. */
17943 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
17944 _("literal referenced across section boundary"));
17948 case BFD_RELOC_ARM_GOT32
:
17949 case BFD_RELOC_ARM_GOTOFF
:
17950 case BFD_RELOC_ARM_PLT32
:
17951 case BFD_RELOC_ARM_TARGET1
:
17952 case BFD_RELOC_ARM_ROSEGREL32
:
17953 case BFD_RELOC_ARM_SBREL32
:
17954 case BFD_RELOC_ARM_PREL31
:
17955 case BFD_RELOC_ARM_TARGET2
:
17956 case BFD_RELOC_ARM_TLS_LE32
:
17957 case BFD_RELOC_ARM_TLS_LDO32
:
17958 case BFD_RELOC_ARM_PCREL_CALL
:
17959 case BFD_RELOC_ARM_PCREL_JUMP
:
17960 code
= fixp
->fx_r_type
;
17963 case BFD_RELOC_ARM_TLS_GD32
:
17964 case BFD_RELOC_ARM_TLS_IE32
:
17965 case BFD_RELOC_ARM_TLS_LDM32
:
17966 /* BFD will include the symbol's address in the addend.
17967 But we don't want that, so subtract it out again here. */
17968 if (!S_IS_COMMON (fixp
->fx_addsy
))
17969 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
17970 code
= fixp
->fx_r_type
;
17974 case BFD_RELOC_ARM_IMMEDIATE
:
17975 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
17976 _("internal relocation (type: IMMEDIATE) not fixed up"));
17979 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
17980 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
17981 _("ADRL used for a symbol not defined in the same file"));
17984 case BFD_RELOC_ARM_OFFSET_IMM
:
17985 if (section
->use_rela_p
)
17987 code
= fixp
->fx_r_type
;
17991 if (fixp
->fx_addsy
!= NULL
17992 && !S_IS_DEFINED (fixp
->fx_addsy
)
17993 && S_IS_LOCAL (fixp
->fx_addsy
))
17995 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
17996 _("undefined local label `%s'"),
17997 S_GET_NAME (fixp
->fx_addsy
));
18001 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18002 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
18009 switch (fixp
->fx_r_type
)
18011 case BFD_RELOC_NONE
: type
= "NONE"; break;
18012 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
18013 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
18014 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
18015 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
18016 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
18017 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
18018 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
18019 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
18020 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
18021 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
18022 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
18023 default: type
= _("<unknown>"); break;
18025 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18026 _("cannot represent %s relocation in this object file format"),
18033 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
18035 && fixp
->fx_addsy
== GOT_symbol
)
18037 code
= BFD_RELOC_ARM_GOTPC
;
18038 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
18042 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
18044 if (reloc
->howto
== NULL
)
18046 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18047 _("cannot represent %s relocation in this object file format"),
18048 bfd_get_reloc_code_name (code
));
18052 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
18053 vtable entry to be used in the relocation's section offset. */
18054 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
18055 reloc
->address
= fixp
->fx_offset
;
18060 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
18063 cons_fix_new_arm (fragS
* frag
,
18068 bfd_reloc_code_real_type type
;
18072 FIXME: @@ Should look at CPU word size. */
18076 type
= BFD_RELOC_8
;
18079 type
= BFD_RELOC_16
;
18083 type
= BFD_RELOC_32
;
18086 type
= BFD_RELOC_64
;
18090 fix_new_exp (frag
, where
, (int) size
, exp
, pcrel
, type
);
18093 #if defined OBJ_COFF || defined OBJ_ELF
18095 arm_validate_fix (fixS
* fixP
)
18097 /* If the destination of the branch is a defined symbol which does not have
18098 the THUMB_FUNC attribute, then we must be calling a function which has
18099 the (interfacearm) attribute. We look for the Thumb entry point to that
18100 function and change the branch to refer to that function instead. */
18101 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
18102 && fixP
->fx_addsy
!= NULL
18103 && S_IS_DEFINED (fixP
->fx_addsy
)
18104 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
18106 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
18112 arm_force_relocation (struct fix
* fixp
)
18114 #if defined (OBJ_COFF) && defined (TE_PE)
18115 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
18119 /* Resolve these relocations even if the symbol is extern or weak. */
18120 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
18121 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
18122 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
18123 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
18124 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
18125 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
)
18128 return generic_force_reloc (fixp
);
18133 arm_fix_adjustable (fixS
* fixP
)
18135 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
18136 local labels from being added to the output symbol table when they
18137 are used with the ADRL pseudo op. The ADRL relocation should always
18138 be resolved before the binbary is emitted, so it is safe to say that
18139 it is adjustable. */
18140 if (fixP
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
)
18143 /* This is a hack for the gas/all/redef2.s test. This test causes symbols
18144 to be cloned, and without this test relocs would still be generated
18145 against the original, pre-cloned symbol. Such symbols would not appear
18146 in the symbol table however, and so a valid reloc could not be
18147 generated. So check to see if the fixup is against a symbol which has
18148 been removed from the symbol chain, and if it is, then allow it to be
18149 adjusted into a reloc against a section symbol. */
18150 if (fixP
->fx_addsy
!= NULL
18151 && ! S_IS_LOCAL (fixP
->fx_addsy
)
18152 && symbol_next (fixP
->fx_addsy
) == NULL
18153 && symbol_next (fixP
->fx_addsy
) == symbol_previous (fixP
->fx_addsy
))
18161 /* Relocations against function names must be left unadjusted,
18162 so that the linker can use this information to generate interworking
18163 stubs. The MIPS version of this function
18164 also prevents relocations that are mips-16 specific, but I do not
18165 know why it does this.
18168 There is one other problem that ought to be addressed here, but
18169 which currently is not: Taking the address of a label (rather
18170 than a function) and then later jumping to that address. Such
18171 addresses also ought to have their bottom bit set (assuming that
18172 they reside in Thumb code), but at the moment they will not. */
18175 arm_fix_adjustable (fixS
* fixP
)
18177 if (fixP
->fx_addsy
== NULL
)
18180 /* Preserve relocations against symbols with function type. */
18181 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
18184 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
18185 && fixP
->fx_subsy
== NULL
)
18188 /* We need the symbol name for the VTABLE entries. */
18189 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
18190 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
18193 /* Don't allow symbols to be discarded on GOT related relocs. */
18194 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
18195 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
18196 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
18197 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
18198 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
18199 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
18200 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
18201 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
18202 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
18209 elf32_arm_target_format (void)
18212 return (target_big_endian
18213 ? "elf32-bigarm-symbian"
18214 : "elf32-littlearm-symbian");
18215 #elif defined (TE_VXWORKS)
18216 return (target_big_endian
18217 ? "elf32-bigarm-vxworks"
18218 : "elf32-littlearm-vxworks");
18220 if (target_big_endian
)
18221 return "elf32-bigarm";
18223 return "elf32-littlearm";
18228 armelf_frob_symbol (symbolS
* symp
,
18231 elf_frob_symbol (symp
, puntp
);
18235 /* MD interface: Finalization. */
18237 /* A good place to do this, although this was probably not intended
18238 for this kind of use. We need to dump the literal pool before
18239 references are made to a null symbol pointer. */
18244 literal_pool
* pool
;
18246 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
18248 /* Put it at the end of the relevent section. */
18249 subseg_set (pool
->section
, pool
->sub_section
);
18251 arm_elf_change_section ();
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ones.	 */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols in the COFF native record.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
18334 /* MD interface: Initialization. */
18337 set_constant_flonums (void)
18341 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
18342 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
18352 if ( (arm_ops_hsh
= hash_new ()) == NULL
18353 || (arm_cond_hsh
= hash_new ()) == NULL
18354 || (arm_shift_hsh
= hash_new ()) == NULL
18355 || (arm_psr_hsh
= hash_new ()) == NULL
18356 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
18357 || (arm_reg_hsh
= hash_new ()) == NULL
18358 || (arm_reloc_hsh
= hash_new ()) == NULL
18359 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
18360 as_fatal (_("virtual memory exhausted"));
18362 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
18363 hash_insert (arm_ops_hsh
, insns
[i
].template, (PTR
) (insns
+ i
));
18364 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
18365 hash_insert (arm_cond_hsh
, conds
[i
].template, (PTR
) (conds
+ i
));
18366 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
18367 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (PTR
) (shift_names
+ i
));
18368 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
18369 hash_insert (arm_psr_hsh
, psrs
[i
].template, (PTR
) (psrs
+ i
));
18370 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
18371 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template, (PTR
) (v7m_psrs
+ i
));
18372 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
18373 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (PTR
) (reg_names
+ i
));
18375 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
18377 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template,
18378 (PTR
) (barrier_opt_names
+ i
));
18380 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
18381 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (PTR
) (reloc_names
+ i
));
18384 set_constant_flonums ();
18386 /* Set the cpu variant based on the command-line options. We prefer
18387 -mcpu= over -march= if both are set (as for GCC); and we prefer
18388 -mfpu= over any other way of setting the floating point unit.
18389 Use of legacy options with new options are faulted. */
18392 if (mcpu_cpu_opt
|| march_cpu_opt
)
18393 as_bad (_("use of old and new-style options to set CPU type"));
18395 mcpu_cpu_opt
= legacy_cpu
;
18397 else if (!mcpu_cpu_opt
)
18398 mcpu_cpu_opt
= march_cpu_opt
;
18403 as_bad (_("use of old and new-style options to set FPU type"));
18405 mfpu_opt
= legacy_fpu
;
18407 else if (!mfpu_opt
)
18409 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
18410 /* Some environments specify a default FPU. If they don't, infer it
18411 from the processor. */
18413 mfpu_opt
= mcpu_fpu_opt
;
18415 mfpu_opt
= march_fpu_opt
;
18417 mfpu_opt
= &fpu_default
;
18424 mfpu_opt
= &fpu_default
;
18425 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
18426 mfpu_opt
= &fpu_arch_vfp_v2
;
18428 mfpu_opt
= &fpu_arch_fpa
;
18434 mcpu_cpu_opt
= &cpu_default
;
18435 selected_cpu
= cpu_default
;
18439 selected_cpu
= *mcpu_cpu_opt
;
18441 mcpu_cpu_opt
= &arm_arch_any
;
18444 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18446 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
18448 #if defined OBJ_COFF || defined OBJ_ELF
18450 unsigned int flags
= 0;
18452 #if defined OBJ_ELF
18453 flags
= meabi_flags
;
18455 switch (meabi_flags
)
18457 case EF_ARM_EABI_UNKNOWN
:
18459 /* Set the flags in the private structure. */
18460 if (uses_apcs_26
) flags
|= F_APCS26
;
18461 if (support_interwork
) flags
|= F_INTERWORK
;
18462 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
18463 if (pic_code
) flags
|= F_PIC
;
18464 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
18465 flags
|= F_SOFT_FLOAT
;
18467 switch (mfloat_abi_opt
)
18469 case ARM_FLOAT_ABI_SOFT
:
18470 case ARM_FLOAT_ABI_SOFTFP
:
18471 flags
|= F_SOFT_FLOAT
;
18474 case ARM_FLOAT_ABI_HARD
:
18475 if (flags
& F_SOFT_FLOAT
)
18476 as_bad (_("hard-float conflicts with specified fpu"));
18480 /* Using pure-endian doubles (even if soft-float). */
18481 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
18482 flags
|= F_VFP_FLOAT
;
18484 #if defined OBJ_ELF
18485 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
18486 flags
|= EF_ARM_MAVERICK_FLOAT
;
18489 case EF_ARM_EABI_VER4
:
18490 case EF_ARM_EABI_VER5
:
18491 /* No additional flags to set. */
18498 bfd_set_private_flags (stdoutput
, flags
);
18500 /* We have run out flags in the COFF header to encode the
18501 status of ATPCS support, so instead we create a dummy,
18502 empty, debug section called .arm.atpcs. */
18507 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
18511 bfd_set_section_flags
18512 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
18513 bfd_set_section_size (stdoutput
, sec
, 0);
18514 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
18520 /* Record the CPU type as well. */
18521 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
18522 mach
= bfd_mach_arm_iWMMXt
;
18523 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
18524 mach
= bfd_mach_arm_XScale
;
18525 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
18526 mach
= bfd_mach_arm_ep9312
;
18527 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
18528 mach
= bfd_mach_arm_5TE
;
18529 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
18531 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
18532 mach
= bfd_mach_arm_5T
;
18534 mach
= bfd_mach_arm_5
;
18536 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
18538 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
18539 mach
= bfd_mach_arm_4T
;
18541 mach
= bfd_mach_arm_4
;
18543 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
18544 mach
= bfd_mach_arm_3M
;
18545 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
18546 mach
= bfd_mach_arm_3
;
18547 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
18548 mach
= bfd_mach_arm_2a
;
18549 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
18550 mach
= bfd_mach_arm_2
;
18552 mach
= bfd_mach_arm_unknown
;
18554 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
18557 /* Command line processing. */
18560 Invocation line includes a switch not recognized by the base assembler.
18561 See if it's a processor-specific option.
18563 This routine is somewhat complicated by the need for backwards
18564 compatibility (since older releases of gcc can't be changed).
18565 The new options try to make the interface as compatible as
18568 New options (supported) are:
18570 -mcpu=<cpu name> Assemble for selected processor
18571 -march=<architecture name> Assemble for selected architecture
18572 -mfpu=<fpu architecture> Assemble for selected FPU.
18573 -EB/-mbig-endian Big-endian
18574 -EL/-mlittle-endian Little-endian
18575 -k Generate PIC code
18576 -mthumb Start in Thumb mode
18577 -mthumb-interwork Code supports ARM/Thumb interworking
18579 For now we will also provide support for:
18581 -mapcs-32 32-bit Program counter
18582 -mapcs-26 26-bit Program counter
18583 -mapcs-float Floats passed in FP registers
18584 -mapcs-reentrant Reentrant code
18586 (sometime these will probably be replaced with -mapcs=<list of options>
18587 and -matpcs=<list of options>)
18589 The remaining options are only supported for backwards compatibility.
18590 Cpu variants, the arm part is optional:
18591 -m[arm]1 Currently not supported.
18592 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
18593 -m[arm]3 Arm 3 processor
18594 -m[arm]6[xx], Arm 6 processors
18595 -m[arm]7[xx][t][[d]m] Arm 7 processors
18596 -m[arm]8[10] Arm 8 processors
18597 -m[arm]9[20][tdmi] Arm 9 processors
18598 -mstrongarm[110[0]] StrongARM processors
18599 -mxscale XScale processors
18600 -m[arm]v[2345[t[e]]] Arm architectures
18601 -mall All (except the ARM1)
18603 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
18604 -mfpe-old (No float load/store multiples)
18605 -mvfpxd VFP Single precision
18607 -mno-fpu Disable all floating point instructions
18609 The following CPU names are recognized:
18610 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
18611 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
18612 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
18613 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
18614 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
18615 arm10t arm10e, arm1020t, arm1020e, arm10200e,
18616 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
18620 const char * md_shortopts
= "m:k";
18622 #ifdef ARM_BI_ENDIAN
18623 #define OPTION_EB (OPTION_MD_BASE + 0)
18624 #define OPTION_EL (OPTION_MD_BASE + 1)
18626 #if TARGET_BYTES_BIG_ENDIAN
18627 #define OPTION_EB (OPTION_MD_BASE + 0)
18629 #define OPTION_EL (OPTION_MD_BASE + 1)
18633 struct option md_longopts
[] =
18636 {"EB", no_argument
, NULL
, OPTION_EB
},
18639 {"EL", no_argument
, NULL
, OPTION_EL
},
18641 {NULL
, no_argument
, NULL
, 0}
18644 size_t md_longopts_size
= sizeof (md_longopts
);
18646 struct arm_option_table
18648 char *option
; /* Option name to match. */
18649 char *help
; /* Help information. */
18650 int *var
; /* Variable to change. */
18651 int value
; /* What to change it to. */
18652 char *deprecated
; /* If non-null, print this message. */
18655 struct arm_option_table arm_opts
[] =
18657 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
18658 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
18659 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
18660 &support_interwork
, 1, NULL
},
18661 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
18662 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
18663 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
18665 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
18666 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
18667 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
18668 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
18671 /* These are recognized by the assembler, but have no affect on code. */
18672 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
18673 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
18674 {NULL
, NULL
, NULL
, 0, NULL
}
18677 struct arm_legacy_option_table
18679 char *option
; /* Option name to match. */
18680 const arm_feature_set
**var
; /* Variable to change. */
18681 const arm_feature_set value
; /* What to change it to. */
18682 char *deprecated
; /* If non-null, print this message. */
18685 const struct arm_legacy_option_table arm_legacy_opts
[] =
18687 /* DON'T add any new processors to this list -- we want the whole list
18688 to go away... Add them to the processors table instead. */
18689 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
18690 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
18691 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
18692 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
18693 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
18694 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
18695 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
18696 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
18697 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
18698 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
18699 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
18700 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
18701 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
18702 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
18703 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
18704 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
18705 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
18706 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
18707 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
18708 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
18709 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
18710 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
18711 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
18712 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
18713 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
18714 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
18715 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
18716 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
18717 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
18718 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
18719 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
18720 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
18721 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
18722 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
18723 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
18724 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
18725 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
18726 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
18727 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
18728 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
18729 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
18730 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
18731 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
18732 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
18733 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
18734 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
18735 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
18736 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
18737 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
18738 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
18739 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
18740 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
18741 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
18742 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
18743 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
18744 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
18745 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
18746 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
18747 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
18748 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
18749 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
18750 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
18751 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
18752 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
18753 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
18754 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
18755 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
18756 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
18757 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
18758 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
18759 N_("use -mcpu=strongarm110")},
18760 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
18761 N_("use -mcpu=strongarm1100")},
18762 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
18763 N_("use -mcpu=strongarm1110")},
18764 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
18765 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
18766 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
18768 /* Architecture variants -- don't add any more to this list either. */
18769 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
18770 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
18771 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
18772 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
18773 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
18774 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
18775 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
18776 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
18777 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
18778 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
18779 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
18780 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
18781 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
18782 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
18783 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
18784 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
18785 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
18786 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
18788 /* Floating point variants -- don't add any more to this list either. */
18789 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
18790 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
18791 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
18792 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
18793 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
18795 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
18798 struct arm_cpu_option_table
18801 const arm_feature_set value
;
18802 /* For some CPUs we assume an FPU unless the user explicitly sets
18804 const arm_feature_set default_fpu
;
18805 /* The canonical name of the CPU, or NULL to use NAME converted to upper
18807 const char *canonical_name
;
18810 /* This list should, at a minimum, contain all the cpu names
18811 recognized by GCC. */
18812 static const struct arm_cpu_option_table arm_cpus
[] =
18814 {"all", ARM_ANY
, FPU_ARCH_FPA
, NULL
},
18815 {"arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
},
18816 {"arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
},
18817 {"arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
18818 {"arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
18819 {"arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18820 {"arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18821 {"arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18822 {"arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18823 {"arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18824 {"arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18825 {"arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
18826 {"arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18827 {"arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
18828 {"arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18829 {"arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
18830 {"arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18831 {"arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18832 {"arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18833 {"arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18834 {"arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18835 {"arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18836 {"arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18837 {"arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18838 {"arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18839 {"arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18840 {"arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18841 {"arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
18842 {"arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18843 {"arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18844 {"arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18845 {"arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
18846 {"arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
18847 {"strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
18848 {"strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
18849 {"strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
18850 {"strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
18851 {"strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
18852 {"arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18853 {"arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"},
18854 {"arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18855 {"arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18856 {"arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18857 {"arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
18858 /* For V5 or later processors we default to using VFP; but the user
18859 should really set the FPU type explicitly. */
18860 {"arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
18861 {"arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
18862 {"arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
18863 {"arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
18864 {"arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
18865 {"arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
18866 {"arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"},
18867 {"arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
18868 {"arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
18869 {"arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"},
18870 {"arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
18871 {"arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
18872 {"arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
18873 {"arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
18874 {"arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
18875 {"arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"},
18876 {"arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
18877 {"arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
18878 {"arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
18879 {"arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM1026EJ-S"},
18880 {"arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
18881 {"arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"},
18882 {"arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
},
18883 {"arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, "ARM1136JF-S"},
18884 {"arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
},
18885 {"mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, NULL
},
18886 {"mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, NULL
},
18887 {"arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
},
18888 {"arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
},
18889 {"arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
},
18890 {"arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
},
18891 {"cortex-a8", ARM_ARCH_V7A
, ARM_FEATURE(0, FPU_VFP_V3
18892 | FPU_NEON_EXT_V1
),
18894 {"cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, NULL
},
18895 {"cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, NULL
},
18896 /* ??? XSCALE is really an architecture. */
18897 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
18898 /* ??? iwmmxt is not a processor. */
18899 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
},
18900 {"i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
18902 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
), FPU_ARCH_MAVERICK
, "ARM920T"},
18903 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
18906 struct arm_arch_option_table
18909 const arm_feature_set value
;
18910 const arm_feature_set default_fpu
;
18913 /* This list should, at a minimum, contain all the architecture names
18914 recognized by GCC. */
18915 static const struct arm_arch_option_table arm_archs
[] =
18917 {"all", ARM_ANY
, FPU_ARCH_FPA
},
18918 {"armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
},
18919 {"armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
},
18920 {"armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
18921 {"armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
18922 {"armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
},
18923 {"armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
},
18924 {"armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
},
18925 {"armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
},
18926 {"armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
},
18927 {"armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
},
18928 {"armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
},
18929 {"armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
},
18930 {"armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
},
18931 {"armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
},
18932 {"armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
},
18933 {"armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
},
18934 {"armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
},
18935 {"armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
},
18936 {"armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
},
18937 {"armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
},
18938 {"armv6zk", ARM_ARCH_V6ZK
, FPU_ARCH_VFP
},
18939 {"armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
},
18940 {"armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
},
18941 {"armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
},
18942 {"armv6zkt2", ARM_ARCH_V6ZKT2
, FPU_ARCH_VFP
},
18943 {"armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
},
18944 {"armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
18945 {"armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
18946 {"armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
18947 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
},
18948 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
},
18949 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
18952 /* ISA extensions in the co-processor space. */
18953 struct arm_option_cpu_value_table
18956 const arm_feature_set value
;
18959 static const struct arm_option_cpu_value_table arm_extensions
[] =
18961 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK
)},
18962 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE
)},
18963 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT
)},
18964 {NULL
, ARM_ARCH_NONE
}
18967 /* This list should, at a minimum, contain all the fpu names
18968 recognized by GCC. */
18969 static const struct arm_option_cpu_value_table arm_fpus
[] =
18971 {"softfpa", FPU_NONE
},
18972 {"fpe", FPU_ARCH_FPE
},
18973 {"fpe2", FPU_ARCH_FPE
},
18974 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
18975 {"fpa", FPU_ARCH_FPA
},
18976 {"fpa10", FPU_ARCH_FPA
},
18977 {"fpa11", FPU_ARCH_FPA
},
18978 {"arm7500fe", FPU_ARCH_FPA
},
18979 {"softvfp", FPU_ARCH_VFP
},
18980 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
18981 {"vfp", FPU_ARCH_VFP_V2
},
18982 {"vfp9", FPU_ARCH_VFP_V2
},
18983 {"vfp3", FPU_ARCH_VFP_V3
},
18984 {"vfp10", FPU_ARCH_VFP_V2
},
18985 {"vfp10-r0", FPU_ARCH_VFP_V1
},
18986 {"vfpxd", FPU_ARCH_VFP_V1xD
},
18987 {"arm1020t", FPU_ARCH_VFP_V1
},
18988 {"arm1020e", FPU_ARCH_VFP_V2
},
18989 {"arm1136jfs", FPU_ARCH_VFP_V2
},
18990 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
18991 {"maverick", FPU_ARCH_MAVERICK
},
18992 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
18993 {NULL
, ARM_ARCH_NONE
}
/* Generic name -> integer value option table, used for -mfloat-abi= and
   -meabi=.  NOTE(review): the field list was lost in the mangled text;
   reconstructed from the uses below (`opt->name', integer `opt->value'
   assigned to mfloat_abi_opt / meabi_flags) -- confirm against upstream.  */
struct arm_option_value_table
{
  char *name;
  long value;
};
19002 static const struct arm_option_value_table arm_float_abis
[] =
19004 {"hard", ARM_FLOAT_ABI_HARD
},
19005 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
19006 {"soft", ARM_FLOAT_ABI_SOFT
},
19011 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
19012 static const struct arm_option_value_table arm_eabis
[] =
19014 {"gnu", EF_ARM_EABI_UNKNOWN
},
19015 {"4", EF_ARM_EABI_VER4
},
19016 {"5", EF_ARM_EABI_VER5
},
/* Description of a long (string-valued) command line option.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
19030 arm_parse_extension (char * str
, const arm_feature_set
**opt_p
)
19032 arm_feature_set
*ext_set
= xmalloc (sizeof (arm_feature_set
));
19034 /* Copy the feature set, so that we can modify it. */
19035 *ext_set
= **opt_p
;
19038 while (str
!= NULL
&& *str
!= 0)
19040 const struct arm_option_cpu_value_table
* opt
;
19046 as_bad (_("invalid architectural extension"));
19051 ext
= strchr (str
, '+');
19054 optlen
= ext
- str
;
19056 optlen
= strlen (str
);
19060 as_bad (_("missing architectural extension"));
19064 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
19065 if (strncmp (opt
->name
, str
, optlen
) == 0)
19067 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->value
);
19071 if (opt
->name
== NULL
)
19073 as_bad (_("unknown architectural extnsion `%s'"), str
);
19084 arm_parse_cpu (char * str
)
19086 const struct arm_cpu_option_table
* opt
;
19087 char * ext
= strchr (str
, '+');
19091 optlen
= ext
- str
;
19093 optlen
= strlen (str
);
19097 as_bad (_("missing cpu name `%s'"), str
);
19101 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
19102 if (strncmp (opt
->name
, str
, optlen
) == 0)
19104 mcpu_cpu_opt
= &opt
->value
;
19105 mcpu_fpu_opt
= &opt
->default_fpu
;
19106 if (opt
->canonical_name
)
19107 strcpy(selected_cpu_name
, opt
->canonical_name
);
19111 for (i
= 0; i
< optlen
; i
++)
19112 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
19113 selected_cpu_name
[i
] = 0;
19117 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
19122 as_bad (_("unknown cpu `%s'"), str
);
19127 arm_parse_arch (char * str
)
19129 const struct arm_arch_option_table
*opt
;
19130 char *ext
= strchr (str
, '+');
19134 optlen
= ext
- str
;
19136 optlen
= strlen (str
);
19140 as_bad (_("missing architecture name `%s'"), str
);
19144 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
19145 if (streq (opt
->name
, str
))
19147 march_cpu_opt
= &opt
->value
;
19148 march_fpu_opt
= &opt
->default_fpu
;
19149 strcpy(selected_cpu_name
, opt
->name
);
19152 return arm_parse_extension (ext
, &march_cpu_opt
);
19157 as_bad (_("unknown architecture `%s'\n"), str
);
19162 arm_parse_fpu (char * str
)
19164 const struct arm_option_cpu_value_table
* opt
;
19166 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
19167 if (streq (opt
->name
, str
))
19169 mfpu_opt
= &opt
->value
;
19173 as_bad (_("unknown floating point format `%s'\n"), str
);
19178 arm_parse_float_abi (char * str
)
19180 const struct arm_option_value_table
* opt
;
19182 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
19183 if (streq (opt
->name
, str
))
19185 mfloat_abi_opt
= opt
->value
;
19189 as_bad (_("unknown floating point abi `%s'\n"), str
);
19195 arm_parse_eabi (char * str
)
19197 const struct arm_option_value_table
*opt
;
19199 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
19200 if (streq (opt
->name
, str
))
19202 meabi_flags
= opt
->value
;
19205 as_bad (_("unknown EABI `%s'\n"), str
);
19210 struct arm_long_option_table arm_long_opts
[] =
19212 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
19213 arm_parse_cpu
, NULL
},
19214 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
19215 arm_parse_arch
, NULL
},
19216 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
19217 arm_parse_fpu
, NULL
},
19218 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
19219 arm_parse_float_abi
, NULL
},
19221 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
19222 arm_parse_eabi
, NULL
},
19224 {NULL
, NULL
, 0, NULL
}
19228 md_parse_option (int c
, char * arg
)
19230 struct arm_option_table
*opt
;
19231 const struct arm_legacy_option_table
*fopt
;
19232 struct arm_long_option_table
*lopt
;
19238 target_big_endian
= 1;
19244 target_big_endian
= 0;
19249 /* Listing option. Just ignore these, we don't support additional
19254 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
19256 if (c
== opt
->option
[0]
19257 && ((arg
== NULL
&& opt
->option
[1] == 0)
19258 || streq (arg
, opt
->option
+ 1)))
19260 #if WARN_DEPRECATED
19261 /* If the option is deprecated, tell the user. */
19262 if (opt
->deprecated
!= NULL
)
19263 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
19264 arg
? arg
: "", _(opt
->deprecated
));
19267 if (opt
->var
!= NULL
)
19268 *opt
->var
= opt
->value
;
19274 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
19276 if (c
== fopt
->option
[0]
19277 && ((arg
== NULL
&& fopt
->option
[1] == 0)
19278 || streq (arg
, fopt
->option
+ 1)))
19280 #if WARN_DEPRECATED
19281 /* If the option is deprecated, tell the user. */
19282 if (fopt
->deprecated
!= NULL
)
19283 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
19284 arg
? arg
: "", _(fopt
->deprecated
));
19287 if (fopt
->var
!= NULL
)
19288 *fopt
->var
= &fopt
->value
;
19294 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
19296 /* These options are expected to have an argument. */
19297 if (c
== lopt
->option
[0]
19299 && strncmp (arg
, lopt
->option
+ 1,
19300 strlen (lopt
->option
+ 1)) == 0)
19302 #if WARN_DEPRECATED
19303 /* If the option is deprecated, tell the user. */
19304 if (lopt
->deprecated
!= NULL
)
19305 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
19306 _(lopt
->deprecated
));
19309 /* Call the sup-option parser. */
19310 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
19321 md_show_usage (FILE * fp
)
19323 struct arm_option_table
*opt
;
19324 struct arm_long_option_table
*lopt
;
19326 fprintf (fp
, _(" ARM-specific assembler options:\n"));
19328 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
19329 if (opt
->help
!= NULL
)
19330 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
19332 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
19333 if (lopt
->help
!= NULL
)
19334 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
19338 -EB assemble code for a big-endian cpu\n"));
19343 -EL assemble code for a little-endian cpu\n"));
19352 arm_feature_set flags
;
19353 } cpu_arch_ver_table
;
19355 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
19356 least features first. */
19357 static const cpu_arch_ver_table cpu_arch_ver
[] =
19362 {4, ARM_ARCH_V5TE
},
19363 {5, ARM_ARCH_V5TEJ
},
19367 {9, ARM_ARCH_V6T2
},
19368 {10, ARM_ARCH_V7A
},
19369 {10, ARM_ARCH_V7R
},
19370 {10, ARM_ARCH_V7M
},
19374 /* Set the public EABI object attributes. */
19376 aeabi_set_public_attributes (void)
19379 arm_feature_set flags
;
19380 arm_feature_set tmp
;
19381 const cpu_arch_ver_table
*p
;
19383 /* Choose the architecture based on the capabilities of the requested cpu
19384 (if any) and/or the instructions actually used. */
19385 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
19386 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
19387 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
19391 for (p
= cpu_arch_ver
; p
->val
; p
++)
19393 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
19396 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
19400 /* Tag_CPU_name. */
19401 if (selected_cpu_name
[0])
19405 p
= selected_cpu_name
;
19406 if (strncmp(p
, "armv", 4) == 0)
19411 for (i
= 0; p
[i
]; i
++)
19412 p
[i
] = TOUPPER (p
[i
]);
19414 elf32_arm_add_eabi_attr_string (stdoutput
, 5, p
);
19416 /* Tag_CPU_arch. */
19417 elf32_arm_add_eabi_attr_int (stdoutput
, 6, arch
);
19418 /* Tag_CPU_arch_profile. */
19419 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
))
19420 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'A');
19421 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
19422 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'R');
19423 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
))
19424 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'M');
19425 /* Tag_ARM_ISA_use. */
19426 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_full
))
19427 elf32_arm_add_eabi_attr_int (stdoutput
, 8, 1);
19428 /* Tag_THUMB_ISA_use. */
19429 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_full
))
19430 elf32_arm_add_eabi_attr_int (stdoutput
, 9,
19431 ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
) ? 2 : 1);
19432 /* Tag_VFP_arch. */
19433 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v3
)
19434 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v3
))
19435 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 3);
19436 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v2
)
19437 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v2
))
19438 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 2);
19439 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1
)
19440 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1
)
19441 || ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1xd
)
19442 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1xd
))
19443 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 1);
19444 /* Tag_WMMX_arch. */
19445 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_cext_iwmmxt
)
19446 || ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_cext_iwmmxt
))
19447 elf32_arm_add_eabi_attr_int (stdoutput
, 11, 1);
19448 /* Tag_NEON_arch. */
19449 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_neon_ext_v1
)
19450 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_neon_ext_v1
))
19451 elf32_arm_add_eabi_attr_int (stdoutput
, 12, 1);
19454 /* Add the .ARM.attributes section. */
/* NOTE(review): the enclosing function's header line is missing from this
   extraction, so its name and the declarations of `size', `s', `addr' and
   `p' cannot be confirmed here.  The visible body computes the attribute
   blob size, creates a read-only .ARM.attributes data section, and writes
   the attribute contents into a fresh frag -- ELF-only (see #endif).  */
/* Only EABI version 4 or later objects carry build attributes.  */
19463 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
/* Materialize the attribute values before sizing/serializing them.  */
19466 aeabi_set_public_attributes ();
19467 size
= elf32_arm_eabi_attr_size (stdoutput
);
19468 s
= subseg_new (".ARM.attributes", 0);
19469 bfd_set_section_flags (stdoutput
, s
, SEC_READONLY
| SEC_DATA
);
19470 addr
= frag_now_fix ();
19471 p
= frag_more (size
);
19472 elf32_arm_set_eabi_attr_contents (stdoutput
, (bfd_byte
*)p
, size
);
19474 #endif /* OBJ_ELF */
19477 /* Parse a .cpu directive. */
19480 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
19482 const struct arm_cpu_option_table
*opt
;
19486 name
= input_line_pointer
;
19487 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
19488 input_line_pointer
++;
19489 saved_char
= *input_line_pointer
;
19490 *input_line_pointer
= 0;
19492 /* Skip the first "all" entry. */
19493 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
19494 if (streq (opt
->name
, name
))
19496 mcpu_cpu_opt
= &opt
->value
;
19497 selected_cpu
= opt
->value
;
19498 if (opt
->canonical_name
)
19499 strcpy(selected_cpu_name
, opt
->canonical_name
);
19503 for (i
= 0; opt
->name
[i
]; i
++)
19504 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
19505 selected_cpu_name
[i
] = 0;
19507 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
19508 *input_line_pointer
= saved_char
;
19509 demand_empty_rest_of_line ();
19512 as_bad (_("unknown cpu `%s'"), name
);
19513 *input_line_pointer
= saved_char
;
19514 ignore_rest_of_line ();
19518 /* Parse a .arch directive. */
19521 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
19523 const struct arm_arch_option_table
*opt
;
19527 name
= input_line_pointer
;
19528 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
19529 input_line_pointer
++;
19530 saved_char
= *input_line_pointer
;
19531 *input_line_pointer
= 0;
19533 /* Skip the first "all" entry. */
19534 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
19535 if (streq (opt
->name
, name
))
19537 mcpu_cpu_opt
= &opt
->value
;
19538 selected_cpu
= opt
->value
;
19539 strcpy(selected_cpu_name
, opt
->name
);
19540 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
19541 *input_line_pointer
= saved_char
;
19542 demand_empty_rest_of_line ();
19546 as_bad (_("unknown architecture `%s'\n"), name
);
19547 *input_line_pointer
= saved_char
;
19548 ignore_rest_of_line ();
19552 /* Parse a .fpu directive. */
19555 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
19557 const struct arm_option_cpu_value_table
*opt
;
19561 name
= input_line_pointer
;
19562 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
19563 input_line_pointer
++;
19564 saved_char
= *input_line_pointer
;
19565 *input_line_pointer
= 0;
19567 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
19568 if (streq (opt
->name
, name
))
19570 mfpu_opt
= &opt
->value
;
19571 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
19572 *input_line_pointer
= saved_char
;
19573 demand_empty_rest_of_line ();
19577 as_bad (_("unknown floating point format `%s'\n"), name
);
19578 *input_line_pointer
= saved_char
;
19579 ignore_rest_of_line ();