| 1 | /* tc-arm.c -- Assemble for the ARM |
| 2 | Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, |
| 3 | 2004, 2005, 2006 |
| 4 | Free Software Foundation, Inc. |
| 5 | Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org) |
| 6 | Modified by David Taylor (dtaylor@armltd.co.uk) |
| 7 | Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com) |
| 8 | Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com) |
| 9 | Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com) |
| 10 | |
| 11 | This file is part of GAS, the GNU Assembler. |
| 12 | |
| 13 | GAS is free software; you can redistribute it and/or modify |
| 14 | it under the terms of the GNU General Public License as published by |
| 15 | the Free Software Foundation; either version 2, or (at your option) |
| 16 | any later version. |
| 17 | |
| 18 | GAS is distributed in the hope that it will be useful, |
| 19 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 20 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 21 | GNU General Public License for more details. |
| 22 | |
| 23 | You should have received a copy of the GNU General Public License |
| 24 | along with GAS; see the file COPYING. If not, write to the Free |
| 25 | Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA |
| 26 | 02110-1301, USA. */ |
| 27 | |
| 28 | #include <limits.h> |
| 29 | #include <stdarg.h> |
| 30 | #define NO_RELOC 0 |
| 31 | #include "as.h" |
| 32 | #include "safe-ctype.h" |
| 33 | #include "subsegs.h" |
| 34 | #include "obstack.h" |
| 35 | |
| 36 | #include "opcode/arm.h" |
| 37 | |
| 38 | #ifdef OBJ_ELF |
| 39 | #include "elf/arm.h" |
| 40 | #include "dw2gencfi.h" |
| 41 | #endif |
| 42 | |
| 43 | #include "dwarf2dbg.h" |
| 44 | |
| 45 | #define WARN_DEPRECATED 1 |
| 46 | |
| 47 | #ifdef OBJ_ELF |
| 48 | /* Must be at least the size of the largest unwind opcode (currently two). */ |
| 49 | #define ARM_OPCODE_CHUNK_SIZE 8 |
| 50 | |
| 51 | /* This structure holds the unwinding state. */ |
| 52 | |
static struct
{
  /* Symbol at the start of the function being unwound.  */
  symbolS * proc_start;
  /* Symbol for this function's unwind table entry.  */
  symbolS * table_entry;
  /* Personality routine symbol, when one has been named explicitly.  */
  symbolS * personality_routine;
  /* Index selecting a predefined personality routine (presumably one of
     the __aeabi_unwind_cpp_prN routines mentioned below -- confirm in
     the unwind directive handlers).  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Presumably the number of opcode bytes used and allocated in
     OPCODES respectively -- confirm in the opcode emitters.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
| 81 | |
| 82 | /* Bit N indicates that an R_ARM_NONE relocation has been output for |
| 83 | __aeabi_unwind_cpp_prN already if set. This enables dependencies to be |
| 84 | emitted only once per section, to save unnecessary bloat. */ |
| 85 | static unsigned int marked_pr_dependency = 0; |
| 86 | |
| 87 | #endif /* OBJ_ELF */ |
| 88 | |
| 89 | /* Results from operand parsing worker functions. */ |
| 90 | |
typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Failure severe enough that the caller should not retry other
     interpretations of the operand.  NOTE(review): inferred from the
     name -- confirm against the parsers that return it.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
| 97 | |
/* Floating-point ABI selection; stored in mfloat_abi_opt below.
   NOTE(review): presumably set from a -mfloat-abi command-line
   option -- confirm in md_parse_option.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
| 104 | |
| 105 | /* Types of processor to assemble for. */ |
| 106 | #ifndef CPU_DEFAULT |
| 107 | #if defined __XSCALE__ |
| 108 | #define CPU_DEFAULT ARM_ARCH_XSCALE |
| 109 | #else |
| 110 | #if defined __thumb__ |
| 111 | #define CPU_DEFAULT ARM_ARCH_V5T |
| 112 | #endif |
| 113 | #endif |
| 114 | #endif |
| 115 | |
| 116 | #ifndef FPU_DEFAULT |
| 117 | # ifdef TE_LINUX |
| 118 | # define FPU_DEFAULT FPU_ARCH_FPA |
| 119 | # elif defined (TE_NetBSD) |
| 120 | # ifdef OBJ_ELF |
| 121 | # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */ |
| 122 | # else |
| 123 | /* Legacy a.out format. */ |
| 124 | # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */ |
| 125 | # endif |
| 126 | # elif defined (TE_VXWORKS) |
| 127 | # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */ |
| 128 | # else |
| 129 | /* For backwards compatibility, default to FPA. */ |
| 130 | # define FPU_DEFAULT FPU_ARCH_FPA |
| 131 | # endif |
| 132 | #endif /* ifndef FPU_DEFAULT */ |
| 133 | |
| 134 | #define streq(a, b) (strcmp (a, b) == 0) |
| 135 | |
| 136 | static arm_feature_set cpu_variant; |
| 137 | static arm_feature_set arm_arch_used; |
| 138 | static arm_feature_set thumb_arch_used; |
| 139 | |
| 140 | /* Flags stored in private area of BFD structure. */ |
| 141 | static int uses_apcs_26 = FALSE; |
| 142 | static int atpcs = FALSE; |
| 143 | static int support_interwork = FALSE; |
| 144 | static int uses_apcs_float = FALSE; |
| 145 | static int pic_code = FALSE; |
| 146 | |
| 147 | /* Variables that we set while parsing command-line options. Once all |
| 148 | options have been read we re-process these values to set the real |
| 149 | assembly flags. */ |
| 150 | static const arm_feature_set *legacy_cpu = NULL; |
| 151 | static const arm_feature_set *legacy_fpu = NULL; |
| 152 | |
| 153 | static const arm_feature_set *mcpu_cpu_opt = NULL; |
| 154 | static const arm_feature_set *mcpu_fpu_opt = NULL; |
| 155 | static const arm_feature_set *march_cpu_opt = NULL; |
| 156 | static const arm_feature_set *march_fpu_opt = NULL; |
| 157 | static const arm_feature_set *mfpu_opt = NULL; |
| 158 | static const arm_feature_set *object_arch = NULL; |
| 159 | |
| 160 | /* Constants for known architecture features. */ |
| 161 | static const arm_feature_set fpu_default = FPU_DEFAULT; |
| 162 | static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1; |
| 163 | static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2; |
| 164 | static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3; |
| 165 | static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1; |
| 166 | static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA; |
| 167 | static const arm_feature_set fpu_any_hard = FPU_ANY_HARD; |
| 168 | static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK; |
| 169 | static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE; |
| 170 | |
| 171 | #ifdef CPU_DEFAULT |
| 172 | static const arm_feature_set cpu_default = CPU_DEFAULT; |
| 173 | #endif |
| 174 | |
| 175 | static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0); |
| 176 | static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0); |
| 177 | static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0); |
| 178 | static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0); |
| 179 | static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0); |
| 180 | static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0); |
| 181 | static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0); |
| 182 | static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0); |
| 183 | static const arm_feature_set arm_ext_v4t_5 = |
| 184 | ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0); |
| 185 | static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0); |
| 186 | static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0); |
| 187 | static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0); |
| 188 | static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0); |
| 189 | static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0); |
| 190 | static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0); |
| 191 | static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0); |
| 192 | static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0); |
| 193 | static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0); |
| 194 | static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0); |
| 195 | static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0); |
| 196 | static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0); |
| 197 | static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0); |
| 198 | static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0); |
| 199 | |
| 200 | static const arm_feature_set arm_arch_any = ARM_ANY; |
| 201 | static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1); |
| 202 | static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2; |
| 203 | static const arm_feature_set arm_arch_none = ARM_ARCH_NONE; |
| 204 | |
| 205 | static const arm_feature_set arm_cext_iwmmxt2 = |
| 206 | ARM_FEATURE (0, ARM_CEXT_IWMMXT2); |
| 207 | static const arm_feature_set arm_cext_iwmmxt = |
| 208 | ARM_FEATURE (0, ARM_CEXT_IWMMXT); |
| 209 | static const arm_feature_set arm_cext_xscale = |
| 210 | ARM_FEATURE (0, ARM_CEXT_XSCALE); |
| 211 | static const arm_feature_set arm_cext_maverick = |
| 212 | ARM_FEATURE (0, ARM_CEXT_MAVERICK); |
| 213 | static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1); |
| 214 | static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2); |
| 215 | static const arm_feature_set fpu_vfp_ext_v1xd = |
| 216 | ARM_FEATURE (0, FPU_VFP_EXT_V1xD); |
| 217 | static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1); |
| 218 | static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2); |
| 219 | static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3); |
| 220 | static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1); |
| 221 | static const arm_feature_set fpu_vfp_v3_or_neon_ext = |
| 222 | ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3); |
| 223 | |
| 224 | static int mfloat_abi_opt = -1; |
| 225 | /* Record user cpu selection for object attributes. */ |
| 226 | static arm_feature_set selected_cpu = ARM_ARCH_NONE; |
| 227 | /* Must be long enough to hold any of the names in arm_cpus. */ |
| 228 | static char selected_cpu_name[16]; |
| 229 | #ifdef OBJ_ELF |
| 230 | # ifdef EABI_DEFAULT |
| 231 | static int meabi_flags = EABI_DEFAULT; |
| 232 | # else |
| 233 | static int meabi_flags = EF_ARM_EABI_UNKNOWN; |
| 234 | # endif |
| 235 | #endif |
| 236 | |
| 237 | #ifdef OBJ_ELF |
| 238 | /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */ |
| 239 | symbolS * GOT_symbol; |
| 240 | #endif |
| 241 | |
| 242 | /* 0: assemble for ARM, |
| 243 | 1: assemble for Thumb, |
| 244 | 2: assemble for Thumb even though target CPU does not support thumb |
| 245 | instructions. */ |
| 246 | static int thumb_mode = 0; |
| 247 | |
| 248 | /* If unified_syntax is true, we are processing the new unified |
| 249 | ARM/Thumb syntax. Important differences from the old ARM mode: |
| 250 | |
| 251 | - Immediate operands do not require a # prefix. |
| 252 | - Conditional affixes always appear at the end of the |
| 253 | instruction. (For backward compatibility, those instructions |
| 254 | that formerly had them in the middle, continue to accept them |
| 255 | there.) |
| 256 | - The IT instruction may appear, and if it does is validated |
| 257 | against subsequent conditional affixes. It does not generate |
| 258 | machine code. |
| 259 | |
| 260 | Important differences from the old Thumb mode: |
| 261 | |
| 262 | - Immediate operands do not require a # prefix. |
| 263 | - Most of the V6T2 instructions are only available in unified mode. |
| 264 | - The .N and .W suffixes are recognized and honored (it is an error |
| 265 | if they cannot be honored). |
| 266 | - All instructions set the flags if and only if they have an 's' affix. |
| 267 | - Conditional affixes may be used. They are validated against |
| 268 | preceding IT instructions. Unlike ARM mode, you cannot use a |
| 269 | conditional affix except in the scope of an IT instruction. */ |
| 270 | |
| 271 | static bfd_boolean unified_syntax = FALSE; |
| 272 | |
| 273 | enum neon_el_type |
| 274 | { |
| 275 | NT_invtype, |
| 276 | NT_untyped, |
| 277 | NT_integer, |
| 278 | NT_float, |
| 279 | NT_poly, |
| 280 | NT_signed, |
| 281 | NT_unsigned |
| 282 | }; |
| 283 | |
| 284 | struct neon_type_el |
| 285 | { |
| 286 | enum neon_el_type type; |
| 287 | unsigned size; |
| 288 | }; |
| 289 | |
| 290 | #define NEON_MAX_TYPE_ELS 4 |
| 291 | |
| 292 | struct neon_type |
| 293 | { |
| 294 | struct neon_type_el el[NEON_MAX_TYPE_ELS]; |
| 295 | unsigned elems; |
| 296 | }; |
| 297 | |
/* Per-instruction parse/encode state.  Filled in while parsing a
   single instruction and consumed by the encoding functions.  */
struct arm_it
{
  /* Error message to report, or NULL if no error so far.  */
  const char * error;
  /* The binary encoding of the instruction being built.  */
  unsigned long instruction;
  /* Size of the instruction, in bytes.  */
  int size;
  /* Size explicitly requested by the programmer, or 0.  NOTE(review):
     presumably set by the .n/.w suffixes described above -- confirm.  */
  int size_req;
  /* Condition code for the instruction.  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  /* Neon element type(s) parsed from the mnemonic suffix.  */
  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation to be applied to the instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } reloc;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present : 1;  /* Operand present.  */
    unsigned isreg : 1;  /* Operand was a register.  */
    unsigned immisreg : 1;  /* .imm field is a second register.  */
    unsigned isscalar : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions. This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc : 1;  /* Operand has relocation suffix.  */
    unsigned writeback : 1;  /* Operand has trailing !  */
    unsigned preind : 1;  /* Preindexed address.  */
    unsigned postind : 1;  /* Postindexed address.  */
    unsigned negative : 1;  /* Index register was negated.  */
    unsigned shifted : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[6];
};
| 345 | |
| 346 | static struct arm_it inst; |
| 347 | |
| 348 | #define NUM_FLOAT_VALS 8 |
| 349 | |
| 350 | const char * fp_const[] = |
| 351 | { |
| 352 | "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0 |
| 353 | }; |
| 354 | |
| 355 | /* Number of littlenums required to hold an extended precision number. */ |
| 356 | #define MAX_LITTLENUMS 6 |
| 357 | |
| 358 | LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS]; |
| 359 | |
| 360 | #define FAIL (-1) |
| 361 | #define SUCCESS (0) |
| 362 | |
| 363 | #define SUFF_S 1 |
| 364 | #define SUFF_D 2 |
| 365 | #define SUFF_E 3 |
| 366 | #define SUFF_P 4 |
| 367 | |
| 368 | #define CP_T_X 0x00008000 |
| 369 | #define CP_T_Y 0x00400000 |
| 370 | |
| 371 | #define CONDS_BIT 0x00100000 |
| 372 | #define LOAD_BIT 0x00100000 |
| 373 | |
| 374 | #define DOUBLE_LOAD_FLAG 0x00000001 |
| 375 | |
| 376 | struct asm_cond |
| 377 | { |
| 378 | const char * template; |
| 379 | unsigned long value; |
| 380 | }; |
| 381 | |
| 382 | #define COND_ALWAYS 0xE |
| 383 | |
| 384 | struct asm_psr |
| 385 | { |
| 386 | const char *template; |
| 387 | unsigned long field; |
| 388 | }; |
| 389 | |
| 390 | struct asm_barrier_opt |
| 391 | { |
| 392 | const char *template; |
| 393 | unsigned long value; |
| 394 | }; |
| 395 | |
| 396 | /* The bit that distinguishes CPSR and SPSR. */ |
| 397 | #define SPSR_BIT (1 << 22) |
| 398 | |
| 399 | /* The individual PSR flag bits. */ |
| 400 | #define PSR_c (1 << 16) |
| 401 | #define PSR_x (1 << 17) |
| 402 | #define PSR_s (1 << 18) |
| 403 | #define PSR_f (1 << 19) |
| 404 | |
| 405 | struct reloc_entry |
| 406 | { |
| 407 | char *name; |
| 408 | bfd_reloc_code_real_type reloc; |
| 409 | }; |
| 410 | |
| 411 | enum vfp_reg_pos |
| 412 | { |
| 413 | VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn, |
| 414 | VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn |
| 415 | }; |
| 416 | |
| 417 | enum vfp_ldstm_type |
| 418 | { |
| 419 | VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX |
| 420 | }; |
| 421 | |
| 422 | /* Bits for DEFINED field in neon_typed_alias. */ |
| 423 | #define NTA_HASTYPE 1 |
| 424 | #define NTA_HASINDEX 2 |
| 425 | |
| 426 | struct neon_typed_alias |
| 427 | { |
| 428 | unsigned char defined; |
| 429 | unsigned char index; |
| 430 | struct neon_type_el eltype; |
| 431 | }; |
| 432 | |
| 433 | /* ARM register categories. This includes coprocessor numbers and various |
| 434 | architecture extensions' registers. */ |
| 435 | enum arm_reg_type |
| 436 | { |
| 437 | REG_TYPE_RN, |
| 438 | REG_TYPE_CP, |
| 439 | REG_TYPE_CN, |
| 440 | REG_TYPE_FN, |
| 441 | REG_TYPE_VFS, |
| 442 | REG_TYPE_VFD, |
| 443 | REG_TYPE_NQ, |
| 444 | REG_TYPE_VFSD, |
| 445 | REG_TYPE_NDQ, |
| 446 | REG_TYPE_NSDQ, |
| 447 | REG_TYPE_VFC, |
| 448 | REG_TYPE_MVF, |
| 449 | REG_TYPE_MVD, |
| 450 | REG_TYPE_MVFX, |
| 451 | REG_TYPE_MVDX, |
| 452 | REG_TYPE_MVAX, |
| 453 | REG_TYPE_DSPSC, |
| 454 | REG_TYPE_MMXWR, |
| 455 | REG_TYPE_MMXWC, |
| 456 | REG_TYPE_MMXWCG, |
| 457 | REG_TYPE_XSCALE, |
| 458 | }; |
| 459 | |
| 460 | /* Structure for a hash table entry for a register. |
| 461 | If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra |
| 462 | information which states whether a vector type or index is specified (for a |
| 463 | register alias created with .dn or .qn). Otherwise NEON should be NULL. */ |
| 464 | struct reg_entry |
| 465 | { |
| 466 | const char *name; |
| 467 | unsigned char number; |
| 468 | unsigned char type; |
| 469 | unsigned char builtin; |
| 470 | struct neon_typed_alias *neon; |
| 471 | }; |
| 472 | |
| 473 | /* Diagnostics used when we don't get a register of the expected type. */ |
| 474 | const char *const reg_expected_msgs[] = |
| 475 | { |
| 476 | N_("ARM register expected"), |
| 477 | N_("bad or missing co-processor number"), |
| 478 | N_("co-processor register expected"), |
| 479 | N_("FPA register expected"), |
| 480 | N_("VFP single precision register expected"), |
| 481 | N_("VFP/Neon double precision register expected"), |
| 482 | N_("Neon quad precision register expected"), |
| 483 | N_("VFP single or double precision register expected"), |
| 484 | N_("Neon double or quad precision register expected"), |
| 485 | N_("VFP single, double or Neon quad precision register expected"), |
| 486 | N_("VFP system register expected"), |
| 487 | N_("Maverick MVF register expected"), |
| 488 | N_("Maverick MVD register expected"), |
| 489 | N_("Maverick MVFX register expected"), |
| 490 | N_("Maverick MVDX register expected"), |
| 491 | N_("Maverick MVAX register expected"), |
| 492 | N_("Maverick DSPSC register expected"), |
| 493 | N_("iWMMXt data register expected"), |
| 494 | N_("iWMMXt control register expected"), |
| 495 | N_("iWMMXt scalar register expected"), |
| 496 | N_("XScale accumulator register expected"), |
| 497 | }; |
| 498 | |
| 499 | /* Some well known registers that we refer to directly elsewhere. */ |
| 500 | #define REG_SP 13 |
| 501 | #define REG_LR 14 |
| 502 | #define REG_PC 15 |
| 503 | |
| 504 | /* ARM instructions take 4bytes in the object file, Thumb instructions |
| 505 | take 2: */ |
| 506 | #define INSN_SIZE 4 |
| 507 | |
| 508 | struct asm_opcode |
| 509 | { |
| 510 | /* Basic string to match. */ |
| 511 | const char *template; |
| 512 | |
| 513 | /* Parameters to instruction. */ |
| 514 | unsigned char operands[8]; |
| 515 | |
| 516 | /* Conditional tag - see opcode_lookup. */ |
| 517 | unsigned int tag : 4; |
| 518 | |
| 519 | /* Basic instruction code. */ |
| 520 | unsigned int avalue : 28; |
| 521 | |
| 522 | /* Thumb-format instruction code. */ |
| 523 | unsigned int tvalue; |
| 524 | |
| 525 | /* Which architecture variant provides this instruction. */ |
| 526 | const arm_feature_set *avariant; |
| 527 | const arm_feature_set *tvariant; |
| 528 | |
| 529 | /* Function to call to encode instruction in ARM format. */ |
| 530 | void (* aencode) (void); |
| 531 | |
| 532 | /* Function to call to encode instruction in Thumb format. */ |
| 533 | void (* tencode) (void); |
| 534 | }; |
| 535 | |
| 536 | /* Defines for various bits that we will want to toggle. */ |
| 537 | #define INST_IMMEDIATE 0x02000000 |
| 538 | #define OFFSET_REG 0x02000000 |
| 539 | #define HWOFFSET_IMM 0x00400000 |
| 540 | #define SHIFT_BY_REG 0x00000010 |
| 541 | #define PRE_INDEX 0x01000000 |
| 542 | #define INDEX_UP 0x00800000 |
| 543 | #define WRITE_BACK 0x00200000 |
| 544 | #define LDM_TYPE_2_OR_3 0x00400000 |
| 545 | |
| 546 | #define LITERAL_MASK 0xf000f000 |
| 547 | #define OPCODE_MASK 0xfe1fffff |
| 548 | #define V4_STR_BIT 0x00000020 |
| 549 | |
| 550 | #define DATA_OP_SHIFT 21 |
| 551 | |
| 552 | #define T2_OPCODE_MASK 0xfe1fffff |
| 553 | #define T2_DATA_OP_SHIFT 21 |
| 554 | |
| 555 | /* Codes to distinguish the arithmetic instructions. */ |
| 556 | #define OPCODE_AND 0 |
| 557 | #define OPCODE_EOR 1 |
| 558 | #define OPCODE_SUB 2 |
| 559 | #define OPCODE_RSB 3 |
| 560 | #define OPCODE_ADD 4 |
| 561 | #define OPCODE_ADC 5 |
| 562 | #define OPCODE_SBC 6 |
| 563 | #define OPCODE_RSC 7 |
| 564 | #define OPCODE_TST 8 |
| 565 | #define OPCODE_TEQ 9 |
| 566 | #define OPCODE_CMP 10 |
| 567 | #define OPCODE_CMN 11 |
| 568 | #define OPCODE_ORR 12 |
| 569 | #define OPCODE_MOV 13 |
| 570 | #define OPCODE_BIC 14 |
| 571 | #define OPCODE_MVN 15 |
| 572 | |
| 573 | #define T2_OPCODE_AND 0 |
| 574 | #define T2_OPCODE_BIC 1 |
| 575 | #define T2_OPCODE_ORR 2 |
| 576 | #define T2_OPCODE_ORN 3 |
| 577 | #define T2_OPCODE_EOR 4 |
| 578 | #define T2_OPCODE_ADD 8 |
| 579 | #define T2_OPCODE_ADC 10 |
| 580 | #define T2_OPCODE_SBC 11 |
| 581 | #define T2_OPCODE_SUB 13 |
| 582 | #define T2_OPCODE_RSB 14 |
| 583 | |
| 584 | #define T_OPCODE_MUL 0x4340 |
| 585 | #define T_OPCODE_TST 0x4200 |
| 586 | #define T_OPCODE_CMN 0x42c0 |
| 587 | #define T_OPCODE_NEG 0x4240 |
| 588 | #define T_OPCODE_MVN 0x43c0 |
| 589 | |
| 590 | #define T_OPCODE_ADD_R3 0x1800 |
| 591 | #define T_OPCODE_SUB_R3 0x1a00 |
| 592 | #define T_OPCODE_ADD_HI 0x4400 |
| 593 | #define T_OPCODE_ADD_ST 0xb000 |
| 594 | #define T_OPCODE_SUB_ST 0xb080 |
| 595 | #define T_OPCODE_ADD_SP 0xa800 |
| 596 | #define T_OPCODE_ADD_PC 0xa000 |
| 597 | #define T_OPCODE_ADD_I8 0x3000 |
| 598 | #define T_OPCODE_SUB_I8 0x3800 |
| 599 | #define T_OPCODE_ADD_I3 0x1c00 |
| 600 | #define T_OPCODE_SUB_I3 0x1e00 |
| 601 | |
| 602 | #define T_OPCODE_ASR_R 0x4100 |
| 603 | #define T_OPCODE_LSL_R 0x4080 |
| 604 | #define T_OPCODE_LSR_R 0x40c0 |
| 605 | #define T_OPCODE_ROR_R 0x41c0 |
| 606 | #define T_OPCODE_ASR_I 0x1000 |
| 607 | #define T_OPCODE_LSL_I 0x0000 |
| 608 | #define T_OPCODE_LSR_I 0x0800 |
| 609 | |
| 610 | #define T_OPCODE_MOV_I8 0x2000 |
| 611 | #define T_OPCODE_CMP_I8 0x2800 |
| 612 | #define T_OPCODE_CMP_LR 0x4280 |
| 613 | #define T_OPCODE_MOV_HR 0x4600 |
| 614 | #define T_OPCODE_CMP_HR 0x4500 |
| 615 | |
| 616 | #define T_OPCODE_LDR_PC 0x4800 |
| 617 | #define T_OPCODE_LDR_SP 0x9800 |
| 618 | #define T_OPCODE_STR_SP 0x9000 |
| 619 | #define T_OPCODE_LDR_IW 0x6800 |
| 620 | #define T_OPCODE_STR_IW 0x6000 |
| 621 | #define T_OPCODE_LDR_IH 0x8800 |
| 622 | #define T_OPCODE_STR_IH 0x8000 |
| 623 | #define T_OPCODE_LDR_IB 0x7800 |
| 624 | #define T_OPCODE_STR_IB 0x7000 |
| 625 | #define T_OPCODE_LDR_RW 0x5800 |
| 626 | #define T_OPCODE_STR_RW 0x5000 |
| 627 | #define T_OPCODE_LDR_RH 0x5a00 |
| 628 | #define T_OPCODE_STR_RH 0x5200 |
| 629 | #define T_OPCODE_LDR_RB 0x5c00 |
| 630 | #define T_OPCODE_STR_RB 0x5400 |
| 631 | |
| 632 | #define T_OPCODE_PUSH 0xb400 |
| 633 | #define T_OPCODE_POP 0xbc00 |
| 634 | |
| 635 | #define T_OPCODE_BRANCH 0xe000 |
| 636 | |
| 637 | #define THUMB_SIZE 2 /* Size of thumb instruction. */ |
| 638 | #define THUMB_PP_PC_LR 0x0100 |
| 639 | #define THUMB_LOAD_BIT 0x0800 |
| 640 | #define THUMB2_LOAD_BIT 0x00100000 |
| 641 | |
/* Common diagnostic strings.  */
#define BAD_ARGS _("bad arguments to instruction")
#define BAD_PC _("r15 not allowed here")
#define BAD_COND _("instruction cannot be conditional")
#define BAD_OVERLAP _("registers may not be the same")
#define BAD_HIREG _("lo register required")
#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* Stray trailing semicolon removed: it expanded into an extra empty
   statement at every use and would break constructs such as
   "if (cond) inst.error = BAD_ADDR_MODE; else ...".  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
#define BAD_BRANCH _("branch must be last instruction in IT block")
#define BAD_NOT_IT _("instruction not allowed in IT block")
#define BAD_FPU _("selected FPU does not support instruction")
| 652 | |
| 653 | static struct hash_control *arm_ops_hsh; |
| 654 | static struct hash_control *arm_cond_hsh; |
| 655 | static struct hash_control *arm_shift_hsh; |
| 656 | static struct hash_control *arm_psr_hsh; |
| 657 | static struct hash_control *arm_v7m_psr_hsh; |
| 658 | static struct hash_control *arm_reg_hsh; |
| 659 | static struct hash_control *arm_reloc_hsh; |
| 660 | static struct hash_control *arm_barrier_opt_hsh; |
| 661 | |
| 662 | /* Stuff needed to resolve the label ambiguity |
| 663 | As: |
| 664 | ... |
| 665 | label: <insn> |
| 666 | may differ from: |
| 667 | ... |
| 668 | label: |
| 669 | <insn> |
| 670 | */ |
| 671 | |
| 672 | symbolS * last_label_seen; |
| 673 | static int label_is_thumb_function_name = FALSE; |
| 674 | \f |
| 675 | /* Literal pool structure. Held on a per-section |
| 676 | and per-sub-section basis. */ |
| 677 | |
| 678 | #define MAX_LITERAL_POOL_SIZE 1024 |
| 679 | typedef struct literal_pool |
| 680 | { |
| 681 | expressionS literals [MAX_LITERAL_POOL_SIZE]; |
| 682 | unsigned int next_free_entry; |
| 683 | unsigned int id; |
| 684 | symbolS * symbol; |
| 685 | segT section; |
| 686 | subsegT sub_section; |
| 687 | struct literal_pool * next; |
| 688 | } literal_pool; |
| 689 | |
| 690 | /* Pointer to a linked list of literal pools. */ |
| 691 | literal_pool * list_of_pools = NULL; |
| 692 | |
| 693 | /* State variables for IT block handling. */ |
| 694 | static bfd_boolean current_it_mask = 0; |
| 695 | static int current_cc; |
| 696 | |
| 697 | \f |
| 698 | /* Pure syntax. */ |
| 699 | |
| 700 | /* This array holds the chars that always start a comment. If the |
| 701 | pre-processor is disabled, these aren't very useful. */ |
| 702 | const char comment_chars[] = "@"; |
| 703 | |
| 704 | /* This array holds the chars that only start a comment at the beginning of |
| 705 | a line. If the line seems to have the form '# 123 filename' |
| 706 | .line and .file directives will appear in the pre-processed output. */ |
| 707 | /* Note that input_file.c hand checks for '#' at the beginning of the |
| 708 | first line of the input file. This is because the compiler outputs |
| 709 | #NO_APP at the beginning of its output. */ |
| 710 | /* Also note that comments like this one will always work. */ |
| 711 | const char line_comment_chars[] = "#"; |
| 712 | |
| 713 | const char line_separator_chars[] = ";"; |
| 714 | |
| 715 | /* Chars that can be used to separate mant |
| 716 | from exp in floating point numbers. */ |
| 717 | const char EXP_CHARS[] = "eE"; |
| 718 | |
| 719 | /* Chars that mean this number is a floating point constant. */ |
| 720 | /* As in 0f12.456 */ |
| 721 | /* or 0d1.2345e12 */ |
| 722 | |
| 723 | const char FLT_CHARS[] = "rRsSfFdDxXeEpP"; |
| 724 | |
| 725 | /* Prefix characters that indicate the start of an immediate |
| 726 | value. */ |
| 727 | #define is_immediate_prefix(C) ((C) == '#' || (C) == '$') |
| 728 | |
| 729 | /* Separator character handling. */ |
| 730 | |
| 731 | #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0) |
| 732 | |
| 733 | static inline int |
| 734 | skip_past_char (char ** str, char c) |
| 735 | { |
| 736 | if (**str == c) |
| 737 | { |
| 738 | (*str)++; |
| 739 | return SUCCESS; |
| 740 | } |
| 741 | else |
| 742 | return FAIL; |
| 743 | } |
| 744 | #define skip_past_comma(str) skip_past_char (str, ',') |
| 745 | |
| 746 | /* Arithmetic expressions (possibly involving symbols). */ |
| 747 | |
| 748 | /* Return TRUE if anything in the expression is a bignum. */ |
| 749 | |
| 750 | static int |
| 751 | walk_no_bignums (symbolS * sp) |
| 752 | { |
| 753 | if (symbol_get_value_expression (sp)->X_op == O_big) |
| 754 | return 1; |
| 755 | |
| 756 | if (symbol_get_value_expression (sp)->X_add_symbol) |
| 757 | { |
| 758 | return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol) |
| 759 | || (symbol_get_value_expression (sp)->X_op_symbol |
| 760 | && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol))); |
| 761 | } |
| 762 | |
| 763 | return 0; |
| 764 | } |
| 765 | |
| 766 | static int in_my_get_expression = 0; |
| 767 | |
| 768 | /* Third argument to my_get_expression. */ |
| 769 | #define GE_NO_PREFIX 0 |
| 770 | #define GE_IMM_PREFIX 1 |
| 771 | #define GE_OPT_PREFIX 2 |
| 772 | /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit) |
| 773 | immediates, as can be used in Neon VMVN and VMOV immediate instructions. */ |
| 774 | #define GE_OPT_PREFIX_BIG 3 |
| 775 | |
/* Parse an expression at *STR into EP, optionally consuming a leading
   immediate prefix character first according to PREFIX_MODE (one of
   GE_NO_PREFIX, GE_IMM_PREFIX, GE_OPT_PREFIX or GE_OPT_PREFIX_BIG).
   On success, advance *STR past the expression and return 0.  On
   failure, set inst.error (if not already set), leave *STR at the
   failure point and return nonzero.  GE_OPT_PREFIX_BIG additionally
   permits bignum results (for Neon VMVN/VMOV 64-bit immediates).

   NOTE(review): the missing-prefix path returns FAIL while the other
   failure paths return 1; callers appear to test only for nonzero, but
   confirm before comparing against a specific value.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     temporarily redirect it at *STR; in_my_get_expression tells
     md_operand to flag bad expressions as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = _("bad expression");
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
          || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
	          || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
| 858 | |
| 859 | /* Turn a string in input_line_pointer into a floating point constant |
| 860 | of type TYPE, and store the appropriate bytes in *LITP. The number |
| 861 | of LITTLENUMS emitted is stored in *SIZEP. An error message is |
| 862 | returned, or NULL on OK. |
| 863 | |
| 864 | Note that fp constants aren't represent in the normal way on the ARM. |
| 865 | In big endian mode, things are as expected. However, in little endian |
| 866 | mode fp constants are big-endian word-wise, and little-endian byte-wise |
| 867 | within the words. For example, (double) 1.1 in big endian mode is |
| 868 | the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is |
| 869 | the byte sequence 99 99 f1 3f 9a 99 99 99. |
| 870 | |
| 871 | ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */ |
| 872 | |
| 873 | char * |
| 874 | md_atof (int type, char * litP, int * sizeP) |
| 875 | { |
| 876 | int prec; |
| 877 | LITTLENUM_TYPE words[MAX_LITTLENUMS]; |
| 878 | char *t; |
| 879 | int i; |
| 880 | |
| 881 | switch (type) |
| 882 | { |
| 883 | case 'f': |
| 884 | case 'F': |
| 885 | case 's': |
| 886 | case 'S': |
| 887 | prec = 2; |
| 888 | break; |
| 889 | |
| 890 | case 'd': |
| 891 | case 'D': |
| 892 | case 'r': |
| 893 | case 'R': |
| 894 | prec = 4; |
| 895 | break; |
| 896 | |
| 897 | case 'x': |
| 898 | case 'X': |
| 899 | prec = 6; |
| 900 | break; |
| 901 | |
| 902 | case 'p': |
| 903 | case 'P': |
| 904 | prec = 6; |
| 905 | break; |
| 906 | |
| 907 | default: |
| 908 | *sizeP = 0; |
| 909 | return _("bad call to MD_ATOF()"); |
| 910 | } |
| 911 | |
| 912 | t = atof_ieee (input_line_pointer, type, words); |
| 913 | if (t) |
| 914 | input_line_pointer = t; |
| 915 | *sizeP = prec * 2; |
| 916 | |
| 917 | if (target_big_endian) |
| 918 | { |
| 919 | for (i = 0; i < prec; i++) |
| 920 | { |
| 921 | md_number_to_chars (litP, (valueT) words[i], 2); |
| 922 | litP += 2; |
| 923 | } |
| 924 | } |
| 925 | else |
| 926 | { |
| 927 | if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure)) |
| 928 | for (i = prec - 1; i >= 0; i--) |
| 929 | { |
| 930 | md_number_to_chars (litP, (valueT) words[i], 2); |
| 931 | litP += 2; |
| 932 | } |
| 933 | else |
| 934 | /* For a 4 byte float the order of elements in `words' is 1 0. |
| 935 | For an 8 byte float the order is 1 0 3 2. */ |
| 936 | for (i = 0; i < prec; i += 2) |
| 937 | { |
| 938 | md_number_to_chars (litP, (valueT) words[i + 1], 2); |
| 939 | md_number_to_chars (litP + 2, (valueT) words[i], 2); |
| 940 | litP += 4; |
| 941 | } |
| 942 | } |
| 943 | |
| 944 | return 0; |
| 945 | } |
| 946 | |
| 947 | /* We handle all bad expressions here, so that we can report the faulty |
| 948 | instruction in the error message. */ |
| 949 | void |
| 950 | md_operand (expressionS * expr) |
| 951 | { |
| 952 | if (in_my_get_expression) |
| 953 | expr->X_op = O_illegal; |
| 954 | } |
| 955 | |
/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */
#ifdef OBJ_ELF
static int
immediate_for_directive (int *val)
{
  expressionS exp;

  /* If there is no immediate prefix, exp stays O_illegal and we fall
     through to the error path below.  */
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
      expression (&exp);
    }

  if (exp.X_op == O_constant)
    {
      *val = exp.X_add_number;
      return SUCCESS;
    }

  as_bad (_("expected #constant"));
  ignore_rest_of_line ();
  return FAIL;
}
#endif
| 984 | |
| 985 | /* Register parsing. */ |
| 986 | |
| 987 | /* Generic register parser. CCP points to what should be the |
| 988 | beginning of a register name. If it is indeed a valid register |
| 989 | name, advance CCP over it and return the reg_entry structure; |
| 990 | otherwise return NULL. Does not issue diagnostics. */ |
| 991 | |
| 992 | static struct reg_entry * |
| 993 | arm_reg_parse_multi (char **ccp) |
| 994 | { |
| 995 | char *start = *ccp; |
| 996 | char *p; |
| 997 | struct reg_entry *reg; |
| 998 | |
| 999 | #ifdef REGISTER_PREFIX |
| 1000 | if (*start != REGISTER_PREFIX) |
| 1001 | return NULL; |
| 1002 | start++; |
| 1003 | #endif |
| 1004 | #ifdef OPTIONAL_REGISTER_PREFIX |
| 1005 | if (*start == OPTIONAL_REGISTER_PREFIX) |
| 1006 | start++; |
| 1007 | #endif |
| 1008 | |
| 1009 | p = start; |
| 1010 | if (!ISALPHA (*p) || !is_name_beginner (*p)) |
| 1011 | return NULL; |
| 1012 | |
| 1013 | do |
| 1014 | p++; |
| 1015 | while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_'); |
| 1016 | |
| 1017 | reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start); |
| 1018 | |
| 1019 | if (!reg) |
| 1020 | return NULL; |
| 1021 | |
| 1022 | *ccp = p; |
| 1023 | return reg; |
| 1024 | } |
| 1025 | |
/* Handle alternative spellings accepted for a few register classes when
   the conventional lookup did not produce a register of class TYPE.
   START is the beginning of the operand text, CCP the current parse
   position (advanced on a bare-number match), and REG the entry found
   by name, or NULL.  Returns the register number, or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
                    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
        return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
        unsigned long processor = strtoul (start, ccp, 10);
        if (*ccp != start && processor <= 15)
          return processor;
      }
      /* Fall through.  NOTE(review): there is no `break' here, so a
	 REG_TYPE_CP request that is not a bare number also tries the
	 WCG match below -- confirm this is intentional.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
         instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
        return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
| 1063 | |
| 1064 | /* As arm_reg_parse_multi, but the register must be of type TYPE, and the |
| 1065 | return value is the register number or FAIL. */ |
| 1066 | |
| 1067 | static int |
| 1068 | arm_reg_parse (char **ccp, enum arm_reg_type type) |
| 1069 | { |
| 1070 | char *start = *ccp; |
| 1071 | struct reg_entry *reg = arm_reg_parse_multi (ccp); |
| 1072 | int ret; |
| 1073 | |
| 1074 | /* Do not allow a scalar (reg+index) to parse as a register. */ |
| 1075 | if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX)) |
| 1076 | return FAIL; |
| 1077 | |
| 1078 | if (reg && reg->type == type) |
| 1079 | return reg->number; |
| 1080 | |
| 1081 | if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL) |
| 1082 | return ret; |
| 1083 | |
| 1084 | *ccp = start; |
| 1085 | return FAIL; |
| 1086 | } |
| 1087 | |
| 1088 | /* Parse a Neon type specifier. *STR should point at the leading '.' |
| 1089 | character. Does no verification at this stage that the type fits the opcode |
| 1090 | properly. E.g., |
| 1091 | |
| 1092 | .i32.i32.s16 |
| 1093 | .s32.f32 |
| 1094 | .u16 |
| 1095 | |
| 1096 | Can all be legally parsed by this function. |
| 1097 | |
| 1098 | Fills in neon_type struct pointer with parsed information, and updates STR |
| 1099 | to point after the parsed type specifier. Returns SUCCESS if this was a legal |
| 1100 | type, FAIL if not. */ |
| 1101 | |
| 1102 | static int |
| 1103 | parse_neon_type (struct neon_type *type, char **str) |
| 1104 | { |
| 1105 | char *ptr = *str; |
| 1106 | |
| 1107 | if (type) |
| 1108 | type->elems = 0; |
| 1109 | |
| 1110 | while (type->elems < NEON_MAX_TYPE_ELS) |
| 1111 | { |
| 1112 | enum neon_el_type thistype = NT_untyped; |
| 1113 | unsigned thissize = -1u; |
| 1114 | |
| 1115 | if (*ptr != '.') |
| 1116 | break; |
| 1117 | |
| 1118 | ptr++; |
| 1119 | |
| 1120 | /* Just a size without an explicit type. */ |
| 1121 | if (ISDIGIT (*ptr)) |
| 1122 | goto parsesize; |
| 1123 | |
| 1124 | switch (TOLOWER (*ptr)) |
| 1125 | { |
| 1126 | case 'i': thistype = NT_integer; break; |
| 1127 | case 'f': thistype = NT_float; break; |
| 1128 | case 'p': thistype = NT_poly; break; |
| 1129 | case 's': thistype = NT_signed; break; |
| 1130 | case 'u': thistype = NT_unsigned; break; |
| 1131 | case 'd': |
| 1132 | thistype = NT_float; |
| 1133 | thissize = 64; |
| 1134 | ptr++; |
| 1135 | goto done; |
| 1136 | default: |
| 1137 | as_bad (_("unexpected character `%c' in type specifier"), *ptr); |
| 1138 | return FAIL; |
| 1139 | } |
| 1140 | |
| 1141 | ptr++; |
| 1142 | |
| 1143 | /* .f is an abbreviation for .f32. */ |
| 1144 | if (thistype == NT_float && !ISDIGIT (*ptr)) |
| 1145 | thissize = 32; |
| 1146 | else |
| 1147 | { |
| 1148 | parsesize: |
| 1149 | thissize = strtoul (ptr, &ptr, 10); |
| 1150 | |
| 1151 | if (thissize != 8 && thissize != 16 && thissize != 32 |
| 1152 | && thissize != 64) |
| 1153 | { |
| 1154 | as_bad (_("bad size %d in type specifier"), thissize); |
| 1155 | return FAIL; |
| 1156 | } |
| 1157 | } |
| 1158 | |
| 1159 | done: |
| 1160 | if (type) |
| 1161 | { |
| 1162 | type->el[type->elems].type = thistype; |
| 1163 | type->el[type->elems].size = thissize; |
| 1164 | type->elems++; |
| 1165 | } |
| 1166 | } |
| 1167 | |
| 1168 | /* Empty/missing type is not a successful parse. */ |
| 1169 | if (type->elems == 0) |
| 1170 | return FAIL; |
| 1171 | |
| 1172 | *str = ptr; |
| 1173 | |
| 1174 | return SUCCESS; |
| 1175 | } |
| 1176 | |
| 1177 | /* Errors may be set multiple times during parsing or bit encoding |
| 1178 | (particularly in the Neon bits), but usually the earliest error which is set |
| 1179 | will be the most meaningful. Avoid overwriting it with later (cascading) |
| 1180 | errors by calling this function. */ |
| 1181 | |
| 1182 | static void |
| 1183 | first_error (const char *err) |
| 1184 | { |
| 1185 | if (!inst.error) |
| 1186 | inst.error = err; |
| 1187 | } |
| 1188 | |
| 1189 | /* Parse a single type, e.g. ".s32", leading period included. */ |
| 1190 | static int |
| 1191 | parse_neon_operand_type (struct neon_type_el *vectype, char **ccp) |
| 1192 | { |
| 1193 | char *str = *ccp; |
| 1194 | struct neon_type optype; |
| 1195 | |
| 1196 | if (*str == '.') |
| 1197 | { |
| 1198 | if (parse_neon_type (&optype, &str) == SUCCESS) |
| 1199 | { |
| 1200 | if (optype.elems == 1) |
| 1201 | *vectype = optype.el[0]; |
| 1202 | else |
| 1203 | { |
| 1204 | first_error (_("only one type should be specified for operand")); |
| 1205 | return FAIL; |
| 1206 | } |
| 1207 | } |
| 1208 | else |
| 1209 | { |
| 1210 | first_error (_("vector type expected")); |
| 1211 | return FAIL; |
| 1212 | } |
| 1213 | } |
| 1214 | else |
| 1215 | return FAIL; |
| 1216 | |
| 1217 | *ccp = str; |
| 1218 | |
| 1219 | return SUCCESS; |
| 1220 | } |
| 1221 | |
/* Special meanings for indices (which normally have a range of 0-7); together
   with these marker values an index fits into a 4-bit integer.  */
| 1224 | |
| 1225 | #define NEON_ALL_LANES 15 |
| 1226 | #define NEON_INTERLEAVE_LANES 14 |
| 1227 | |
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL (without diagnosing
   via inst.error in the alternative-syntax path) when nothing matches.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
                           enum arm_reg_type *rtype,
                           struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with no type or index information defined.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
        *ccp = str;
      if (typeinfo)
        *typeinfo = atype;
      /* altreg is FAIL here when the alternative syntax did not match
         either.  */
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
              || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = reg->type;

  /* The name matched a register, but of the wrong class.  */
  if (type != reg->type)
    return FAIL;

  /* A register alias created with .dn/.qn may already carry type/index
     information.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix; it may only be given once, either on the
     alias or here.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
        {
          first_error (_("can't redefine type for operand"));
          return FAIL;
        }
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* An optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
        {
          first_error (_("only D registers may be indexed"));
          return FAIL;
        }

      if ((atype.defined & NTA_HASINDEX) != 0)
        {
          first_error (_("can't change index for operand"));
          return FAIL;
        }

      atype.defined |= NTA_HASINDEX;

      /* "[]" means all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
        atype.index = NEON_ALL_LANES;
      else
        {
          expressionS exp;

	  /* NOTE(review): the return value of my_get_expression is
	     ignored here; a failed parse is caught by the O_constant
	     check below.  */
          my_get_expression (&exp, &str, GE_NO_PREFIX);

          if (exp.X_op != O_constant)
            {
              first_error (_("constant expression required"));
              return FAIL;
            }

          if (skip_past_char (&str, ']') == FAIL)
            return FAIL;

          atype.index = exp.X_add_number;
        }
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
| 1336 | |
/* Like arm_reg_parse, but also allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.
*/

static int
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
                     enum arm_reg_type *rtype, struct neon_type_el *vectype)
{
  struct neon_typed_alias atype;
  char *str = *ccp;
  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  if (reg == FAIL)
    return FAIL;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
    {
      first_error (_("register operand expected, but got scalar"));
      return FAIL;
    }

  /* Pass any element type information back to the caller.  */
  if (vectype)
    *vectype = atype.eltype;

  *ccp = str;

  return reg;
}
| 1370 | |
| 1371 | #define NEON_SCALAR_REG(X) ((X) >> 4) |
| 1372 | #define NEON_SCALAR_INDEX(X) ((X) & 15) |
| 1373 | |
| 1374 | /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't |
| 1375 | have enough information to be able to do a good job bounds-checking. So, we |
| 1376 | just do easy checks here, and do further checks later. */ |
| 1377 | |
| 1378 | static int |
| 1379 | parse_scalar (char **ccp, int elsize, struct neon_type_el *type) |
| 1380 | { |
| 1381 | int reg; |
| 1382 | char *str = *ccp; |
| 1383 | struct neon_typed_alias atype; |
| 1384 | |
| 1385 | reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype); |
| 1386 | |
| 1387 | if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0) |
| 1388 | return FAIL; |
| 1389 | |
| 1390 | if (atype.index == NEON_ALL_LANES) |
| 1391 | { |
| 1392 | first_error (_("scalar must have an index")); |
| 1393 | return FAIL; |
| 1394 | } |
| 1395 | else if (atype.index >= 64 / elsize) |
| 1396 | { |
| 1397 | first_error (_("scalar index out of range")); |
| 1398 | return FAIL; |
| 1399 | } |
| 1400 | |
| 1401 | if (type) |
| 1402 | *type = atype.eltype; |
| 1403 | |
| 1404 | *ccp = str; |
| 1405 | |
| 1406 | return reg * 16 + atype.index; |
| 1407 | } |
| 1408 | |
| 1409 | /* Parse an ARM register list. Returns the bitmask, or FAIL. */ |
| 1410 | static long |
| 1411 | parse_reg_list (char ** strp) |
| 1412 | { |
| 1413 | char * str = * strp; |
| 1414 | long range = 0; |
| 1415 | int another_range; |
| 1416 | |
| 1417 | /* We come back here if we get ranges concatenated by '+' or '|'. */ |
| 1418 | do |
| 1419 | { |
| 1420 | another_range = 0; |
| 1421 | |
| 1422 | if (*str == '{') |
| 1423 | { |
| 1424 | int in_range = 0; |
| 1425 | int cur_reg = -1; |
| 1426 | |
| 1427 | str++; |
| 1428 | do |
| 1429 | { |
| 1430 | int reg; |
| 1431 | |
| 1432 | if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL) |
| 1433 | { |
| 1434 | first_error (_(reg_expected_msgs[REG_TYPE_RN])); |
| 1435 | return FAIL; |
| 1436 | } |
| 1437 | |
| 1438 | if (in_range) |
| 1439 | { |
| 1440 | int i; |
| 1441 | |
| 1442 | if (reg <= cur_reg) |
| 1443 | { |
| 1444 | first_error (_("bad range in register list")); |
| 1445 | return FAIL; |
| 1446 | } |
| 1447 | |
| 1448 | for (i = cur_reg + 1; i < reg; i++) |
| 1449 | { |
| 1450 | if (range & (1 << i)) |
| 1451 | as_tsktsk |
| 1452 | (_("Warning: duplicated register (r%d) in register list"), |
| 1453 | i); |
| 1454 | else |
| 1455 | range |= 1 << i; |
| 1456 | } |
| 1457 | in_range = 0; |
| 1458 | } |
| 1459 | |
| 1460 | if (range & (1 << reg)) |
| 1461 | as_tsktsk (_("Warning: duplicated register (r%d) in register list"), |
| 1462 | reg); |
| 1463 | else if (reg <= cur_reg) |
| 1464 | as_tsktsk (_("Warning: register range not in ascending order")); |
| 1465 | |
| 1466 | range |= 1 << reg; |
| 1467 | cur_reg = reg; |
| 1468 | } |
| 1469 | while (skip_past_comma (&str) != FAIL |
| 1470 | || (in_range = 1, *str++ == '-')); |
| 1471 | str--; |
| 1472 | |
| 1473 | if (*str++ != '}') |
| 1474 | { |
| 1475 | first_error (_("missing `}'")); |
| 1476 | return FAIL; |
| 1477 | } |
| 1478 | } |
| 1479 | else |
| 1480 | { |
| 1481 | expressionS expr; |
| 1482 | |
| 1483 | if (my_get_expression (&expr, &str, GE_NO_PREFIX)) |
| 1484 | return FAIL; |
| 1485 | |
| 1486 | if (expr.X_op == O_constant) |
| 1487 | { |
| 1488 | if (expr.X_add_number |
| 1489 | != (expr.X_add_number & 0x0000ffff)) |
| 1490 | { |
| 1491 | inst.error = _("invalid register mask"); |
| 1492 | return FAIL; |
| 1493 | } |
| 1494 | |
| 1495 | if ((range & expr.X_add_number) != 0) |
| 1496 | { |
| 1497 | int regno = range & expr.X_add_number; |
| 1498 | |
| 1499 | regno &= -regno; |
| 1500 | regno = (1 << regno) - 1; |
| 1501 | as_tsktsk |
| 1502 | (_("Warning: duplicated register (r%d) in register list"), |
| 1503 | regno); |
| 1504 | } |
| 1505 | |
| 1506 | range |= expr.X_add_number; |
| 1507 | } |
| 1508 | else |
| 1509 | { |
| 1510 | if (inst.reloc.type != 0) |
| 1511 | { |
| 1512 | inst.error = _("expression too complex"); |
| 1513 | return FAIL; |
| 1514 | } |
| 1515 | |
| 1516 | memcpy (&inst.reloc.exp, &expr, sizeof (expressionS)); |
| 1517 | inst.reloc.type = BFD_RELOC_ARM_MULTI; |
| 1518 | inst.reloc.pc_rel = 0; |
| 1519 | } |
| 1520 | } |
| 1521 | |
| 1522 | if (*str == '|' || *str == '+') |
| 1523 | { |
| 1524 | str++; |
| 1525 | another_range = 1; |
| 1526 | } |
| 1527 | } |
| 1528 | while (another_range); |
| 1529 | |
| 1530 | *strp = str; |
| 1531 | return range; |
| 1532 | } |
| 1533 | |
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers.  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_NEON_D	/* Neon D registers, with syntax extensions
			   (see parse_vfp_reg_list).  */
};
| 1542 | |
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
         FIXME: This is not implemented, as it would require backtracking in
         some cases, e.g.:
           vtbl.8 d3,d4,d5
         This could be done (the meaning isn't really ambiguous), but doesn't
         fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  /* Select the register class; the D-register limit is determined
     below from the CPU features.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  max_regs = 32;
	  /* Record the use of a VFPv3 feature in the architecture
	     attributes for the current mode.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	max_regs = 16;
    }

  /* Start above any valid register number; the first parsed register
     replaces it.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* A Q register's upper bound covers two D-sized slots.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register inside the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): the current character is assumed to be the closing
     '}' and is skipped without being checked -- confirm malformed
     input cannot reach this point with something else.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
| 1723 | |
| 1724 | /* True if two alias types are the same. */ |
| 1725 | |
| 1726 | static int |
| 1727 | neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b) |
| 1728 | { |
| 1729 | if (!a && !b) |
| 1730 | return 1; |
| 1731 | |
| 1732 | if (!a || !b) |
| 1733 | return 0; |
| 1734 | |
| 1735 | if (a->defined != b->defined) |
| 1736 | return 0; |
| 1737 | |
| 1738 | if ((a->defined & NTA_HASTYPE) != 0 |
| 1739 | && (a->eltype.type != b->eltype.type |
| 1740 | || a->eltype.size != b->eltype.size)) |
| 1741 | return 0; |
| 1742 | |
| 1743 | if ((a->defined & NTA_HASINDEX) != 0 |
| 1744 | && (a->index != b->index)) |
| 1745 | return 0; |
| 1746 | |
| 1747 | return 1; |
| 1748 | } |
| 1749 | |
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
                           struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  int addregs = 1;
  /* These strings are localized at the point of use via _().  */
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: establishes the base; for Q registers each
	     entry covers two D-register slots.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	      addregs = 2;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: fixes the stride, which must be 1 or 2.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later registers must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (!neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
         modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (!neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D-sized slot between the range endpoints.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed entries in a list must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  /* Without a leading brace only a single register is accepted.  */
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane (bits [3:0]), stride-1 (bit 4) and length-1 (bits [6:5]).  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
| 1916 | |
| 1917 | /* Parse an explicit relocation suffix on an expression. This is |
| 1918 | either nothing, or a word in parentheses. Note that if !OBJ_ELF, |
| 1919 | arm_reloc_hsh contains no entries, so this function can only |
| 1920 | succeed if there is no () after the word. Returns -1 on error, |
| 1921 | BFD_RELOC_UNUSED if there wasn't any suffix. */ |
| 1922 | static int |
| 1923 | parse_reloc (char **str) |
| 1924 | { |
| 1925 | struct reloc_entry *r; |
| 1926 | char *p, *q; |
| 1927 | |
| 1928 | if (**str != '(') |
| 1929 | return BFD_RELOC_UNUSED; |
| 1930 | |
| 1931 | p = *str + 1; |
| 1932 | q = p; |
| 1933 | |
| 1934 | while (*q && *q != ')' && *q != ',') |
| 1935 | q++; |
| 1936 | if (*q != ')') |
| 1937 | return -1; |
| 1938 | |
| 1939 | if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL) |
| 1940 | return -1; |
| 1941 | |
| 1942 | *str = q + 1; |
| 1943 | return r->reloc; |
| 1944 | } |
| 1945 | |
| 1946 | /* Directives: register aliases. */ |
| 1947 | |
| 1948 | static struct reg_entry * |
| 1949 | insert_reg_alias (char *str, int number, int type) |
| 1950 | { |
| 1951 | struct reg_entry *new; |
| 1952 | const char *name; |
| 1953 | |
| 1954 | if ((new = hash_find (arm_reg_hsh, str)) != 0) |
| 1955 | { |
| 1956 | if (new->builtin) |
| 1957 | as_warn (_("ignoring attempt to redefine built-in register '%s'"), str); |
| 1958 | |
| 1959 | /* Only warn about a redefinition if it's not defined as the |
| 1960 | same register. */ |
| 1961 | else if (new->number != number || new->type != type) |
| 1962 | as_warn (_("ignoring redefinition of register alias '%s'"), str); |
| 1963 | |
| 1964 | return 0; |
| 1965 | } |
| 1966 | |
| 1967 | name = xstrdup (str); |
| 1968 | new = xmalloc (sizeof (struct reg_entry)); |
| 1969 | |
| 1970 | new->name = name; |
| 1971 | new->number = number; |
| 1972 | new->type = type; |
| 1973 | new->builtin = FALSE; |
| 1974 | new->neon = NULL; |
| 1975 | |
| 1976 | if (hash_insert (arm_reg_hsh, name, (PTR) new)) |
| 1977 | abort (); |
| 1978 | |
| 1979 | return new; |
| 1980 | } |
| 1981 | |
| 1982 | static void |
| 1983 | insert_neon_reg_alias (char *str, int number, int type, |
| 1984 | struct neon_typed_alias *atype) |
| 1985 | { |
| 1986 | struct reg_entry *reg = insert_reg_alias (str, number, type); |
| 1987 | |
| 1988 | if (!reg) |
| 1989 | { |
| 1990 | first_error (_("attempt to redefine typed alias")); |
| 1991 | return; |
| 1992 | } |
| 1993 | |
| 1994 | if (atype) |
| 1995 | { |
| 1996 | reg->neon = xmalloc (sizeof (struct neon_typed_alias)); |
| 1997 | *reg->neon = *atype; |
| 1998 | } |
| 1999 | } |
| 2000 | |
| 2001 | /* Look for the .req directive. This is of the form: |
| 2002 | |
| 2003 | new_register_name .req existing_register_name |
| 2004 | |
| 2005 | If we find one, or if it looks sufficiently like one that we want to |
| 2006 | handle any error here, return non-zero. Otherwise return zero. */ |
| 2007 | |
static int
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return 0;

  oldname += 6;
  if (*oldname == '\0')
    return 0;

  /* The right-hand side must already be a known register or alias.  */
  old = hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return 1;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Make a NUL-terminated working copy of the alias name on the stack.  */
  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
    *p = TOUPPER (*p);

  /* Only insert the case-folded variant when it actually differs from
     the name as written.  */
  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  /* Return non-zero: the line was a .req (even if it was ignored).  */
  return 1;
}
| 2065 | |
| 2066 | /* Create a Neon typed/indexed register alias using directives, e.g.: |
| 2067 | X .dn d5.s32[1] |
| 2068 | Y .qn 6.s16 |
| 2069 | Z .dn d7 |
| 2070 | T .dn Z[0] |
| 2071 | These typed registers can be used instead of the types specified after the |
| 2072 | Neon mnemonic, so long as all operands given have types. Types can also be |
| 2073 | specified directly, e.g.: |
| 2074 | vadd d0.s32, d1.s32, d2.s32 |
| 2075 | */ |
| 2076 | |
static int
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;
  int namelen;

  /* Start with no type or index information.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register alias, .qn a Q-register alias; anything
     else is not ours to handle.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return 0;

  p += 5;

  if (*p == '\0')
    return 0;

  /* The base may be an existing register (or typed alias)...  */
  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return 0;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return 0;
	}
      /* Fake up a register entry on the stack; Q registers number
	 two D registers apiece, hence the doubling.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
                                                  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index information already attached to the base.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return 0;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return 0;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return 0;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return 0;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return 0;
	}
    }

  /* NEWNAME..NAMEEND is the alias name; make a NUL-terminated copy.  */
  namelen = nameend - newname;
  namebuf = alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
                         typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
                           typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
                           typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Return non-zero: the line was a .dn/.qn directive.  */
  return 1;
}
| 2207 | |
| 2208 | /* Should never be called, as .req goes between the alias and the |
| 2209 | register name, not at the beginning of the line. */ |
/* The valid form (alias .req register) is handled by
   create_register_alias before directives are dispatched.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
| 2215 | |
/* Like s_req: a .dn at the start of a line is always an error; the
   valid form (alias .dn reg) is handled by create_neon_reg_alias.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
| 2221 | |
/* Like s_req: a .qn at the start of a line is always an error; the
   valid form (alias .qn reg) is handled by create_neon_reg_alias.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
| 2227 | |
| 2228 | /* The .unreq directive deletes an alias which was previously defined |
| 2229 | by .req. For example: |
| 2230 | |
| 2231 | my_alias .req r11 |
| 2232 | .unreq my_alias */ |
| 2233 | |
| 2234 | static void |
| 2235 | s_unreq (int a ATTRIBUTE_UNUSED) |
| 2236 | { |
| 2237 | char * name; |
| 2238 | char saved_char; |
| 2239 | |
| 2240 | name = input_line_pointer; |
| 2241 | |
| 2242 | while (*input_line_pointer != 0 |
| 2243 | && *input_line_pointer != ' ' |
| 2244 | && *input_line_pointer != '\n') |
| 2245 | ++input_line_pointer; |
| 2246 | |
| 2247 | saved_char = *input_line_pointer; |
| 2248 | *input_line_pointer = 0; |
| 2249 | |
| 2250 | if (!*name) |
| 2251 | as_bad (_("invalid syntax for .unreq directive")); |
| 2252 | else |
| 2253 | { |
| 2254 | struct reg_entry *reg = hash_find (arm_reg_hsh, name); |
| 2255 | |
| 2256 | if (!reg) |
| 2257 | as_bad (_("unknown register alias '%s'"), name); |
| 2258 | else if (reg->builtin) |
| 2259 | as_warn (_("ignoring attempt to undefine built-in register '%s'"), |
| 2260 | name); |
| 2261 | else |
| 2262 | { |
| 2263 | hash_delete (arm_reg_hsh, name); |
| 2264 | free ((char *) reg->name); |
| 2265 | if (reg->neon) |
| 2266 | free (reg->neon); |
| 2267 | free (reg); |
| 2268 | } |
| 2269 | } |
| 2270 | |
| 2271 | *input_line_pointer = saved_char; |
| 2272 | demand_empty_rest_of_line (); |
| 2273 | } |
| 2274 | |
| 2275 | /* Directives: Instruction set selection. */ |
| 2276 | |
| 2277 | #ifdef OBJ_ELF |
| 2278 | /* This code is to handle mapping symbols as defined in the ARM ELF spec. |
| 2279 | (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0). |
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
| 2281 | and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */ |
| 2282 | |
| 2283 | static enum mstate mapstate = MAP_UNDEFINED; |
| 2284 | |
/* Record a change of mapping state (ARM code, Thumb code or data) and
   emit the corresponding $a/$t/$d mapping symbol at the current
   position, unless one has already been emitted for this state.  */
void
mapping_state (enum mstate state)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  mapstate = state;

  /* All three mapping symbols are untyped (see the note above the
     mapstate declaration).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    case MAP_UNDEFINED:
      return;
    default:
      abort ();
    }

  /* NOTE(review): the early-out above tests the file-scope MAPSTATE,
     but the state is also recorded per segment here; switching
     segments between calls could suppress a needed symbol -- verify
     against the callers.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;

  symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (symbolP);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark code-mapping symbols with the Thumb/interwork annotations;
     $d gets neither.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      return;
    }
}
| 2344 | #else |
| 2345 | #define mapping_state(x) /* nothing */ |
| 2346 | #endif |
| 2347 | |
| 2348 | /* Find the real, Thumb encoded start of a Thumb function. */ |
| 2349 | |
| 2350 | static symbolS * |
| 2351 | find_real_start (symbolS * symbolP) |
| 2352 | { |
| 2353 | char * real_start; |
| 2354 | const char * name = S_GET_NAME (symbolP); |
| 2355 | symbolS * new_target; |
| 2356 | |
| 2357 | /* This definition must agree with the one in gcc/config/arm/thumb.c. */ |
| 2358 | #define STUB_NAME ".real_start_of" |
| 2359 | |
| 2360 | if (name == NULL) |
| 2361 | abort (); |
| 2362 | |
| 2363 | /* The compiler may generate BL instructions to local labels because |
| 2364 | it needs to perform a branch to a far away location. These labels |
| 2365 | do not have a corresponding ".real_start_of" label. We check |
| 2366 | both for S_IS_LOCAL and for a leading dot, to give a way to bypass |
| 2367 | the ".real_start_of" convention for nonlocal branches. */ |
| 2368 | if (S_IS_LOCAL (symbolP) || name[0] == '.') |
| 2369 | return symbolP; |
| 2370 | |
| 2371 | real_start = ACONCAT ((STUB_NAME, name, NULL)); |
| 2372 | new_target = symbol_find (real_start); |
| 2373 | |
| 2374 | if (new_target == NULL) |
| 2375 | { |
| 2376 | as_warn ("Failed to find real start of function: %s\n", name); |
| 2377 | new_target = symbolP; |
| 2378 | } |
| 2379 | |
| 2380 | return new_target; |
| 2381 | } |
| 2382 | |
/* Switch the selected instruction set to WIDTH bits: 16 selects Thumb,
   16/32 mapping symbols are emitted either way.  Complains if the
   target CPU lacks the requested instruction set.  */
static void
opcode_select (int width)
{
  switch (width)
    {
    case 16:
      if (! thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	    as_bad (_("selected processor does not support THUMB opcodes"));

	  thumb_mode = 1;
	  /* No need to force the alignment, since we will have been
	     coming from ARM mode, which is word-aligned.  */
	  record_alignment (now_seg, 1);
	}
      mapping_state (MAP_THUMB);
      break;

    case 32:
      if (thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	    as_bad (_("selected processor does not support ARM opcodes"));

	  thumb_mode = 0;

	  /* Unlike the 16-bit case, Thumb alignment may not be enough
	     for ARM instructions, so pad to a word boundary.  */
	  if (!need_pass_2)
	    frag_align (2, 0, 0);

	  record_alignment (now_seg, 1);
	}
      mapping_state (MAP_ARM);
      break;

    default:
      as_bad (_("invalid instruction size selected (%d)"), width);
    }
}
| 2422 | |
/* Handle the .arm directive: switch to 32-bit (ARM) encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
| 2429 | |
/* Handle the .thumb directive: switch to 16-bit (Thumb) encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
| 2436 | |
| 2437 | static void |
| 2438 | s_code (int unused ATTRIBUTE_UNUSED) |
| 2439 | { |
| 2440 | int temp; |
| 2441 | |
| 2442 | temp = get_absolute_expression (); |
| 2443 | switch (temp) |
| 2444 | { |
| 2445 | case 16: |
| 2446 | case 32: |
| 2447 | opcode_select (temp); |
| 2448 | break; |
| 2449 | |
| 2450 | default: |
| 2451 | as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp); |
| 2452 | } |
| 2453 | } |
| 2454 | |
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* NOTE(review): 2 (rather than 1) presumably marks a forced
	 switch that bypassed the feature check -- confirm against
	 the other readers of thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
| 2471 | |
/* Handle the .thumb_func directive: switch to Thumb encoding and mark
   the next label as the start of a Thumb function.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* Behaves exactly like .thumb for the mode switch itself.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
| 2481 | |
| 2482 | /* Perform a .set directive, but also mark the alias as |
| 2483 | being a thumb function. */ |
| 2484 | |
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  /* Put the delimiter clobbered by get_symbol_end back so the comma
     check below sees the original text.  */
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* Re-terminate NAME just for the diagnostic, then restore.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    } /* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* With EQUIV non-zero, redefining an already-defined symbol is an
     error (register aliases excepted).  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
| 2571 | |
| 2572 | /* Directives: Mode selection. */ |
| 2573 | |
| 2574 | /* .syntax [unified|divided] - choose the new unified syntax |
| 2575 | (same for Arm and Thumb encoding, modulo slight differences in what |
| 2576 | can be represented) or the old divergent syntax for each mode. */ |
| 2577 | static void |
| 2578 | s_syntax (int unused ATTRIBUTE_UNUSED) |
| 2579 | { |
| 2580 | char *name, delim; |
| 2581 | |
| 2582 | name = input_line_pointer; |
| 2583 | delim = get_symbol_end (); |
| 2584 | |
| 2585 | if (!strcasecmp (name, "unified")) |
| 2586 | unified_syntax = TRUE; |
| 2587 | else if (!strcasecmp (name, "divided")) |
| 2588 | unified_syntax = FALSE; |
| 2589 | else |
| 2590 | { |
| 2591 | as_bad (_("unrecognized syntax mode \"%s\""), name); |
| 2592 | return; |
| 2593 | } |
| 2594 | *input_line_pointer = delim; |
| 2595 | demand_empty_rest_of_line (); |
| 2596 | } |
| 2597 | |
| 2598 | /* Directives: sectioning and alignment. */ |
| 2599 | |
| 2600 | /* Same as s_align_ptwo but align 0 => align 2. */ |
| 2601 | |
| 2602 | static void |
| 2603 | s_align (int unused ATTRIBUTE_UNUSED) |
| 2604 | { |
| 2605 | int temp; |
| 2606 | long temp_fill; |
| 2607 | long max_alignment = 15; |
| 2608 | |
| 2609 | temp = get_absolute_expression (); |
| 2610 | if (temp > max_alignment) |
| 2611 | as_bad (_("alignment too large: %d assumed"), temp = max_alignment); |
| 2612 | else if (temp < 0) |
| 2613 | { |
| 2614 | as_bad (_("alignment negative. 0 assumed.")); |
| 2615 | temp = 0; |
| 2616 | } |
| 2617 | |
| 2618 | if (*input_line_pointer == ',') |
| 2619 | { |
| 2620 | input_line_pointer++; |
| 2621 | temp_fill = get_absolute_expression (); |
| 2622 | } |
| 2623 | else |
| 2624 | temp_fill = 0; |
| 2625 | |
| 2626 | if (!temp) |
| 2627 | temp = 2; |
| 2628 | |
| 2629 | /* Only make a frag if we HAVE to. */ |
| 2630 | if (temp && !need_pass_2) |
| 2631 | frag_align (temp, (int) temp_fill, 0); |
| 2632 | demand_empty_rest_of_line (); |
| 2633 | |
| 2634 | record_alignment (now_seg, temp); |
| 2635 | } |
| 2636 | |
/* Handle the .bss directive: switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* BSS contents count as data for mapping-symbol purposes.  */
  mapping_state (MAP_DATA);
}
| 2646 | |
/* Handle the .even directive: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
| 2658 | |
| 2659 | /* Directives: Literal pools. */ |
| 2660 | |
| 2661 | static literal_pool * |
| 2662 | find_literal_pool (void) |
| 2663 | { |
| 2664 | literal_pool * pool; |
| 2665 | |
| 2666 | for (pool = list_of_pools; pool != NULL; pool = pool->next) |
| 2667 | { |
| 2668 | if (pool->section == now_seg |
| 2669 | && pool->sub_section == now_subseg) |
| 2670 | break; |
| 2671 | } |
| 2672 | |
| 2673 | return pool; |
| 2674 | } |
| 2675 | |
| 2676 | static literal_pool * |
| 2677 | find_or_make_literal_pool (void) |
| 2678 | { |
| 2679 | /* Next literal pool ID number. */ |
| 2680 | static unsigned int latest_pool_num = 1; |
| 2681 | literal_pool * pool; |
| 2682 | |
| 2683 | pool = find_literal_pool (); |
| 2684 | |
| 2685 | if (pool == NULL) |
| 2686 | { |
| 2687 | /* Create a new pool. */ |
| 2688 | pool = xmalloc (sizeof (* pool)); |
| 2689 | if (! pool) |
| 2690 | return NULL; |
| 2691 | |
| 2692 | pool->next_free_entry = 0; |
| 2693 | pool->section = now_seg; |
| 2694 | pool->sub_section = now_subseg; |
| 2695 | pool->next = list_of_pools; |
| 2696 | pool->symbol = NULL; |
| 2697 | |
| 2698 | /* Add it to the list. */ |
| 2699 | list_of_pools = pool; |
| 2700 | } |
| 2701 | |
| 2702 | /* New pools, and emptied pools, will have a NULL symbol. */ |
| 2703 | if (pool->symbol == NULL) |
| 2704 | { |
| 2705 | pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section, |
| 2706 | (valueT) 0, &zero_address_frag); |
| 2707 | pool->id = latest_pool_num ++; |
| 2708 | } |
| 2709 | |
| 2710 | /* Done. */ |
| 2711 | return pool; |
| 2712 | } |
| 2713 | |
| 2714 | /* Add the literal in the global 'inst' |
   structure to the relevant literal pool.  */
| 2716 | |
static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      /* Match an identical constant (value and signedness)...  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      /* ... or an identical symbolic expression.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  /* Rewrite the instruction's expression to reference the pool slot:
     pool anchor symbol plus a 4-bytes-per-entry offset.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
| 2766 | |
| 2767 | /* Can't use symbol_new here, so have to create a symbol and then at |
   a later date assign it a value.  That's what these functions do.  */
| 2769 | |
static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Run the per-object-format and per-target new-symbol hooks.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
| 2817 | |
| 2818 | |
/* Handle the .ltorg directive: emit the current literal pool here and
   mark it empty so a fresh pool starts afterwards.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    /* Nothing pending for this (sub)section.  */
    return;

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  /* NOTE(review): the \002 byte in the name presumably makes
     collisions with user symbols impossible -- confirm.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's anchor symbol its real location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
| 2861 | |
| 2862 | #ifdef OBJ_ELF |
| 2863 | /* Forward declarations for functions below, in the MD interface |
| 2864 | section. */ |
| 2865 | static void fix_new_arm (fragS *, int, short, expressionS *, int, int); |
| 2866 | static valueT create_unwind_entry (int); |
| 2867 | static void start_unwind_section (const segT, int); |
| 2868 | static void add_unwind_opcode (valueT, int); |
| 2869 | static void flush_pending_unwind (void); |
| 2870 | |
| 2871 | /* Directives: Data. */ |
| 2872 | |
/* Implement the .word/.long directives for ELF targets.  Like the
   generic cons() but additionally accepts a relocation-specifier
   suffix on symbolic expressions, e.g. ".word sym(got)".  NBYTES is
   the size of each emitted value (4 for both .word and .long).  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* An empty ".word" with no operands emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Record that this region of the section holds data, not code.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      /* Start of the current expression's text; kept so the text can
	 be rearranged below when a relocation suffix is present.  */
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic expression may be followed by a parenthesised
	     relocation specifier such as "(got)".  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: emit the value normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
	      int size = bfd_get_reloc_size (howto);

	      /* (plt) only makes sense on branch instructions, not on
		 data words.  */
	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  /* Save the expression text, splice out the reloc
		     suffix so the whole expression is contiguous,
		     re-parse it, then restore the original text.  */
		  char *save_buf = alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the relocation in the low-order bytes when
		     it is narrower than the emitted value.  */
		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
| 2960 | |
| 2961 | |
| 2962 | /* Parse a .rel31 directive. */ |
| 2963 | |
| 2964 | static void |
| 2965 | s_arm_rel31 (int ignored ATTRIBUTE_UNUSED) |
| 2966 | { |
| 2967 | expressionS exp; |
| 2968 | char *p; |
| 2969 | valueT highbit; |
| 2970 | |
| 2971 | highbit = 0; |
| 2972 | if (*input_line_pointer == '1') |
| 2973 | highbit = 0x80000000; |
| 2974 | else if (*input_line_pointer != '0') |
| 2975 | as_bad (_("expected 0 or 1")); |
| 2976 | |
| 2977 | input_line_pointer++; |
| 2978 | if (*input_line_pointer != ',') |
| 2979 | as_bad (_("missing comma")); |
| 2980 | input_line_pointer++; |
| 2981 | |
| 2982 | #ifdef md_flush_pending_output |
| 2983 | md_flush_pending_output (); |
| 2984 | #endif |
| 2985 | |
| 2986 | #ifdef md_cons_align |
| 2987 | md_cons_align (4); |
| 2988 | #endif |
| 2989 | |
| 2990 | mapping_state (MAP_DATA); |
| 2991 | |
| 2992 | expression (&exp); |
| 2993 | |
| 2994 | p = frag_more (4); |
| 2995 | md_number_to_chars (p, highbit, 4); |
| 2996 | fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1, |
| 2997 | BFD_RELOC_ARM_PREL31); |
| 2998 | |
| 2999 | demand_empty_rest_of_line (); |
| 3000 | } |
| 3001 | |
| 3002 | /* Directives: AEABI stack-unwind tables. */ |
| 3003 | |
| 3004 | /* Parse an unwind_fnstart directive. Simply records the current location. */ |
| 3005 | |
| 3006 | static void |
| 3007 | s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED) |
| 3008 | { |
| 3009 | demand_empty_rest_of_line (); |
| 3010 | /* Mark the start of the function. */ |
| 3011 | unwind.proc_start = expr_build_dot (); |
| 3012 | |
| 3013 | /* Reset the rest of the unwind info. */ |
| 3014 | unwind.opcode_count = 0; |
| 3015 | unwind.table_entry = NULL; |
| 3016 | unwind.personality_routine = NULL; |
| 3017 | unwind.personality_index = -1; |
| 3018 | unwind.frame_size = 0; |
| 3019 | unwind.fp_offset = 0; |
| 3020 | unwind.fp_reg = 13; |
| 3021 | unwind.fp_used = 0; |
| 3022 | unwind.sp_restored = 0; |
| 3023 | } |
| 3024 | |
| 3025 | |
| 3026 | /* Parse a handlerdata directive. Creates the exception handling table entry |
| 3027 | for the function. */ |
| 3028 | |
| 3029 | static void |
| 3030 | s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED) |
| 3031 | { |
| 3032 | demand_empty_rest_of_line (); |
| 3033 | if (unwind.table_entry) |
| 3034 | as_bad (_("dupicate .handlerdata directive")); |
| 3035 | |
| 3036 | create_unwind_entry (1); |
| 3037 | } |
| 3038 | |
| 3039 | /* Parse an unwind_fnend directive. Generates the index table entry. */ |
| 3040 | |
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;

  demand_empty_rest_of_line ();

  /* Add eh table entry.  If .handlerdata already created one,
     create_unwind_entry is not called again; VAL is then zero and the
     index entry below points at the existing table entry instead of
     holding an inline entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  /* WHERE is the offset of the entry just allocated.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] = {
	"__aeabi_unwind_cpp_pr0",
	"__aeabi_unwind_cpp_pr1",
	"__aeabi_unwind_cpp_pr2"
      };
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fix: records the reference for the
	 linker without changing any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      marked_pr_dependency |= 1 << unwind.personality_index;
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	= marked_pr_dependency;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
}
| 3096 | |
| 3097 | |
| 3098 | /* Parse an unwind_cantunwind directive. */ |
| 3099 | |
| 3100 | static void |
| 3101 | s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED) |
| 3102 | { |
| 3103 | demand_empty_rest_of_line (); |
| 3104 | if (unwind.personality_routine || unwind.personality_index != -1) |
| 3105 | as_bad (_("personality routine specified for cantunwind frame")); |
| 3106 | |
| 3107 | unwind.personality_index = -2; |
| 3108 | } |
| 3109 | |
| 3110 | |
| 3111 | /* Parse a personalityindex directive. */ |
| 3112 | |
| 3113 | static void |
| 3114 | s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED) |
| 3115 | { |
| 3116 | expressionS exp; |
| 3117 | |
| 3118 | if (unwind.personality_routine || unwind.personality_index != -1) |
| 3119 | as_bad (_("duplicate .personalityindex directive")); |
| 3120 | |
| 3121 | expression (&exp); |
| 3122 | |
| 3123 | if (exp.X_op != O_constant |
| 3124 | || exp.X_add_number < 0 || exp.X_add_number > 15) |
| 3125 | { |
| 3126 | as_bad (_("bad personality routine number")); |
| 3127 | ignore_rest_of_line (); |
| 3128 | return; |
| 3129 | } |
| 3130 | |
| 3131 | unwind.personality_index = exp.X_add_number; |
| 3132 | |
| 3133 | demand_empty_rest_of_line (); |
| 3134 | } |
| 3135 | |
| 3136 | |
| 3137 | /* Parse a personality directive. */ |
| 3138 | |
| 3139 | static void |
| 3140 | s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED) |
| 3141 | { |
| 3142 | char *name, *p, c; |
| 3143 | |
| 3144 | if (unwind.personality_routine || unwind.personality_index != -1) |
| 3145 | as_bad (_("duplicate .personality directive")); |
| 3146 | |
| 3147 | name = input_line_pointer; |
| 3148 | c = get_symbol_end (); |
| 3149 | p = input_line_pointer; |
| 3150 | unwind.personality_routine = symbol_find_or_make (name); |
| 3151 | *p = c; |
| 3152 | demand_empty_rest_of_line (); |
| 3153 | } |
| 3154 | |
| 3155 | |
| 3156 | /* Parse a directive saving core registers. */ |
| 3157 | |
/* Parse a directive saving core registers.  RANGE is a bit mask with
   bit N set for register rN.  Emits the EHABI pop opcodes for the
   saved registers and accounts for the stack space they occupy.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the opcode emitted by the earlier movsp and rewrite the
	 mask: bit 12 (ip) becomes bit 13 (sp).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  The mask 0xbff0 excludes
	 r14, which the short form can encode separately.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  Each core register is one
     word on the stack.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
| 3231 | |
| 3232 | |
| 3233 | /* Parse a directive saving FPA registers. */ |
| 3234 | |
| 3235 | static void |
| 3236 | s_arm_unwind_save_fpa (int reg) |
| 3237 | { |
| 3238 | expressionS exp; |
| 3239 | int num_regs; |
| 3240 | valueT op; |
| 3241 | |
| 3242 | /* Get Number of registers to transfer. */ |
| 3243 | if (skip_past_comma (&input_line_pointer) != FAIL) |
| 3244 | expression (&exp); |
| 3245 | else |
| 3246 | exp.X_op = O_illegal; |
| 3247 | |
| 3248 | if (exp.X_op != O_constant) |
| 3249 | { |
| 3250 | as_bad (_("expected , <constant>")); |
| 3251 | ignore_rest_of_line (); |
| 3252 | return; |
| 3253 | } |
| 3254 | |
| 3255 | num_regs = exp.X_add_number; |
| 3256 | |
| 3257 | if (num_regs < 1 || num_regs > 4) |
| 3258 | { |
| 3259 | as_bad (_("number of registers must be in the range [1:4]")); |
| 3260 | ignore_rest_of_line (); |
| 3261 | return; |
| 3262 | } |
| 3263 | |
| 3264 | demand_empty_rest_of_line (); |
| 3265 | |
| 3266 | if (reg == 4) |
| 3267 | { |
| 3268 | /* Short form. */ |
| 3269 | op = 0xb4 | (num_regs - 1); |
| 3270 | add_unwind_opcode (op, 1); |
| 3271 | } |
| 3272 | else |
| 3273 | { |
| 3274 | /* Long form. */ |
| 3275 | op = 0xc800 | (reg << 4) | (num_regs - 1); |
| 3276 | add_unwind_opcode (op, 2); |
| 3277 | } |
| 3278 | unwind.frame_size += num_regs * 12; |
| 3279 | } |
| 3280 | |
| 3281 | |
| 3282 | /* Parse a directive saving VFP registers for ARMv6 and above. */ |
| 3283 | |
/* Parse a directive saving VFP registers for ARMv6 and above.
   Registers d0-d15 and d16-d31 (VFPv3) need different opcodes, so a
   list that straddles d16 is split into two opcodes.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes the first register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes of stack.  */
  unwind.frame_size += count * 8;
}
| 3330 | |
| 3331 | |
| 3332 | /* Parse a directive saving VFP registers for pre-ARMv6. */ |
| 3333 | |
| 3334 | static void |
| 3335 | s_arm_unwind_save_vfp (void) |
| 3336 | { |
| 3337 | int count; |
| 3338 | unsigned int reg; |
| 3339 | valueT op; |
| 3340 | |
| 3341 | count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D); |
| 3342 | if (count == FAIL) |
| 3343 | { |
| 3344 | as_bad (_("expected register list")); |
| 3345 | ignore_rest_of_line (); |
| 3346 | return; |
| 3347 | } |
| 3348 | |
| 3349 | demand_empty_rest_of_line (); |
| 3350 | |
| 3351 | if (reg == 8) |
| 3352 | { |
| 3353 | /* Short form. */ |
| 3354 | op = 0xb8 | (count - 1); |
| 3355 | add_unwind_opcode (op, 1); |
| 3356 | } |
| 3357 | else |
| 3358 | { |
| 3359 | /* Long form. */ |
| 3360 | op = 0xb300 | (reg << 4) | (count - 1); |
| 3361 | add_unwind_opcode (op, 2); |
| 3362 | } |
| 3363 | unwind.frame_size += count * 8 + 4; |
| 3364 | } |
| 3365 | |
| 3366 | |
| 3367 | /* Parse a directive saving iWMMXt data registers. */ |
| 3368 | |
/* Parse a directive saving iWMMXt data registers.  Builds MASK with
   bit N set for register wrN, optionally merges with a previously
   emitted opcode, then emits save opcodes in descending register
   order.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  /* The surrounding braces of the register list are optional here.  */
  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* Any bit already set at or above REG means the list is not
	 strictly ascending.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* A range of the form wrM-wrN.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each wr register occupies 8 bytes of stack.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      /* 0xc0..0xc7: a previous short-form wr10..wr(10+i) save.  */
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Merge when this list is exactly {wr9} (bit 9 set,
		 nothing above): fold the previous block into MASK and
		 drop its opcode.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous save used the long (two-byte) form; its first
		 byte holds start register and count.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      /* Merge only when this list's top register is exactly
		 one below the previous block's start register.  */
	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceeding block.	*/
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
| 3501 | |
| 3502 | static void |
| 3503 | s_arm_unwind_save_mmxwcg (void) |
| 3504 | { |
| 3505 | int reg; |
| 3506 | int hi_reg; |
| 3507 | unsigned mask = 0; |
| 3508 | valueT op; |
| 3509 | |
| 3510 | if (*input_line_pointer == '{') |
| 3511 | input_line_pointer++; |
| 3512 | |
| 3513 | do |
| 3514 | { |
| 3515 | reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG); |
| 3516 | |
| 3517 | if (reg == FAIL) |
| 3518 | { |
| 3519 | as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG])); |
| 3520 | goto error; |
| 3521 | } |
| 3522 | |
| 3523 | reg -= 8; |
| 3524 | if (mask >> reg) |
| 3525 | as_tsktsk (_("register list not in ascending order")); |
| 3526 | mask |= 1 << reg; |
| 3527 | |
| 3528 | if (*input_line_pointer == '-') |
| 3529 | { |
| 3530 | input_line_pointer++; |
| 3531 | hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG); |
| 3532 | if (hi_reg == FAIL) |
| 3533 | { |
| 3534 | as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG])); |
| 3535 | goto error; |
| 3536 | } |
| 3537 | else if (reg >= hi_reg) |
| 3538 | { |
| 3539 | as_bad (_("bad register range")); |
| 3540 | goto error; |
| 3541 | } |
| 3542 | for (; reg < hi_reg; reg++) |
| 3543 | mask |= 1 << reg; |
| 3544 | } |
| 3545 | } |
| 3546 | while (skip_past_comma (&input_line_pointer) != FAIL); |
| 3547 | |
| 3548 | if (*input_line_pointer == '}') |
| 3549 | input_line_pointer++; |
| 3550 | |
| 3551 | demand_empty_rest_of_line (); |
| 3552 | |
| 3553 | /* Generate any deferred opcodes because we're going to be looking at |
| 3554 | the list. */ |
| 3555 | flush_pending_unwind (); |
| 3556 | |
| 3557 | for (reg = 0; reg < 16; reg++) |
| 3558 | { |
| 3559 | if (mask & (1 << reg)) |
| 3560 | unwind.frame_size += 4; |
| 3561 | } |
| 3562 | op = 0xc700 | mask; |
| 3563 | add_unwind_opcode (op, 2); |
| 3564 | return; |
| 3565 | error: |
| 3566 | ignore_rest_of_line (); |
| 3567 | } |
| 3568 | |
| 3569 | |
| 3570 | /* Parse an unwind_save directive. |
| 3571 | If the argument is non-zero, this is a .vsave directive. */ |
| 3572 | |
| 3573 | static void |
| 3574 | s_arm_unwind_save (int arch_v6) |
| 3575 | { |
| 3576 | char *peek; |
| 3577 | struct reg_entry *reg; |
| 3578 | bfd_boolean had_brace = FALSE; |
| 3579 | |
| 3580 | /* Figure out what sort of save we have. */ |
| 3581 | peek = input_line_pointer; |
| 3582 | |
| 3583 | if (*peek == '{') |
| 3584 | { |
| 3585 | had_brace = TRUE; |
| 3586 | peek++; |
| 3587 | } |
| 3588 | |
| 3589 | reg = arm_reg_parse_multi (&peek); |
| 3590 | |
| 3591 | if (!reg) |
| 3592 | { |
| 3593 | as_bad (_("register expected")); |
| 3594 | ignore_rest_of_line (); |
| 3595 | return; |
| 3596 | } |
| 3597 | |
| 3598 | switch (reg->type) |
| 3599 | { |
| 3600 | case REG_TYPE_FN: |
| 3601 | if (had_brace) |
| 3602 | { |
| 3603 | as_bad (_("FPA .unwind_save does not take a register list")); |
| 3604 | ignore_rest_of_line (); |
| 3605 | return; |
| 3606 | } |
| 3607 | s_arm_unwind_save_fpa (reg->number); |
| 3608 | return; |
| 3609 | |
| 3610 | case REG_TYPE_RN: s_arm_unwind_save_core (); return; |
| 3611 | case REG_TYPE_VFD: |
| 3612 | if (arch_v6) |
| 3613 | s_arm_unwind_save_vfp_armv6 (); |
| 3614 | else |
| 3615 | s_arm_unwind_save_vfp (); |
| 3616 | return; |
| 3617 | case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return; |
| 3618 | case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return; |
| 3619 | |
| 3620 | default: |
| 3621 | as_bad (_(".unwind_save does not support this kind of register")); |
| 3622 | ignore_rest_of_line (); |
| 3623 | } |
| 3624 | } |
| 3625 | |
| 3626 | |
| 3627 | /* Parse an unwind_movsp directive. */ |
| 3628 | |
| 3629 | static void |
| 3630 | s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED) |
| 3631 | { |
| 3632 | int reg; |
| 3633 | valueT op; |
| 3634 | int offset; |
| 3635 | |
| 3636 | reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); |
| 3637 | if (reg == FAIL) |
| 3638 | { |
| 3639 | as_bad (_(reg_expected_msgs[REG_TYPE_RN])); |
| 3640 | ignore_rest_of_line (); |
| 3641 | return; |
| 3642 | } |
| 3643 | |
| 3644 | /* Optional constant. */ |
| 3645 | if (skip_past_comma (&input_line_pointer) != FAIL) |
| 3646 | { |
| 3647 | if (immediate_for_directive (&offset) == FAIL) |
| 3648 | return; |
| 3649 | } |
| 3650 | else |
| 3651 | offset = 0; |
| 3652 | |
| 3653 | demand_empty_rest_of_line (); |
| 3654 | |
| 3655 | if (reg == REG_SP || reg == REG_PC) |
| 3656 | { |
| 3657 | as_bad (_("SP and PC not permitted in .unwind_movsp directive")); |
| 3658 | return; |
| 3659 | } |
| 3660 | |
| 3661 | if (unwind.fp_reg != REG_SP) |
| 3662 | as_bad (_("unexpected .unwind_movsp directive")); |
| 3663 | |
| 3664 | /* Generate opcode to restore the value. */ |
| 3665 | op = 0x90 | reg; |
| 3666 | add_unwind_opcode (op, 1); |
| 3667 | |
| 3668 | /* Record the information for later. */ |
| 3669 | unwind.fp_reg = reg; |
| 3670 | unwind.fp_offset = unwind.frame_size - offset; |
| 3671 | unwind.sp_restored = 1; |
| 3672 | } |
| 3673 | |
| 3674 | /* Parse an unwind_pad directive. */ |
| 3675 | |
| 3676 | static void |
| 3677 | s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED) |
| 3678 | { |
| 3679 | int offset; |
| 3680 | |
| 3681 | if (immediate_for_directive (&offset) == FAIL) |
| 3682 | return; |
| 3683 | |
| 3684 | if (offset & 3) |
| 3685 | { |
| 3686 | as_bad (_("stack increment must be multiple of 4")); |
| 3687 | ignore_rest_of_line (); |
| 3688 | return; |
| 3689 | } |
| 3690 | |
| 3691 | /* Don't generate any opcodes, just record the details for later. */ |
| 3692 | unwind.frame_size += offset; |
| 3693 | unwind.pending_offset += offset; |
| 3694 | |
| 3695 | demand_empty_rest_of_line (); |
| 3696 | } |
| 3697 | |
| 3698 | /* Parse an unwind_setfp directive. */ |
| 3699 | |
| 3700 | static void |
| 3701 | s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED) |
| 3702 | { |
| 3703 | int sp_reg; |
| 3704 | int fp_reg; |
| 3705 | int offset; |
| 3706 | |
| 3707 | fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); |
| 3708 | if (skip_past_comma (&input_line_pointer) == FAIL) |
| 3709 | sp_reg = FAIL; |
| 3710 | else |
| 3711 | sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN); |
| 3712 | |
| 3713 | if (fp_reg == FAIL || sp_reg == FAIL) |
| 3714 | { |
| 3715 | as_bad (_("expected <reg>, <reg>")); |
| 3716 | ignore_rest_of_line (); |
| 3717 | return; |
| 3718 | } |
| 3719 | |
| 3720 | /* Optional constant. */ |
| 3721 | if (skip_past_comma (&input_line_pointer) != FAIL) |
| 3722 | { |
| 3723 | if (immediate_for_directive (&offset) == FAIL) |
| 3724 | return; |
| 3725 | } |
| 3726 | else |
| 3727 | offset = 0; |
| 3728 | |
| 3729 | demand_empty_rest_of_line (); |
| 3730 | |
| 3731 | if (sp_reg != 13 && sp_reg != unwind.fp_reg) |
| 3732 | { |
| 3733 | as_bad (_("register must be either sp or set by a previous" |
| 3734 | "unwind_movsp directive")); |
| 3735 | return; |
| 3736 | } |
| 3737 | |
| 3738 | /* Don't generate any opcodes, just record the information for later. */ |
| 3739 | unwind.fp_reg = fp_reg; |
| 3740 | unwind.fp_used = 1; |
| 3741 | if (sp_reg == 13) |
| 3742 | unwind.fp_offset = unwind.frame_size - offset; |
| 3743 | else |
| 3744 | unwind.fp_offset -= offset; |
| 3745 | } |
| 3746 | |
| 3747 | /* Parse an unwind_raw directive. */ |
| 3748 | |
| 3749 | static void |
| 3750 | s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED) |
| 3751 | { |
| 3752 | expressionS exp; |
| 3753 | /* This is an arbitrary limit. */ |
| 3754 | unsigned char op[16]; |
| 3755 | int count; |
| 3756 | |
| 3757 | expression (&exp); |
| 3758 | if (exp.X_op == O_constant |
| 3759 | && skip_past_comma (&input_line_pointer) != FAIL) |
| 3760 | { |
| 3761 | unwind.frame_size += exp.X_add_number; |
| 3762 | expression (&exp); |
| 3763 | } |
| 3764 | else |
| 3765 | exp.X_op = O_illegal; |
| 3766 | |
| 3767 | if (exp.X_op != O_constant) |
| 3768 | { |
| 3769 | as_bad (_("expected <offset>, <opcode>")); |
| 3770 | ignore_rest_of_line (); |
| 3771 | return; |
| 3772 | } |
| 3773 | |
| 3774 | count = 0; |
| 3775 | |
| 3776 | /* Parse the opcode. */ |
| 3777 | for (;;) |
| 3778 | { |
| 3779 | if (count >= 16) |
| 3780 | { |
| 3781 | as_bad (_("unwind opcode too long")); |
| 3782 | ignore_rest_of_line (); |
| 3783 | } |
| 3784 | if (exp.X_op != O_constant || exp.X_add_number & ~0xff) |
| 3785 | { |
| 3786 | as_bad (_("invalid unwind opcode")); |
| 3787 | ignore_rest_of_line (); |
| 3788 | return; |
| 3789 | } |
| 3790 | op[count++] = exp.X_add_number; |
| 3791 | |
| 3792 | /* Parse the next byte. */ |
| 3793 | if (skip_past_comma (&input_line_pointer) == FAIL) |
| 3794 | break; |
| 3795 | |
| 3796 | expression (&exp); |
| 3797 | } |
| 3798 | |
| 3799 | /* Add the opcode bytes in reverse order. */ |
| 3800 | while (count--) |
| 3801 | add_unwind_opcode (op[count], 1); |
| 3802 | |
| 3803 | demand_empty_rest_of_line (); |
| 3804 | } |
| 3805 | |
| 3806 | |
| 3807 | /* Parse a .eabi_attribute directive. */ |
| 3808 | |
/* Parse a .eabi_attribute directive: ".eabi_attribute <tag>, <value>".
   Depending on the tag, the value is an integer, a string, or (for
   Tag_compatibility) an integer followed by a string.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  bfd_boolean is_string;
  int tag;
  unsigned int i = 0;
  char *s = NULL;
  char saved_char;

  expression (& exp);
  if (exp.X_op != O_constant)
    goto bad;

  tag = exp.X_add_number;
  /* Tags 4, 5, 32 and odd tags above 32 take string values; the rest
     take integers.  */
  if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
    is_string = 1;
  else
    is_string = 0;

  if (skip_past_comma (&input_line_pointer) == FAIL)
    goto bad;
  /* Tag_compatibility (32) takes an integer AND a string, so the
     integer is read here even though IS_STRING is set.  */
  if (tag == 32 || !is_string)
    {
      expression (& exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expected numeric constant"));
	  ignore_rest_of_line ();
	  return;
	}
      i = exp.X_add_number;
    }
  if (tag == Tag_compatibility
      && skip_past_comma (&input_line_pointer) == FAIL)
    {
      as_bad (_("expected comma"));
      ignore_rest_of_line ();
      return;
    }
  if (is_string)
    {
      /* Read a double-quoted string, temporarily NUL-terminating it
	 in place; the closing quote is restored and skipped below.  */
      skip_whitespace(input_line_pointer);
      if (*input_line_pointer != '"')
	goto bad_string;
      input_line_pointer++;
      s = input_line_pointer;
      while (*input_line_pointer && *input_line_pointer != '"')
	input_line_pointer++;
      if (*input_line_pointer != '"')
	goto bad_string;
      saved_char = *input_line_pointer;
      *input_line_pointer = 0;
    }
  else
    {
      s = NULL;
      saved_char = 0;
    }

  /* Record the attribute in the output BFD.  */
  if (tag == Tag_compatibility)
    elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
  else if (is_string)
    elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
  else
    elf32_arm_add_eabi_attr_int (stdoutput, tag, i);

  if (s)
    {
      *input_line_pointer = saved_char;
      input_line_pointer++;
    }
  demand_empty_rest_of_line ();
  return;
bad_string:
  as_bad (_("bad string constant"));
  ignore_rest_of_line ();
  return;
bad:
  as_bad (_("expected <tag> , <value>"));
  ignore_rest_of_line ();
}
| 3891 | #endif /* OBJ_ELF */ |
| 3892 | |
| 3893 | static void s_arm_arch (int); |
| 3894 | static void s_arm_object_arch (int); |
| 3895 | static void s_arm_cpu (int); |
| 3896 | static void s_arm_fpu (int); |
| 3897 | |
| 3898 | #ifdef TE_PE |
| 3899 | |
| 3900 | static void |
| 3901 | pe_directive_secrel (int dummy ATTRIBUTE_UNUSED) |
| 3902 | { |
| 3903 | expressionS exp; |
| 3904 | |
| 3905 | do |
| 3906 | { |
| 3907 | expression (&exp); |
| 3908 | if (exp.X_op == O_symbol) |
| 3909 | exp.X_op = O_secrel; |
| 3910 | |
| 3911 | emit_expr (&exp, 4); |
| 3912 | } |
| 3913 | while (*input_line_pointer++ == ','); |
| 3914 | |
| 3915 | input_line_pointer--; |
| 3916 | demand_empty_rest_of_line (); |
| 3917 | } |
| 3918 | #endif /* TE_PE */ |
| 3919 | |
| 3920 | /* This table describes all the machine specific pseudo-ops the assembler |
| 3921 | has to support. The fields are: |
| 3922 | pseudo-op name without dot |
| 3923 | function to call to execute this pseudo-op |
| 3924 | Integer arg to pass to the function. */ |
| 3925 | |
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
#ifdef OBJ_ELF
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "rel31",	   s_arm_rel31,	  0 },
  /* ARM EHABI unwind table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  /* PE section-relative data directive.  */
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};
| 3987 | \f |
| 3988 | /* Parser functions used exclusively in instruction operands. */ |
| 3989 | |
| 3990 | /* Generic immediate-value read function for use in insn parsing. |
| 3991 | STR points to the beginning of the immediate (the leading #); |
| 3992 | VAL receives the value; if the value is outside [MIN, MAX] |
| 3993 | issue an error. PREFIX_OPT is true if the immediate prefix is |
| 3994 | optional. */ |
| 3995 | |
| 3996 | static int |
| 3997 | parse_immediate (char **str, int *val, int min, int max, |
| 3998 | bfd_boolean prefix_opt) |
| 3999 | { |
| 4000 | expressionS exp; |
| 4001 | my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX); |
| 4002 | if (exp.X_op != O_constant) |
| 4003 | { |
| 4004 | inst.error = _("constant expression required"); |
| 4005 | return FAIL; |
| 4006 | } |
| 4007 | |
| 4008 | if (exp.X_add_number < min || exp.X_add_number > max) |
| 4009 | { |
| 4010 | inst.error = _("immediate value out of range"); |
| 4011 | return FAIL; |
| 4012 | } |
| 4013 | |
| 4014 | *val = exp.X_add_number; |
| 4015 | return SUCCESS; |
| 4016 | } |
| 4017 | |
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm and, for a 64-bit value, the high 32 bits in .reg with
   .regisimm set.  Returns SUCCESS or FAIL; *STR is only advanced on
   success.  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  /* The '#' prefix is optional for this variant.  */
  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      /* A bignum of 33..64 bits; for O_big, X_add_number is the littlenum
	 count.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    /* Neither a plain constant nor a 33..64-bit bignum.  */
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
| 4069 | |
| 4070 | /* Returns the pseudo-register number of an FPA immediate constant, |
| 4071 | or FAIL if there isn't a valid constant here. */ |
| 4072 | |
| 4073 | static int |
| 4074 | parse_fpa_immediate (char ** str) |
| 4075 | { |
| 4076 | LITTLENUM_TYPE words[MAX_LITTLENUMS]; |
| 4077 | char * save_in; |
| 4078 | expressionS exp; |
| 4079 | int i; |
| 4080 | int j; |
| 4081 | |
| 4082 | /* First try and match exact strings, this is to guarantee |
| 4083 | that some formats will work even for cross assembly. */ |
| 4084 | |
| 4085 | for (i = 0; fp_const[i]; i++) |
| 4086 | { |
| 4087 | if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0) |
| 4088 | { |
| 4089 | char *start = *str; |
| 4090 | |
| 4091 | *str += strlen (fp_const[i]); |
| 4092 | if (is_end_of_line[(unsigned char) **str]) |
| 4093 | return i + 8; |
| 4094 | *str = start; |
| 4095 | } |
| 4096 | } |
| 4097 | |
| 4098 | /* Just because we didn't get a match doesn't mean that the constant |
| 4099 | isn't valid, just that it is in a format that we don't |
| 4100 | automatically recognize. Try parsing it with the standard |
| 4101 | expression routines. */ |
| 4102 | |
| 4103 | memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE)); |
| 4104 | |
| 4105 | /* Look for a raw floating point number. */ |
| 4106 | if ((save_in = atof_ieee (*str, 'x', words)) != NULL |
| 4107 | && is_end_of_line[(unsigned char) *save_in]) |
| 4108 | { |
| 4109 | for (i = 0; i < NUM_FLOAT_VALS; i++) |
| 4110 | { |
| 4111 | for (j = 0; j < MAX_LITTLENUMS; j++) |
| 4112 | { |
| 4113 | if (words[j] != fp_values[i][j]) |
| 4114 | break; |
| 4115 | } |
| 4116 | |
| 4117 | if (j == MAX_LITTLENUMS) |
| 4118 | { |
| 4119 | *str = save_in; |
| 4120 | return i + 8; |
| 4121 | } |
| 4122 | } |
| 4123 | } |
| 4124 | |
| 4125 | /* Try and parse a more complex expression, this will probably fail |
| 4126 | unless the code uses a floating point prefix (eg "0f"). */ |
| 4127 | save_in = input_line_pointer; |
| 4128 | input_line_pointer = *str; |
| 4129 | if (expression (&exp) == absolute_section |
| 4130 | && exp.X_op == O_big |
| 4131 | && exp.X_add_number < 0) |
| 4132 | { |
| 4133 | /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it. |
| 4134 | Ditto for 15. */ |
| 4135 | if (gen_to_words (words, 5, (long) 15) == 0) |
| 4136 | { |
| 4137 | for (i = 0; i < NUM_FLOAT_VALS; i++) |
| 4138 | { |
| 4139 | for (j = 0; j < MAX_LITTLENUMS; j++) |
| 4140 | { |
| 4141 | if (words[j] != fp_values[i][j]) |
| 4142 | break; |
| 4143 | } |
| 4144 | |
| 4145 | if (j == MAX_LITTLENUMS) |
| 4146 | { |
| 4147 | *str = input_line_pointer; |
| 4148 | input_line_pointer = save_in; |
| 4149 | return i + 8; |
| 4150 | } |
| 4151 | } |
| 4152 | } |
| 4153 | } |
| 4154 | |
| 4155 | *str = input_line_pointer; |
| 4156 | input_line_pointer = save_in; |
| 4157 | inst.error = _("invalid FPA immediate expression"); |
| 4158 | return FAIL; |
| 4159 | } |
| 4160 | |
/* Return nonzero if IMM has the "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, where B is the complement
   of b.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected;

  /* Bits 18:0 must all be clear.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30:25 must be all-ones or all-zeros, the opposite of the
     replicated bit 29.  */
  expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
| 4170 | |
| 4171 | /* Parse an 8-bit "quarter-precision" floating point number of the form: |
| 4172 | 0baBbbbbbc defgh000 00000000 00000000. |
| 4173 | The minus-zero case needs special handling, since it can't be encoded in the |
| 4174 | "quarter-precision" float format, but can nonetheless be loaded as an integer |
| 4175 | constant. */ |
| 4176 | |
| 4177 | static unsigned |
| 4178 | parse_qfloat_immediate (char **ccp, int *immed) |
| 4179 | { |
| 4180 | char *str = *ccp; |
| 4181 | LITTLENUM_TYPE words[MAX_LITTLENUMS]; |
| 4182 | |
| 4183 | skip_past_char (&str, '#'); |
| 4184 | |
| 4185 | if ((str = atof_ieee (str, 's', words)) != NULL) |
| 4186 | { |
| 4187 | unsigned fpword = 0; |
| 4188 | int i; |
| 4189 | |
| 4190 | /* Our FP word must be 32 bits (single-precision FP). */ |
| 4191 | for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++) |
| 4192 | { |
| 4193 | fpword <<= LITTLENUM_NUMBER_OF_BITS; |
| 4194 | fpword |= words[i]; |
| 4195 | } |
| 4196 | |
| 4197 | if (is_quarter_float (fpword) || fpword == 0x80000000) |
| 4198 | *immed = fpword; |
| 4199 | else |
| 4200 | return FAIL; |
| 4201 | |
| 4202 | *ccp = str; |
| 4203 | |
| 4204 | return SUCCESS; |
| 4205 | } |
| 4206 | |
| 4207 | return FAIL; |
| 4208 | } |
| 4209 | |
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name onto its kind; entries are looked up via
   the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
| 4231 | |
| 4232 | /* Parse a <shift> specifier on an ARM data processing instruction. |
| 4233 | This has three forms: |
| 4234 | |
| 4235 | (LSL|LSR|ASL|ASR|ROR) Rs |
| 4236 | (LSL|LSR|ASL|ASR|ROR) #imm |
| 4237 | RRX |
| 4238 | |
| 4239 | Note that ASL is assimilated to LSL in the instruction encoding, and |
| 4240 | RRX to ROR #0 (which cannot be written as such). */ |
| 4241 | |
| 4242 | static int |
| 4243 | parse_shift (char **str, int i, enum parse_shift_mode mode) |
| 4244 | { |
| 4245 | const struct asm_shift_name *shift_name; |
| 4246 | enum shift_kind shift; |
| 4247 | char *s = *str; |
| 4248 | char *p = s; |
| 4249 | int reg; |
| 4250 | |
| 4251 | for (p = *str; ISALPHA (*p); p++) |
| 4252 | ; |
| 4253 | |
| 4254 | if (p == *str) |
| 4255 | { |
| 4256 | inst.error = _("shift expression expected"); |
| 4257 | return FAIL; |
| 4258 | } |
| 4259 | |
| 4260 | shift_name = hash_find_n (arm_shift_hsh, *str, p - *str); |
| 4261 | |
| 4262 | if (shift_name == NULL) |
| 4263 | { |
| 4264 | inst.error = _("shift expression expected"); |
| 4265 | return FAIL; |
| 4266 | } |
| 4267 | |
| 4268 | shift = shift_name->kind; |
| 4269 | |
| 4270 | switch (mode) |
| 4271 | { |
| 4272 | case NO_SHIFT_RESTRICT: |
| 4273 | case SHIFT_IMMEDIATE: break; |
| 4274 | |
| 4275 | case SHIFT_LSL_OR_ASR_IMMEDIATE: |
| 4276 | if (shift != SHIFT_LSL && shift != SHIFT_ASR) |
| 4277 | { |
| 4278 | inst.error = _("'LSL' or 'ASR' required"); |
| 4279 | return FAIL; |
| 4280 | } |
| 4281 | break; |
| 4282 | |
| 4283 | case SHIFT_LSL_IMMEDIATE: |
| 4284 | if (shift != SHIFT_LSL) |
| 4285 | { |
| 4286 | inst.error = _("'LSL' required"); |
| 4287 | return FAIL; |
| 4288 | } |
| 4289 | break; |
| 4290 | |
| 4291 | case SHIFT_ASR_IMMEDIATE: |
| 4292 | if (shift != SHIFT_ASR) |
| 4293 | { |
| 4294 | inst.error = _("'ASR' required"); |
| 4295 | return FAIL; |
| 4296 | } |
| 4297 | break; |
| 4298 | |
| 4299 | default: abort (); |
| 4300 | } |
| 4301 | |
| 4302 | if (shift != SHIFT_RRX) |
| 4303 | { |
| 4304 | /* Whitespace can appear here if the next thing is a bare digit. */ |
| 4305 | skip_whitespace (p); |
| 4306 | |
| 4307 | if (mode == NO_SHIFT_RESTRICT |
| 4308 | && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL) |
| 4309 | { |
| 4310 | inst.operands[i].imm = reg; |
| 4311 | inst.operands[i].immisreg = 1; |
| 4312 | } |
| 4313 | else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX)) |
| 4314 | return FAIL; |
| 4315 | } |
| 4316 | inst.operands[i].shift_kind = shift; |
| 4317 | inst.operands[i].shifted = 1; |
| 4318 | *str = p; |
| 4319 | return SUCCESS; |
| 4320 | } |
| 4321 | |
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS expr;

  /* Register form: <Rm> or <Rm>, <shift>.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&expr, str, GE_NO_PREFIX))
	return FAIL;

      if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30].  */
      value = expr.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      /* The rotated constant must fit in 8 bits.  */
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
| 4392 | |
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Name as written in source, without the colon.  */
  int alu_code;		/* Reloc for ADD/SUB; 0 means not permitted.  */
  int ldr_code;		/* Reloc for LDR; 0 means not permitted.  */
  int ldrs_code;	/* Reloc for LDRS; 0 means not permitted.  */
  int ldc_code;		/* Reloc for LDC; 0 means not permitted.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
| 4417 | |
/* Table of all recognized group relocations.  A zero code means the
   relocation is not permitted for that instruction class; this is
   diagnosed at parse time.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
| 4471 | |
| 4472 | /* Given the address of a pointer pointing to the textual name of a group |
| 4473 | relocation as may appear in assembler source, attempt to find its details |
| 4474 | in group_reloc_table. The pointer will be updated to the character after |
| 4475 | the trailing colon. On failure, FAIL will be returned; SUCCESS |
| 4476 | otherwise. On success, *entry will be updated to point at the relevant |
| 4477 | group_reloc_table entry. */ |
| 4478 | |
| 4479 | static int |
| 4480 | find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out) |
| 4481 | { |
| 4482 | unsigned int i; |
| 4483 | for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++) |
| 4484 | { |
| 4485 | int length = strlen (group_reloc_table[i].name); |
| 4486 | |
| 4487 | if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 && |
| 4488 | (*str)[length] == ':') |
| 4489 | { |
| 4490 | *out = &group_reloc_table[i]; |
| 4491 | *str += (length + 1); |
| 4492 | return SUCCESS; |
| 4493 | } |
| 4494 | } |
| 4495 | |
| 4496 | return FAIL; |
| 4497 | } |
| 4498 | |
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the #: or : sequence.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = entry->alu_code;
      assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
| 4554 | |
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

   GROUP_RELOCATIONS says whether #:<group_reloc>: offsets are accepted;
   GROUP_TYPE selects which relocation code column of group_reloc_table
   applies.

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
                    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* bare address - translate to PC-relative offset */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* else a load-constant pseudo op, no special treatment needed here */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* '[' seen: a base register is required.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* Something follows the base register inside the brackets:
	 index register, alignment specifier, group relocation, or
	 immediate offset.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.	This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Not a register after '-' after all: back up over the
		 sign so the expression parser sees it.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations &&
	      ((*p == '#' && *(p + 1) == ':') || *p == ':'))

	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = entry->ldc_code;
		    break;

		  default:
		    assert (0);
		}

	      /* A zero entry in the table means this combination is
		 invalid.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    /* Plain immediate offset: [Rn, #offset].  */
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], #offset or [Rn], +/-Rm{, shift} -- post-indexed.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      if (inst.operands[i].negative)
		{
		  /* Not a register after '-': back up over the sign.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
| 4802 | |
| 4803 | static int |
| 4804 | parse_address (char **str, int i) |
| 4805 | { |
| 4806 | return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS |
| 4807 | ? SUCCESS : FAIL; |
| 4808 | } |
| 4809 | |
/* As parse_address, but group relocations are permitted; TYPE selects
   which relocation code column applies.  Returns the full
   parse_operand_result so NO_BACKTRACK failures can be distinguished.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
| 4815 | |
/* Parse an operand for a MOVW or MOVT instruction.  An optional
   :lower16: or :upper16: prefix selects the relocation; otherwise the
   operand must be a constant in [0, 0xffff].  Returns SUCCESS or FAIL
   with inst.error set.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* NOTE(review): assumes inst.reloc.type is BFD_RELOC_UNUSED on entry,
     so a non-UNUSED value here means one of the prefixes matched —
     confirm against callers.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Both prefixes are 9 characters long.  */
      p += 9;
      skip_whitespace(p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without a reloc prefix, the value must be a 16-bit constant.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
| 4855 | |
| 4856 | /* Miscellaneous. */ |
| 4857 | |
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   Accepts CPSR/SPSR (any case) with an optional _<flags> suffix, or a
   v7-M special-register name looked up in arm_v7m_psr_hsh.  */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      /* Not CPSR/SPSR: try the v7-M special-register names.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  /* Skip the 4-character CPSR/SPSR prefix.  */
  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_psr_hsh, start, p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Bare CPSR/SPSR means the control and flags fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
| 4921 | |
| 4922 | /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a |
| 4923 | value suitable for splatting into the AIF field of the instruction. */ |
| 4924 | |
| 4925 | static int |
| 4926 | parse_cps_flags (char **str) |
| 4927 | { |
| 4928 | int val = 0; |
| 4929 | int saw_a_flag = 0; |
| 4930 | char *s = *str; |
| 4931 | |
| 4932 | for (;;) |
| 4933 | switch (*s++) |
| 4934 | { |
| 4935 | case '\0': case ',': |
| 4936 | goto done; |
| 4937 | |
| 4938 | case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break; |
| 4939 | case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break; |
| 4940 | case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break; |
| 4941 | |
| 4942 | default: |
| 4943 | inst.error = _("unrecognized CPS flag"); |
| 4944 | return FAIL; |
| 4945 | } |
| 4946 | |
| 4947 | done: |
| 4948 | if (saw_a_flag == 0) |
| 4949 | { |
| 4950 | inst.error = _("missing CPS flags"); |
| 4951 | return FAIL; |
| 4952 | } |
| 4953 | |
| 4954 | *str = s - 1; |
| 4955 | return val; |
| 4956 | } |
| 4957 | |
| 4958 | /* Parse an endian specifier ("BE" or "LE", case insensitive); |
| 4959 | returns 0 for big-endian, 1 for little-endian, FAIL for an error. */ |
| 4960 | |
| 4961 | static int |
| 4962 | parse_endian_specifier (char **str) |
| 4963 | { |
| 4964 | int little_endian; |
| 4965 | char *s = *str; |
| 4966 | |
| 4967 | if (strncasecmp (s, "BE", 2)) |
| 4968 | little_endian = 0; |
| 4969 | else if (strncasecmp (s, "LE", 2)) |
| 4970 | little_endian = 1; |
| 4971 | else |
| 4972 | { |
| 4973 | inst.error = _("valid endian specifiers are be or le"); |
| 4974 | return FAIL; |
| 4975 | } |
| 4976 | |
| 4977 | if (ISALNUM (s[2]) || s[2] == '_') |
| 4978 | { |
| 4979 | inst.error = _("valid endian specifiers are be or le"); |
| 4980 | return FAIL; |
| 4981 | } |
| 4982 | |
| 4983 | *str = s + 2; |
| 4984 | return little_endian; |
| 4985 | } |
| 4986 | |
| 4987 | /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a |
| 4988 | value suitable for poking into the rotate field of an sxt or sxta |
| 4989 | instruction, or FAIL on error. */ |
| 4990 | |
| 4991 | static int |
| 4992 | parse_ror (char **str) |
| 4993 | { |
| 4994 | int rot; |
| 4995 | char *s = *str; |
| 4996 | |
| 4997 | if (strncasecmp (s, "ROR", 3) == 0) |
| 4998 | s += 3; |
| 4999 | else |
| 5000 | { |
| 5001 | inst.error = _("missing rotation field after comma"); |
| 5002 | return FAIL; |
| 5003 | } |
| 5004 | |
| 5005 | if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL) |
| 5006 | return FAIL; |
| 5007 | |
| 5008 | switch (rot) |
| 5009 | { |
| 5010 | case 0: *str = s; return 0x0; |
| 5011 | case 8: *str = s; return 0x1; |
| 5012 | case 16: *str = s; return 0x2; |
| 5013 | case 24: *str = s; return 0x3; |
| 5014 | |
| 5015 | default: |
| 5016 | inst.error = _("rotation can only be 0, 8, 16, or 24"); |
| 5017 | return FAIL; |
| 5018 | } |
| 5019 | } |
| 5020 | |
| 5021 | /* Parse a conditional code (from conds[] below). The value returned is in the |
| 5022 | range 0 .. 14, or FAIL. */ |
| 5023 | static int |
| 5024 | parse_cond (char **str) |
| 5025 | { |
| 5026 | char *p, *q; |
| 5027 | const struct asm_cond *c; |
| 5028 | |
| 5029 | p = q = *str; |
| 5030 | while (ISALPHA (*q)) |
| 5031 | q++; |
| 5032 | |
| 5033 | c = hash_find_n (arm_cond_hsh, p, q - p); |
| 5034 | if (!c) |
| 5035 | { |
| 5036 | inst.error = _("condition required"); |
| 5037 | return FAIL; |
| 5038 | } |
| 5039 | |
| 5040 | *str = q; |
| 5041 | return c->value; |
| 5042 | } |
| 5043 | |
| 5044 | /* Parse an option for a barrier instruction. Returns the encoding for the |
| 5045 | option, or FAIL. */ |
| 5046 | static int |
| 5047 | parse_barrier (char **str) |
| 5048 | { |
| 5049 | char *p, *q; |
| 5050 | const struct asm_barrier_opt *o; |
| 5051 | |
| 5052 | p = q = *str; |
| 5053 | while (ISALPHA (*q)) |
| 5054 | q++; |
| 5055 | |
| 5056 | o = hash_find_n (arm_barrier_opt_hsh, p, q - p); |
| 5057 | if (!o) |
| 5058 | return FAIL; |
| 5059 | |
| 5060 | *str = q; |
| 5061 | return o->value; |
| 5062 | } |
| 5063 | |
| 5064 | /* Parse the operands of a table branch instruction. Similar to a memory |
| 5065 | operand. */ |
| 5066 | static int |
| 5067 | parse_tb (char **str) |
| 5068 | { |
| 5069 | char * p = *str; |
| 5070 | int reg; |
| 5071 | |
| 5072 | if (skip_past_char (&p, '[') == FAIL) |
| 5073 | { |
| 5074 | inst.error = _("'[' expected"); |
| 5075 | return FAIL; |
| 5076 | } |
| 5077 | |
| 5078 | if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) |
| 5079 | { |
| 5080 | inst.error = _(reg_expected_msgs[REG_TYPE_RN]); |
| 5081 | return FAIL; |
| 5082 | } |
| 5083 | inst.operands[0].reg = reg; |
| 5084 | |
| 5085 | if (skip_past_comma (&p) == FAIL) |
| 5086 | { |
| 5087 | inst.error = _("',' expected"); |
| 5088 | return FAIL; |
| 5089 | } |
| 5090 | |
| 5091 | if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) |
| 5092 | { |
| 5093 | inst.error = _(reg_expected_msgs[REG_TYPE_RN]); |
| 5094 | return FAIL; |
| 5095 | } |
| 5096 | inst.operands[0].imm = reg; |
| 5097 | |
| 5098 | if (skip_past_comma (&p) == SUCCESS) |
| 5099 | { |
| 5100 | if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL) |
| 5101 | return FAIL; |
| 5102 | if (inst.reloc.exp.X_add_number != 1) |
| 5103 | { |
| 5104 | inst.error = _("invalid shift"); |
| 5105 | return FAIL; |
| 5106 | } |
| 5107 | inst.operands[0].shifted = 1; |
| 5108 | } |
| 5109 | |
| 5110 | if (skip_past_char (&p, ']') == FAIL) |
| 5111 | { |
| 5112 | inst.error = _("']' expected"); |
| 5113 | return FAIL; |
| 5114 | } |
| 5115 | *str = p; |
| 5116 | return SUCCESS; |
| 5117 | } |
| 5118 | |
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  Failure diagnostics are recorded via first_error.
   The "Case N" comments below refer to the forms handled by do_neon_mov.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes the next inst.operands[] slot to fill.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
        goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
        goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
           != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
        goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
        {
          /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
             Case 13: VMOV <Sd>, <Rm>  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].present = 1;

          if (rtype == REG_TYPE_NQ)
            {
              first_error (_("can't use Neon quad register here"));
              return FAIL;
            }
          else if (rtype != REG_TYPE_VFS)
            {
              /* First operand was a D register, so a second ARM core
                 register is required (case 5).  */
              i++;
              if (skip_past_comma (&ptr) == FAIL)
                goto wanted_comma;
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
                goto wanted_arm;
              inst.operands[i].reg = val;
              inst.operands[i].isreg = 1;
              inst.operands[i].present = 1;
            }
        }
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
             Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
             Case 10: VMOV.F32 <Sd>, #<imm>
             Case 11: VMOV.F64 <Dd>, #<imm>  */
        ;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
             Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
        ;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
                                           &optype)) != FAIL)
        {
          /* Case 0: VMOV<c><q> <Qd>, <Qm>
             Case 1: VMOV<c><q> <Dd>, <Dm>
             Case 8: VMOV.F32 <Sd>, <Sm>
             Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
          inst.operands[i].isvec = 1;
          inst.operands[i].vectype = optype;
          inst.operands[i].present = 1;

          if (skip_past_comma (&ptr) == SUCCESS)
            {
              /* Case 15.  Two more ARM core registers follow.  */
              i++;

              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
                goto wanted_arm;

              inst.operands[i].reg = val;
              inst.operands[i].isreg = 1;
              inst.operands[i++].present = 1;

              if (skip_past_comma (&ptr) == FAIL)
                goto wanted_comma;

              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
                goto wanted_arm;

              inst.operands[i].reg = val;
              inst.operands[i].isreg = 1;
              inst.operands[i++].present = 1;
            }
        }
      else
        {
          /* Second operand is neither a register nor an immediate.  */
          first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
          return FAIL;
        }
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  First operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
        goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
        {
          /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
          inst.operands[i].reg = val;
          inst.operands[i].isscalar = 1;
          inst.operands[i].present = 1;
          inst.operands[i].vectype = optype;
        }
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
        {
          /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i++].present = 1;

          if (skip_past_comma (&ptr) == FAIL)
            goto wanted_comma;

          if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
              == FAIL)
            {
              first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
              return FAIL;
            }

          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].isvec = 1;
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
          inst.operands[i].vectype = optype;
          inst.operands[i].present = 1;

          if (rtype == REG_TYPE_VFS)
            {
              /* Case 14.  A second S register follows.  */
              i++;
              if (skip_past_comma (&ptr) == FAIL)
                goto wanted_comma;
              if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
                                              &optype)) == FAIL)
                {
                  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
                  return FAIL;
                }
              inst.operands[i].reg = val;
              inst.operands[i].isreg = 1;
              inst.operands[i].isvec = 1;
              inst.operands[i].issingle = 1;
              inst.operands[i].vectype = optype;
              inst.operands[i].present = 1;
            }
        }
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
               != FAIL)
        {
          /* Case 13.  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].isvec = 1;
          inst.operands[i].issingle = 1;
          inst.operands[i].vectype = optype;
          inst.operands[i++].present = 1;
        }
    }
  else
    {
      /* First operand is not a scalar, vector or core register.  */
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

  /* Shared error exits for the gotos above.  */
 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
| 5340 | |
/* Matcher codes for parse_operands.  Each instruction's operand grammar
   is a sequence of these codes, terminated by OP_stop; parse_operands
   walks the sequence and fills in inst.operands[] accordingly.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_NILO,      /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  These may match the empty string; parse_operands
     uses the code being >= OP_FIRST_OPTIONAL to decide whether it may
     backtrack past an omitted operand.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER,	 /* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b	/* Must alias the first OP_o* code above.  */
};
| 5460 | |
| 5461 | /* Generic instruction operand parser. This does no encoding and no |
| 5462 | semantic validation; it merely squirrels values away in the inst |
| 5463 | structure. Returns SUCCESS or FAIL depending on whether the |
| 5464 | specified grammar matched. */ |
| 5465 | static int |
| 5466 | parse_operands (char *str, const unsigned char *pattern) |
| 5467 | { |
| 5468 | unsigned const char *upat = pattern; |
| 5469 | char *backtrack_pos = 0; |
| 5470 | const char *backtrack_error = 0; |
| 5471 | int i, val, backtrack_index = 0; |
| 5472 | enum arm_reg_type rtype; |
| 5473 | parse_operand_result result; |
| 5474 | |
| 5475 | #define po_char_or_fail(chr) do { \ |
| 5476 | if (skip_past_char (&str, chr) == FAIL) \ |
| 5477 | goto bad_args; \ |
| 5478 | } while (0) |
| 5479 | |
| 5480 | #define po_reg_or_fail(regtype) do { \ |
| 5481 | val = arm_typed_reg_parse (&str, regtype, &rtype, \ |
| 5482 | &inst.operands[i].vectype); \ |
| 5483 | if (val == FAIL) \ |
| 5484 | { \ |
| 5485 | first_error (_(reg_expected_msgs[regtype])); \ |
| 5486 | goto failure; \ |
| 5487 | } \ |
| 5488 | inst.operands[i].reg = val; \ |
| 5489 | inst.operands[i].isreg = 1; \ |
| 5490 | inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \ |
| 5491 | inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \ |
| 5492 | inst.operands[i].isvec = (rtype == REG_TYPE_VFS \ |
| 5493 | || rtype == REG_TYPE_VFD \ |
| 5494 | || rtype == REG_TYPE_NQ); \ |
| 5495 | } while (0) |
| 5496 | |
| 5497 | #define po_reg_or_goto(regtype, label) do { \ |
| 5498 | val = arm_typed_reg_parse (&str, regtype, &rtype, \ |
| 5499 | &inst.operands[i].vectype); \ |
| 5500 | if (val == FAIL) \ |
| 5501 | goto label; \ |
| 5502 | \ |
| 5503 | inst.operands[i].reg = val; \ |
| 5504 | inst.operands[i].isreg = 1; \ |
| 5505 | inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \ |
| 5506 | inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \ |
| 5507 | inst.operands[i].isvec = (rtype == REG_TYPE_VFS \ |
| 5508 | || rtype == REG_TYPE_VFD \ |
| 5509 | || rtype == REG_TYPE_NQ); \ |
| 5510 | } while (0) |
| 5511 | |
| 5512 | #define po_imm_or_fail(min, max, popt) do { \ |
| 5513 | if (parse_immediate (&str, &val, min, max, popt) == FAIL) \ |
| 5514 | goto failure; \ |
| 5515 | inst.operands[i].imm = val; \ |
| 5516 | } while (0) |
| 5517 | |
| 5518 | #define po_scalar_or_goto(elsz, label) do { \ |
| 5519 | val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \ |
| 5520 | if (val == FAIL) \ |
| 5521 | goto label; \ |
| 5522 | inst.operands[i].reg = val; \ |
| 5523 | inst.operands[i].isscalar = 1; \ |
| 5524 | } while (0) |
| 5525 | |
| 5526 | #define po_misc_or_fail(expr) do { \ |
| 5527 | if (expr) \ |
| 5528 | goto failure; \ |
| 5529 | } while (0) |
| 5530 | |
| 5531 | #define po_misc_or_fail_no_backtrack(expr) do { \ |
| 5532 | result = expr; \ |
| 5533 | if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\ |
| 5534 | backtrack_pos = 0; \ |
| 5535 | if (result != PARSE_OPERAND_SUCCESS) \ |
| 5536 | goto failure; \ |
| 5537 | } while (0) |
| 5538 | |
| 5539 | skip_whitespace (str); |
| 5540 | |
| 5541 | for (i = 0; upat[i] != OP_stop; i++) |
| 5542 | { |
| 5543 | if (upat[i] >= OP_FIRST_OPTIONAL) |
| 5544 | { |
| 5545 | /* Remember where we are in case we need to backtrack. */ |
| 5546 | assert (!backtrack_pos); |
| 5547 | backtrack_pos = str; |
| 5548 | backtrack_error = inst.error; |
| 5549 | backtrack_index = i; |
| 5550 | } |
| 5551 | |
| 5552 | if (i > 0) |
| 5553 | po_char_or_fail (','); |
| 5554 | |
| 5555 | switch (upat[i]) |
| 5556 | { |
| 5557 | /* Registers */ |
| 5558 | case OP_oRRnpc: |
| 5559 | case OP_RRnpc: |
| 5560 | case OP_oRR: |
| 5561 | case OP_RR: po_reg_or_fail (REG_TYPE_RN); break; |
| 5562 | case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break; |
| 5563 | case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break; |
| 5564 | case OP_RF: po_reg_or_fail (REG_TYPE_FN); break; |
| 5565 | case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break; |
| 5566 | case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break; |
| 5567 | case OP_oRND: |
| 5568 | case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break; |
| 5569 | case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break; |
| 5570 | case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break; |
| 5571 | case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break; |
| 5572 | case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break; |
| 5573 | case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break; |
| 5574 | case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break; |
| 5575 | case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break; |
| 5576 | case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break; |
| 5577 | case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break; |
| 5578 | case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break; |
| 5579 | case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break; |
| 5580 | case OP_oRNQ: |
| 5581 | case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break; |
| 5582 | case OP_oRNDQ: |
| 5583 | case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break; |
| 5584 | case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break; |
| 5585 | case OP_oRNSDQ: |
| 5586 | case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break; |
| 5587 | |
| 5588 | /* Neon scalar. Using an element size of 8 means that some invalid |
| 5589 | scalars are accepted here, so deal with those in later code. */ |
| 5590 | case OP_RNSC: po_scalar_or_goto (8, failure); break; |
| 5591 | |
| 5592 | /* WARNING: We can expand to two operands here. This has the potential |
| 5593 | to totally confuse the backtracking mechanism! It will be OK at |
| 5594 | least as long as we don't try to use optional args as well, |
| 5595 | though. */ |
| 5596 | case OP_NILO: |
| 5597 | { |
| 5598 | po_reg_or_goto (REG_TYPE_NDQ, try_imm); |
| 5599 | inst.operands[i].present = 1; |
| 5600 | i++; |
| 5601 | skip_past_comma (&str); |
| 5602 | po_reg_or_goto (REG_TYPE_NDQ, one_reg_only); |
| 5603 | break; |
| 5604 | one_reg_only: |
| 5605 | /* Optional register operand was omitted. Unfortunately, it's in |
| 5606 | operands[i-1] and we need it to be in inst.operands[i]. Fix that |
| 5607 | here (this is a bit grotty). */ |
| 5608 | inst.operands[i] = inst.operands[i-1]; |
| 5609 | inst.operands[i-1].present = 0; |
| 5610 | break; |
| 5611 | try_imm: |
| 5612 | /* There's a possibility of getting a 64-bit immediate here, so |
| 5613 | we need special handling. */ |
| 5614 | if (parse_big_immediate (&str, i) == FAIL) |
| 5615 | { |
| 5616 | inst.error = _("immediate value is out of range"); |
| 5617 | goto failure; |
| 5618 | } |
| 5619 | } |
| 5620 | break; |
| 5621 | |
| 5622 | case OP_RNDQ_I0: |
| 5623 | { |
| 5624 | po_reg_or_goto (REG_TYPE_NDQ, try_imm0); |
| 5625 | break; |
| 5626 | try_imm0: |
| 5627 | po_imm_or_fail (0, 0, TRUE); |
| 5628 | } |
| 5629 | break; |
| 5630 | |
| 5631 | case OP_RVSD_I0: |
| 5632 | po_reg_or_goto (REG_TYPE_VFSD, try_imm0); |
| 5633 | break; |
| 5634 | |
| 5635 | case OP_RR_RNSC: |
| 5636 | { |
| 5637 | po_scalar_or_goto (8, try_rr); |
| 5638 | break; |
| 5639 | try_rr: |
| 5640 | po_reg_or_fail (REG_TYPE_RN); |
| 5641 | } |
| 5642 | break; |
| 5643 | |
| 5644 | case OP_RNSDQ_RNSC: |
| 5645 | { |
| 5646 | po_scalar_or_goto (8, try_nsdq); |
| 5647 | break; |
| 5648 | try_nsdq: |
| 5649 | po_reg_or_fail (REG_TYPE_NSDQ); |
| 5650 | } |
| 5651 | break; |
| 5652 | |
| 5653 | case OP_RNDQ_RNSC: |
| 5654 | { |
| 5655 | po_scalar_or_goto (8, try_ndq); |
| 5656 | break; |
| 5657 | try_ndq: |
| 5658 | po_reg_or_fail (REG_TYPE_NDQ); |
| 5659 | } |
| 5660 | break; |
| 5661 | |
| 5662 | case OP_RND_RNSC: |
| 5663 | { |
| 5664 | po_scalar_or_goto (8, try_vfd); |
| 5665 | break; |
| 5666 | try_vfd: |
| 5667 | po_reg_or_fail (REG_TYPE_VFD); |
| 5668 | } |
| 5669 | break; |
| 5670 | |
| 5671 | case OP_VMOV: |
| 5672 | /* WARNING: parse_neon_mov can move the operand counter, i. If we're |
| 5673 | not careful then bad things might happen. */ |
| 5674 | po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL); |
| 5675 | break; |
| 5676 | |
| 5677 | case OP_RNDQ_IMVNb: |
| 5678 | { |
| 5679 | po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm); |
| 5680 | break; |
| 5681 | try_mvnimm: |
| 5682 | /* There's a possibility of getting a 64-bit immediate here, so |
| 5683 | we need special handling. */ |
| 5684 | if (parse_big_immediate (&str, i) == FAIL) |
| 5685 | { |
| 5686 | inst.error = _("immediate value is out of range"); |
| 5687 | goto failure; |
| 5688 | } |
| 5689 | } |
| 5690 | break; |
| 5691 | |
| 5692 | case OP_RNDQ_I63b: |
| 5693 | { |
| 5694 | po_reg_or_goto (REG_TYPE_NDQ, try_shimm); |
| 5695 | break; |
| 5696 | try_shimm: |
| 5697 | po_imm_or_fail (0, 63, TRUE); |
| 5698 | } |
| 5699 | break; |
| 5700 | |
| 5701 | case OP_RRnpcb: |
| 5702 | po_char_or_fail ('['); |
| 5703 | po_reg_or_fail (REG_TYPE_RN); |
| 5704 | po_char_or_fail (']'); |
| 5705 | break; |
| 5706 | |
| 5707 | case OP_RRw: |
| 5708 | po_reg_or_fail (REG_TYPE_RN); |
| 5709 | if (skip_past_char (&str, '!') == SUCCESS) |
| 5710 | inst.operands[i].writeback = 1; |
| 5711 | break; |
| 5712 | |
| 5713 | /* Immediates */ |
| 5714 | case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break; |
| 5715 | case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break; |
| 5716 | case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break; |
| 5717 | case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break; |
| 5718 | case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break; |
| 5719 | case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break; |
| 5720 | case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break; |
| 5721 | case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break; |
| 5722 | case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break; |
| 5723 | case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break; |
| 5724 | case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break; |
| 5725 | case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break; |
| 5726 | |
| 5727 | case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break; |
| 5728 | case OP_oI7b: |
| 5729 | case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break; |
| 5730 | case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break; |
| 5731 | case OP_oI31b: |
| 5732 | case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break; |
| 5733 | case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break; |
| 5734 | case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break; |
| 5735 | |
| 5736 | /* Immediate variants */ |
| 5737 | case OP_oI255c: |
| 5738 | po_char_or_fail ('{'); |
| 5739 | po_imm_or_fail (0, 255, TRUE); |
| 5740 | po_char_or_fail ('}'); |
| 5741 | break; |
| 5742 | |
| 5743 | case OP_I31w: |
| 5744 | /* The expression parser chokes on a trailing !, so we have |
| 5745 | to find it first and zap it. */ |
| 5746 | { |
| 5747 | char *s = str; |
| 5748 | while (*s && *s != ',') |
| 5749 | s++; |
| 5750 | if (s[-1] == '!') |
| 5751 | { |
| 5752 | s[-1] = '\0'; |
| 5753 | inst.operands[i].writeback = 1; |
| 5754 | } |
| 5755 | po_imm_or_fail (0, 31, TRUE); |
| 5756 | if (str == s - 1) |
| 5757 | str = s; |
| 5758 | } |
| 5759 | break; |
| 5760 | |
| 5761 | /* Expressions */ |
| 5762 | case OP_EXPi: EXPi: |
| 5763 | po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str, |
| 5764 | GE_OPT_PREFIX)); |
| 5765 | break; |
| 5766 | |
| 5767 | case OP_EXP: |
| 5768 | po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str, |
| 5769 | GE_NO_PREFIX)); |
| 5770 | break; |
| 5771 | |
| 5772 | case OP_EXPr: EXPr: |
| 5773 | po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str, |
| 5774 | GE_NO_PREFIX)); |
| 5775 | if (inst.reloc.exp.X_op == O_symbol) |
| 5776 | { |
| 5777 | val = parse_reloc (&str); |
| 5778 | if (val == -1) |
| 5779 | { |
| 5780 | inst.error = _("unrecognized relocation suffix"); |
| 5781 | goto failure; |
| 5782 | } |
| 5783 | else if (val != BFD_RELOC_UNUSED) |
| 5784 | { |
| 5785 | inst.operands[i].imm = val; |
| 5786 | inst.operands[i].hasreloc = 1; |
| 5787 | } |
| 5788 | } |
| 5789 | break; |
| 5790 | |
| 5791 | /* Operand for MOVW or MOVT. */ |
| 5792 | case OP_HALF: |
| 5793 | po_misc_or_fail (parse_half (&str)); |
| 5794 | break; |
| 5795 | |
| 5796 | /* Register or expression */ |
| 5797 | case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break; |
| 5798 | case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break; |
| 5799 | |
| 5800 | /* Register or immediate */ |
| 5801 | case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break; |
| 5802 | I0: po_imm_or_fail (0, 0, FALSE); break; |
| 5803 | |
| 5804 | case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break; |
| 5805 | IF: |
| 5806 | if (!is_immediate_prefix (*str)) |
| 5807 | goto bad_args; |
| 5808 | str++; |
| 5809 | val = parse_fpa_immediate (&str); |
| 5810 | if (val == FAIL) |
| 5811 | goto failure; |
| 5812 | /* FPA immediates are encoded as registers 8-15. |
| 5813 | parse_fpa_immediate has already applied the offset. */ |
| 5814 | inst.operands[i].reg = val; |
| 5815 | inst.operands[i].isreg = 1; |
| 5816 | break; |
| 5817 | |
| 5818 | case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break; |
| 5819 | I32z: po_imm_or_fail (0, 32, FALSE); break; |
| 5820 | |
| 5821 | /* Two kinds of register */ |
| 5822 | case OP_RIWR_RIWC: |
| 5823 | { |
| 5824 | struct reg_entry *rege = arm_reg_parse_multi (&str); |
| 5825 | if (!rege |
| 5826 | || (rege->type != REG_TYPE_MMXWR |
| 5827 | && rege->type != REG_TYPE_MMXWC |
| 5828 | && rege->type != REG_TYPE_MMXWCG)) |
| 5829 | { |
| 5830 | inst.error = _("iWMMXt data or control register expected"); |
| 5831 | goto failure; |
| 5832 | } |
| 5833 | inst.operands[i].reg = rege->number; |
| 5834 | inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR); |
| 5835 | } |
| 5836 | break; |
| 5837 | |
| 5838 | case OP_RIWC_RIWG: |
| 5839 | { |
| 5840 | struct reg_entry *rege = arm_reg_parse_multi (&str); |
| 5841 | if (!rege |
| 5842 | || (rege->type != REG_TYPE_MMXWC |
| 5843 | && rege->type != REG_TYPE_MMXWCG)) |
| 5844 | { |
| 5845 | inst.error = _("iWMMXt control register expected"); |
| 5846 | goto failure; |
| 5847 | } |
| 5848 | inst.operands[i].reg = rege->number; |
| 5849 | inst.operands[i].isreg = 1; |
| 5850 | } |
| 5851 | break; |
| 5852 | |
| 5853 | /* Misc */ |
| 5854 | case OP_CPSF: val = parse_cps_flags (&str); break; |
| 5855 | case OP_ENDI: val = parse_endian_specifier (&str); break; |
| 5856 | case OP_oROR: val = parse_ror (&str); break; |
| 5857 | case OP_PSR: val = parse_psr (&str); break; |
| 5858 | case OP_COND: val = parse_cond (&str); break; |
| 5859 | case OP_oBARRIER:val = parse_barrier (&str); break; |
| 5860 | |
| 5861 | case OP_RVC_PSR: |
| 5862 | po_reg_or_goto (REG_TYPE_VFC, try_psr); |
| 5863 | inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */ |
| 5864 | break; |
| 5865 | try_psr: |
| 5866 | val = parse_psr (&str); |
| 5867 | break; |
| 5868 | |
| 5869 | case OP_APSR_RR: |
| 5870 | po_reg_or_goto (REG_TYPE_RN, try_apsr); |
| 5871 | break; |
| 5872 | try_apsr: |
| 5873 | /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS |
| 5874 | instruction). */ |
| 5875 | if (strncasecmp (str, "APSR_", 5) == 0) |
| 5876 | { |
| 5877 | unsigned found = 0; |
| 5878 | str += 5; |
| 5879 | while (found < 15) |
| 5880 | switch (*str++) |
| 5881 | { |
| 5882 | case 'c': found = (found & 1) ? 16 : found | 1; break; |
| 5883 | case 'n': found = (found & 2) ? 16 : found | 2; break; |
| 5884 | case 'z': found = (found & 4) ? 16 : found | 4; break; |
| 5885 | case 'v': found = (found & 8) ? 16 : found | 8; break; |
| 5886 | default: found = 16; |
| 5887 | } |
| 5888 | if (found != 15) |
| 5889 | goto failure; |
| 5890 | inst.operands[i].isvec = 1; |
| 5891 | } |
| 5892 | else |
| 5893 | goto failure; |
| 5894 | break; |
| 5895 | |
| 5896 | case OP_TB: |
| 5897 | po_misc_or_fail (parse_tb (&str)); |
| 5898 | break; |
| 5899 | |
| 5900 | /* Register lists */ |
| 5901 | case OP_REGLST: |
| 5902 | val = parse_reg_list (&str); |
| 5903 | if (*str == '^') |
| 5904 | { |
| 5905 | inst.operands[1].writeback = 1; |
| 5906 | str++; |
| 5907 | } |
| 5908 | break; |
| 5909 | |
| 5910 | case OP_VRSLST: |
| 5911 | val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S); |
| 5912 | break; |
| 5913 | |
| 5914 | case OP_VRDLST: |
| 5915 | val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D); |
| 5916 | break; |
| 5917 | |
| 5918 | case OP_VRSDLST: |
| 5919 | /* Allow Q registers too. */ |
| 5920 | val = parse_vfp_reg_list (&str, &inst.operands[i].reg, |
| 5921 | REGLIST_NEON_D); |
| 5922 | if (val == FAIL) |
| 5923 | { |
| 5924 | inst.error = NULL; |
| 5925 | val = parse_vfp_reg_list (&str, &inst.operands[i].reg, |
| 5926 | REGLIST_VFP_S); |
| 5927 | inst.operands[i].issingle = 1; |
| 5928 | } |
| 5929 | break; |
| 5930 | |
| 5931 | case OP_NRDLST: |
| 5932 | val = parse_vfp_reg_list (&str, &inst.operands[i].reg, |
| 5933 | REGLIST_NEON_D); |
| 5934 | break; |
| 5935 | |
| 5936 | case OP_NSTRLST: |
| 5937 | val = parse_neon_el_struct_list (&str, &inst.operands[i].reg, |
| 5938 | &inst.operands[i].vectype); |
| 5939 | break; |
| 5940 | |
| 5941 | /* Addressing modes */ |
| 5942 | case OP_ADDR: |
| 5943 | po_misc_or_fail (parse_address (&str, i)); |
| 5944 | break; |
| 5945 | |
| 5946 | case OP_ADDRGLDR: |
| 5947 | po_misc_or_fail_no_backtrack ( |
| 5948 | parse_address_group_reloc (&str, i, GROUP_LDR)); |
| 5949 | break; |
| 5950 | |
| 5951 | case OP_ADDRGLDRS: |
| 5952 | po_misc_or_fail_no_backtrack ( |
| 5953 | parse_address_group_reloc (&str, i, GROUP_LDRS)); |
| 5954 | break; |
| 5955 | |
| 5956 | case OP_ADDRGLDC: |
| 5957 | po_misc_or_fail_no_backtrack ( |
| 5958 | parse_address_group_reloc (&str, i, GROUP_LDC)); |
| 5959 | break; |
| 5960 | |
| 5961 | case OP_SH: |
| 5962 | po_misc_or_fail (parse_shifter_operand (&str, i)); |
| 5963 | break; |
| 5964 | |
| 5965 | case OP_SHG: |
| 5966 | po_misc_or_fail_no_backtrack ( |
| 5967 | parse_shifter_operand_group_reloc (&str, i)); |
| 5968 | break; |
| 5969 | |
| 5970 | case OP_oSHll: |
| 5971 | po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE)); |
| 5972 | break; |
| 5973 | |
| 5974 | case OP_oSHar: |
| 5975 | po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE)); |
| 5976 | break; |
| 5977 | |
| 5978 | case OP_oSHllar: |
| 5979 | po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE)); |
| 5980 | break; |
| 5981 | |
| 5982 | default: |
| 5983 | as_fatal ("unhandled operand code %d", upat[i]); |
| 5984 | } |
| 5985 | |
| 5986 | /* Various value-based sanity checks and shared operations. We |
| 5987 | do not signal immediate failures for the register constraints; |
| 5988 | this allows a syntax error to take precedence. */ |
| 5989 | switch (upat[i]) |
| 5990 | { |
| 5991 | case OP_oRRnpc: |
| 5992 | case OP_RRnpc: |
| 5993 | case OP_RRnpcb: |
| 5994 | case OP_RRw: |
| 5995 | case OP_RRnpc_I0: |
| 5996 | if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC) |
| 5997 | inst.error = BAD_PC; |
| 5998 | break; |
| 5999 | |
| 6000 | case OP_CPSF: |
| 6001 | case OP_ENDI: |
| 6002 | case OP_oROR: |
| 6003 | case OP_PSR: |
| 6004 | case OP_RVC_PSR: |
| 6005 | case OP_COND: |
| 6006 | case OP_oBARRIER: |
| 6007 | case OP_REGLST: |
| 6008 | case OP_VRSLST: |
| 6009 | case OP_VRDLST: |
| 6010 | case OP_VRSDLST: |
| 6011 | case OP_NRDLST: |
| 6012 | case OP_NSTRLST: |
| 6013 | if (val == FAIL) |
| 6014 | goto failure; |
| 6015 | inst.operands[i].imm = val; |
| 6016 | break; |
| 6017 | |
| 6018 | default: |
| 6019 | break; |
| 6020 | } |
| 6021 | |
| 6022 | /* If we get here, this operand was successfully parsed. */ |
| 6023 | inst.operands[i].present = 1; |
| 6024 | continue; |
| 6025 | |
| 6026 | bad_args: |
| 6027 | inst.error = BAD_ARGS; |
| 6028 | |
| 6029 | failure: |
| 6030 | if (!backtrack_pos) |
| 6031 | { |
| 6032 | /* The parse routine should already have set inst.error, but set a |
| 6033 | defaut here just in case. */ |
| 6034 | if (!inst.error) |
| 6035 | inst.error = _("syntax error"); |
| 6036 | return FAIL; |
| 6037 | } |
| 6038 | |
| 6039 | /* Do not backtrack over a trailing optional argument that |
| 6040 | absorbed some text. We will only fail again, with the |
| 6041 | 'garbage following instruction' error message, which is |
| 6042 | probably less helpful than the current one. */ |
| 6043 | if (backtrack_index == i && backtrack_pos != str |
| 6044 | && upat[i+1] == OP_stop) |
| 6045 | { |
| 6046 | if (!inst.error) |
| 6047 | inst.error = _("syntax error"); |
| 6048 | return FAIL; |
| 6049 | } |
| 6050 | |
| 6051 | /* Try again, skipping the optional argument at backtrack_pos. */ |
| 6052 | str = backtrack_pos; |
| 6053 | inst.error = backtrack_error; |
| 6054 | inst.operands[backtrack_index].present = 0; |
| 6055 | i = backtrack_index; |
| 6056 | backtrack_pos = 0; |
| 6057 | } |
| 6058 | |
| 6059 | /* Check that we have parsed all the arguments. */ |
| 6060 | if (*str != '\0' && !inst.error) |
| 6061 | inst.error = _("garbage following instruction"); |
| 6062 | |
| 6063 | return inst.error ? FAIL : SUCCESS; |
| 6064 | } |
| 6065 | |
| 6066 | #undef po_char_or_fail |
| 6067 | #undef po_reg_or_fail |
| 6068 | #undef po_reg_or_goto |
| 6069 | #undef po_imm_or_fail |
| 6070 | #undef po_scalar_or_fail |
| 6071 | \f |
| 6072 | /* Shorthand macro for instruction encoding functions issuing errors. */ |
| 6073 | #define constraint(expr, err) do { \ |
| 6074 | if (expr) \ |
| 6075 | { \ |
| 6076 | inst.error = err; \ |
| 6077 | return; \ |
| 6078 | } \ |
| 6079 | } while (0) |
| 6080 | |
| 6081 | /* Functions for operand encoding. ARM, then Thumb. */ |
| 6082 | |
/* Rotate a 32-bit value V left by N bits, for N in the range 0..31.
   The "& 31" on the right-shift count avoids the undefined behavior
   of shifting by 32 when N is 0.  Arguments are parenthesized so the
   macro is safe with expression operands; note that both V and N are
   evaluated more than once.  */
#define rotate_left(v, n) ((v) << (n) | (v) >> ((32 - (n)) & 31))
| 6084 | |
| 6085 | /* If VAL can be encoded in the immediate field of an ARM instruction, |
| 6086 | return the encoded form. Otherwise, return FAIL. */ |
| 6087 | |
| 6088 | static unsigned int |
| 6089 | encode_arm_immediate (unsigned int val) |
| 6090 | { |
| 6091 | unsigned int a, i; |
| 6092 | |
| 6093 | for (i = 0; i < 32; i += 2) |
| 6094 | if ((a = rotate_left (val, i)) <= 0xff) |
| 6095 | return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */ |
| 6096 | |
| 6097 | return FAIL; |
| 6098 | } |
| 6099 | |
| 6100 | /* If VAL can be encoded in the immediate field of a Thumb32 instruction, |
| 6101 | return the encoded form. Otherwise, return FAIL. */ |
| 6102 | static unsigned int |
| 6103 | encode_thumb32_immediate (unsigned int val) |
| 6104 | { |
| 6105 | unsigned int a, i; |
| 6106 | |
| 6107 | if (val <= 0xff) |
| 6108 | return val; |
| 6109 | |
| 6110 | for (i = 1; i <= 24; i++) |
| 6111 | { |
| 6112 | a = val >> i; |
| 6113 | if ((val & ~(0xff << i)) == 0) |
| 6114 | return ((val >> i) & 0x7f) | ((32 - i) << 7); |
| 6115 | } |
| 6116 | |
| 6117 | a = val & 0xff; |
| 6118 | if (val == ((a << 16) | a)) |
| 6119 | return 0x100 | a; |
| 6120 | if (val == ((a << 24) | (a << 16) | (a << 8) | a)) |
| 6121 | return 0x300 | a; |
| 6122 | |
| 6123 | a = val & 0xff00; |
| 6124 | if (val == ((a << 16) | a)) |
| 6125 | return 0x200 | (a >> 8); |
| 6126 | |
| 6127 | return FAIL; |
| 6128 | } |
/* Encode a VFP SP or DP register number REG into inst.instruction at
   the field selected by POS.  D registers above 15 are only accepted
   when the VFPv3 extension is available, in which case its use is
   recorded in the per-mode architecture-used feature set.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  /* Note which instruction set used the extension, for later
	     attribute/feature bookkeeping.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* S registers split as 4+1 bits (main field + odd bit elsewhere);
     D registers split as 4+1 bits (main field + high bit elsewhere).  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
| 6183 | |
| 6184 | /* Encode a <shift> in an ARM-format instruction. The immediate, |
| 6185 | if any, is handled by md_apply_fix. */ |
| 6186 | static void |
| 6187 | encode_arm_shift (int i) |
| 6188 | { |
| 6189 | if (inst.operands[i].shift_kind == SHIFT_RRX) |
| 6190 | inst.instruction |= SHIFT_ROR << 5; |
| 6191 | else |
| 6192 | { |
| 6193 | inst.instruction |= inst.operands[i].shift_kind << 5; |
| 6194 | if (inst.operands[i].immisreg) |
| 6195 | { |
| 6196 | inst.instruction |= SHIFT_BY_REG; |
| 6197 | inst.instruction |= inst.operands[i].imm << 8; |
| 6198 | } |
| 6199 | else |
| 6200 | inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM; |
| 6201 | } |
| 6202 | } |
| 6203 | |
| 6204 | static void |
| 6205 | encode_arm_shifter_operand (int i) |
| 6206 | { |
| 6207 | if (inst.operands[i].isreg) |
| 6208 | { |
| 6209 | inst.instruction |= inst.operands[i].reg; |
| 6210 | encode_arm_shift (i); |
| 6211 | } |
| 6212 | else |
| 6213 | inst.instruction |= INST_IMMEDIATE; |
| 6214 | } |
| 6215 | |
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register, indexing mode and writeback flag of
   operand I into inst.instruction.  IS_T selects the "T"
   (user-mode/translated) load/store variants, which only accept
   post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* The operand must be a parsed address with a base register.  */
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      /* For T variants the W bit selects the translated form.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn if the transfer register (bits 12-15) is the same as a base
     register (bits 16-19) that will be written back.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
| 6254 | |
| 6255 | /* inst.operands[i] was set up by parse_address. Encode it into an |
| 6256 | ARM-format mode 2 load or store instruction. If is_t is true, |
| 6257 | reject forms that cannot be used with a T instruction (i.e. not |
| 6258 | post-indexed). */ |
| 6259 | static void |
| 6260 | encode_arm_addr_mode_2 (int i, bfd_boolean is_t) |
| 6261 | { |
| 6262 | encode_arm_addr_mode_common (i, is_t); |
| 6263 | |
| 6264 | if (inst.operands[i].immisreg) |
| 6265 | { |
| 6266 | inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */ |
| 6267 | inst.instruction |= inst.operands[i].imm; |
| 6268 | if (!inst.operands[i].negative) |
| 6269 | inst.instruction |= INDEX_UP; |
| 6270 | if (inst.operands[i].shifted) |
| 6271 | { |
| 6272 | if (inst.operands[i].shift_kind == SHIFT_RRX) |
| 6273 | inst.instruction |= SHIFT_ROR << 5; |
| 6274 | else |
| 6275 | { |
| 6276 | inst.instruction |= inst.operands[i].shift_kind << 5; |
| 6277 | inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM; |
| 6278 | } |
| 6279 | } |
| 6280 | } |
| 6281 | else /* immediate offset in inst.reloc */ |
| 6282 | { |
| 6283 | if (inst.reloc.type == BFD_RELOC_UNUSED) |
| 6284 | inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM; |
| 6285 | } |
| 6286 | } |
| 6287 | |
| 6288 | /* inst.operands[i] was set up by parse_address. Encode it into an |
| 6289 | ARM-format mode 3 load or store instruction. Reject forms that |
| 6290 | cannot be used with such instructions. If is_t is true, reject |
| 6291 | forms that cannot be used with a T instruction (i.e. not |
| 6292 | post-indexed). */ |
| 6293 | static void |
| 6294 | encode_arm_addr_mode_3 (int i, bfd_boolean is_t) |
| 6295 | { |
| 6296 | if (inst.operands[i].immisreg && inst.operands[i].shifted) |
| 6297 | { |
| 6298 | inst.error = _("instruction does not accept scaled register index"); |
| 6299 | return; |
| 6300 | } |
| 6301 | |
| 6302 | encode_arm_addr_mode_common (i, is_t); |
| 6303 | |
| 6304 | if (inst.operands[i].immisreg) |
| 6305 | { |
| 6306 | inst.instruction |= inst.operands[i].imm; |
| 6307 | if (!inst.operands[i].negative) |
| 6308 | inst.instruction |= INDEX_UP; |
| 6309 | } |
| 6310 | else /* immediate offset in inst.reloc */ |
| 6311 | { |
| 6312 | inst.instruction |= HWOFFSET_IMM; |
| 6313 | if (inst.reloc.type == BFD_RELOC_UNUSED) |
| 6314 | inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8; |
| 6315 | } |
| 6316 | } |
| 6317 | |
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS, or FAIL with
   inst.error set.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  /* parse_address never produces both forms at once.  */
  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* The 8-bit option value occupies the offset field.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Select the offset relocation: an explicit override wins, but any
     group relocation (or LDR_PC_G0) already chosen by the parser is
     preserved.  */
  if (reloc_override)
    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}
| 6378 | |
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register.  */

static int
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* Pick the load bit for the current encoding; 32-bit Thumb opcodes
     keep it in a different position from 16-bit ones.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on a load, never a store.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return 1;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return 1;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  /* Only attempted for pre-unified syntax, and only for
	     constants that fit in 8 bits.  */
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return 1;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }

	  /* Try the bitwise complement, which a mvn can materialize.  */
	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }
	}
    }

  /* Fall back to a PC-relative load from the literal pool: rewrite
     operand 1 as [pc, #offset] and let the relocation find the pool
     entry.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return 1;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return 0;
}
| 6460 | |
| 6461 | /* Functions for instruction encoding, sorted by subarchitecture. |
| 6462 | First some generics; their names are taken from the conventional |
| 6463 | bit positions for register arguments in ARM format instructions. */ |
| 6464 | |
/* Encoder for instructions with no operands: the opcode from insns[]
   is already complete.  */
static void
do_noargs (void)
{
}
| 6469 | |
/* Encode Rd in bits 12-15.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
| 6475 | |
| 6476 | static void |
| 6477 | do_rd_rm (void) |
| 6478 | { |
| 6479 | inst.instruction |= inst.operands[0].reg << 12; |
| 6480 | inst.instruction |= inst.operands[1].reg; |
| 6481 | } |
| 6482 | |
| 6483 | static void |
| 6484 | do_rd_rn (void) |
| 6485 | { |
| 6486 | inst.instruction |= inst.operands[0].reg << 12; |
| 6487 | inst.instruction |= inst.operands[1].reg << 16; |
| 6488 | } |
| 6489 | |
| 6490 | static void |
| 6491 | do_rn_rd (void) |
| 6492 | { |
| 6493 | inst.instruction |= inst.operands[0].reg << 16; |
| 6494 | inst.instruction |= inst.operands[1].reg << 12; |
| 6495 | } |
| 6496 | |
| 6497 | static void |
| 6498 | do_rd_rm_rn (void) |
| 6499 | { |
| 6500 | unsigned Rn = inst.operands[2].reg; |
| 6501 | /* Enforce restrictions on SWP instruction. */ |
| 6502 | if ((inst.instruction & 0x0fbfffff) == 0x01000090) |
| 6503 | constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg, |
| 6504 | _("Rn must not overlap other operands")); |
| 6505 | inst.instruction |= inst.operands[0].reg << 12; |
| 6506 | inst.instruction |= inst.operands[1].reg; |
| 6507 | inst.instruction |= Rn << 16; |
| 6508 | } |
| 6509 | |
| 6510 | static void |
| 6511 | do_rd_rn_rm (void) |
| 6512 | { |
| 6513 | inst.instruction |= inst.operands[0].reg << 12; |
| 6514 | inst.instruction |= inst.operands[1].reg << 16; |
| 6515 | inst.instruction |= inst.operands[2].reg; |
| 6516 | } |
| 6517 | |
| 6518 | static void |
| 6519 | do_rm_rd_rn (void) |
| 6520 | { |
| 6521 | inst.instruction |= inst.operands[0].reg; |
| 6522 | inst.instruction |= inst.operands[1].reg << 12; |
| 6523 | inst.instruction |= inst.operands[2].reg << 16; |
| 6524 | } |
| 6525 | |
/* Encode a bare immediate operand in the low bits of the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
| 6531 | |
/* Encode Rd in bits 12-15 plus a coprocessor-style address operand
   (writeback and unindexed forms both permitted).  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
| 6538 | |
| 6539 | /* ARM instructions, in alphabetical order by function name (except |
| 6540 | that wrapper functions appear immediately after the function they |
| 6541 | wrap). */ |
| 6542 | |
| 6543 | /* This is a pseudo-op of the form "adr rd, label" to be converted |
| 6544 | into a relative address of the form "add rd, pc, #label-.-8". */ |
| 6545 | |
| 6546 | static void |
| 6547 | do_adr (void) |
| 6548 | { |
| 6549 | inst.instruction |= (inst.operands[0].reg << 12); /* Rd */ |
| 6550 | |
| 6551 | /* Frag hacking will turn this into a sub instruction if the offset turns |
| 6552 | out to be negative. */ |
| 6553 | inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE; |
| 6554 | inst.reloc.pc_rel = 1; |
| 6555 | inst.reloc.exp.X_add_number -= 8; |
| 6556 | } |
| 6557 | |
| 6558 | /* This is a pseudo-op of the form "adrl rd, label" to be converted |
| 6559 | into a relative address of the form: |
| 6560 | add rd, pc, #low(label-.-8)" |
| 6561 | add rd, rd, #high(label-.-8)" */ |
| 6562 | |
| 6563 | static void |
| 6564 | do_adrl (void) |
| 6565 | { |
| 6566 | inst.instruction |= (inst.operands[0].reg << 12); /* Rd */ |
| 6567 | |
| 6568 | /* Frag hacking will turn this into a sub instruction if the offset turns |
| 6569 | out to be negative. */ |
| 6570 | inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE; |
| 6571 | inst.reloc.pc_rel = 1; |
| 6572 | inst.size = INSN_SIZE * 2; |
| 6573 | inst.reloc.exp.X_add_number -= 8; |
| 6574 | } |
| 6575 | |
| 6576 | static void |
| 6577 | do_arit (void) |
| 6578 | { |
| 6579 | if (!inst.operands[1].present) |
| 6580 | inst.operands[1].reg = inst.operands[0].reg; |
| 6581 | inst.instruction |= inst.operands[0].reg << 12; |
| 6582 | inst.instruction |= inst.operands[1].reg << 16; |
| 6583 | encode_arm_shifter_operand (2); |
| 6584 | } |
| 6585 | |
| 6586 | static void |
| 6587 | do_barrier (void) |
| 6588 | { |
| 6589 | if (inst.operands[0].present) |
| 6590 | { |
| 6591 | constraint ((inst.instruction & 0xf0) != 0x40 |
| 6592 | && inst.operands[0].imm != 0xf, |
| 6593 | "bad barrier type"); |
| 6594 | inst.instruction |= inst.operands[0].imm; |
| 6595 | } |
| 6596 | else |
| 6597 | inst.instruction |= 0xf; |
| 6598 | } |
| 6599 | |
| 6600 | static void |
| 6601 | do_bfc (void) |
| 6602 | { |
| 6603 | unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; |
| 6604 | constraint (msb > 32, _("bit-field extends past end of register")); |
| 6605 | /* The instruction encoding stores the LSB and MSB, |
| 6606 | not the LSB and width. */ |
| 6607 | inst.instruction |= inst.operands[0].reg << 12; |
| 6608 | inst.instruction |= inst.operands[1].imm << 7; |
| 6609 | inst.instruction |= (msb - 1) << 16; |
| 6610 | } |
| 6611 | |
| 6612 | static void |
| 6613 | do_bfi (void) |
| 6614 | { |
| 6615 | unsigned int msb; |
| 6616 | |
| 6617 | /* #0 in second position is alternative syntax for bfc, which is |
| 6618 | the same instruction but with REG_PC in the Rm field. */ |
| 6619 | if (!inst.operands[1].isreg) |
| 6620 | inst.operands[1].reg = REG_PC; |
| 6621 | |
| 6622 | msb = inst.operands[2].imm + inst.operands[3].imm; |
| 6623 | constraint (msb > 32, _("bit-field extends past end of register")); |
| 6624 | /* The instruction encoding stores the LSB and MSB, |
| 6625 | not the LSB and width. */ |
| 6626 | inst.instruction |= inst.operands[0].reg << 12; |
| 6627 | inst.instruction |= inst.operands[1].reg; |
| 6628 | inst.instruction |= inst.operands[2].imm << 7; |
| 6629 | inst.instruction |= (msb - 1) << 16; |
| 6630 | } |
| 6631 | |
| 6632 | static void |
| 6633 | do_bfx (void) |
| 6634 | { |
| 6635 | constraint (inst.operands[2].imm + inst.operands[3].imm > 32, |
| 6636 | _("bit-field extends past end of register")); |
| 6637 | inst.instruction |= inst.operands[0].reg << 12; |
| 6638 | inst.instruction |= inst.operands[1].reg; |
| 6639 | inst.instruction |= inst.operands[2].imm << 7; |
| 6640 | inst.instruction |= (inst.operands[3].imm - 1) << 16; |
| 6641 | } |
| 6642 | |
| 6643 | /* ARM V5 breakpoint instruction (argument parse) |
| 6644 | BKPT <16 bit unsigned immediate> |
| 6645 | Instruction is not conditional. |
| 6646 | The bit pattern given in insns[] has the COND_ALWAYS condition, |
| 6647 | and it is an error if the caller tried to override that. */ |
| 6648 | |
| 6649 | static void |
| 6650 | do_bkpt (void) |
| 6651 | { |
| 6652 | /* Top 12 of 16 bits to bits 19:8. */ |
| 6653 | inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4; |
| 6654 | |
| 6655 | /* Bottom 4 of 16 bits to bits 3:0. */ |
| 6656 | inst.instruction |= inst.operands[0].imm & 0xf; |
| 6657 | } |
| 6658 | |
| 6659 | static void |
| 6660 | encode_branch (int default_reloc) |
| 6661 | { |
| 6662 | if (inst.operands[0].hasreloc) |
| 6663 | { |
| 6664 | constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32, |
| 6665 | _("the only suffix valid here is '(plt)'")); |
| 6666 | inst.reloc.type = BFD_RELOC_ARM_PLT32; |
| 6667 | } |
| 6668 | else |
| 6669 | { |
| 6670 | inst.reloc.type = default_reloc; |
| 6671 | } |
| 6672 | inst.reloc.pc_rel = 1; |
| 6673 | } |
| 6674 | |
/* B{cond} <target>.  For ELF EABI v4 and later, use the JUMP
   relocation so the linker can handle interworking; otherwise use the
   plain branch relocation.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
| 6685 | |
/* BL{cond} <target>.  For ELF EABI v4 and later, an unconditional BL
   gets the CALL relocation; a conditional one gets the JUMP
   relocation (it cannot be turned into BLX by the linker).  Earlier
   ABIs use the plain branch relocation.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
| 6701 | |
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
#ifdef OBJ_ELF
      /* EABI v4 and later uses the CALL relocation.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
#endif
	encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
| 6736 | |
| 6737 | static void |
| 6738 | do_bx (void) |
| 6739 | { |
| 6740 | if (inst.operands[0].reg == REG_PC) |
| 6741 | as_tsktsk (_("use of r15 in bx in ARM mode is not really useful")); |
| 6742 | |
| 6743 | inst.instruction |= inst.operands[0].reg; |
| 6744 | } |
| 6745 | |
| 6746 | |
| 6747 | /* ARM v5TEJ. Jump to Jazelle code. */ |
| 6748 | |
| 6749 | static void |
| 6750 | do_bxj (void) |
| 6751 | { |
| 6752 | if (inst.operands[0].reg == REG_PC) |
| 6753 | as_tsktsk (_("use of r15 in bxj is not really useful")); |
| 6754 | |
| 6755 | inst.instruction |= inst.operands[0].reg; |
| 6756 | } |
| 6757 | |
| 6758 | /* Co-processor data operation: |
| 6759 | CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} |
| 6760 | CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */ |
| 6761 | static void |
| 6762 | do_cdp (void) |
| 6763 | { |
| 6764 | inst.instruction |= inst.operands[0].reg << 8; |
| 6765 | inst.instruction |= inst.operands[1].imm << 20; |
| 6766 | inst.instruction |= inst.operands[2].reg << 12; |
| 6767 | inst.instruction |= inst.operands[3].reg << 16; |
| 6768 | inst.instruction |= inst.operands[4].reg; |
| 6769 | inst.instruction |= inst.operands[5].imm << 5; |
| 6770 | } |
| 6771 | |
/* Comparison instruction: Rn in bits 19:16; operand 1 is encoded as
   the shifter operand.  There is no destination register field.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
| 6778 | |
| 6779 | /* Transfer between coprocessor and ARM registers. |
| 6780 | MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>} |
| 6781 | MRC2 |
| 6782 | MCR{cond} |
| 6783 | MCR2 |
| 6784 | |
| 6785 | No special properties. */ |
| 6786 | |
| 6787 | static void |
| 6788 | do_co_reg (void) |
| 6789 | { |
| 6790 | inst.instruction |= inst.operands[0].reg << 8; |
| 6791 | inst.instruction |= inst.operands[1].imm << 21; |
| 6792 | inst.instruction |= inst.operands[2].reg << 12; |
| 6793 | inst.instruction |= inst.operands[3].reg << 16; |
| 6794 | inst.instruction |= inst.operands[4].reg; |
| 6795 | inst.instruction |= inst.operands[5].imm << 5; |
| 6796 | } |
| 6797 | |
| 6798 | /* Transfer between coprocessor register and pair of ARM registers. |
| 6799 | MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>. |
| 6800 | MCRR2 |
| 6801 | MRRC{cond} |
| 6802 | MRRC2 |
| 6803 | |
| 6804 | Two XScale instructions are special cases of these: |
| 6805 | |
| 6806 | MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0 |
| 6807 | MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0 |
| 6808 | |
| 6809 | Result unpredicatable if Rd or Rn is R15. */ |
| 6810 | |
| 6811 | static void |
| 6812 | do_co_reg2c (void) |
| 6813 | { |
| 6814 | inst.instruction |= inst.operands[0].reg << 8; |
| 6815 | inst.instruction |= inst.operands[1].imm << 4; |
| 6816 | inst.instruction |= inst.operands[2].reg << 12; |
| 6817 | inst.instruction |= inst.operands[3].reg << 16; |
| 6818 | inst.instruction |= inst.operands[4].reg; |
| 6819 | } |
| 6820 | |
| 6821 | static void |
| 6822 | do_cpsi (void) |
| 6823 | { |
| 6824 | inst.instruction |= inst.operands[0].imm << 6; |
| 6825 | inst.instruction |= inst.operands[1].imm; |
| 6826 | } |
| 6827 | |
/* DBG hint: the option immediate goes into the low bits of the
   encoding; the rest comes from the opcode table.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
| 6833 | |
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it but do not generate code for it: setting inst.size
     to zero suppresses emission of any bytes.  */
  inst.size = 0;
}
| 6841 | |
/* Encode LDM/STM: base register, register-list mask, and the
   alternate (type 2/3) forms, warning about writeback combinations
   the architecture leaves UNPREDICTABLE.  */
static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;	/* Register list as a bit mask.  */

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* The list operand's writeback flag selects the type 2/3 encoding
     (presumably set by a '^' suffix — see the operand parser).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
| 6880 | |
| 6881 | /* ARMv5TE load-consecutive (argument parse) |
| 6882 | Mode is like LDRH. |
| 6883 | |
| 6884 | LDRccD R, mode |
| 6885 | STRccD R, mode. */ |
| 6886 | |
static void
do_ldrd (void)
{
  /* Operands: 0 = first Rd, 1 = optional second Rd, 2 = address.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* r14 is rejected because the implicit second register would be r15.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* If the second register was omitted, it defaults to Rd + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
| 6923 | |
/* LDREX Rd, [Rn]: only a bare, pre-indexed base register with a zero
   offset is a valid address form.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset was verified to be zero above; discard the relocation
     that was created for it.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
| 6953 | |
| 6954 | static void |
| 6955 | do_ldrexd (void) |
| 6956 | { |
| 6957 | constraint (inst.operands[0].reg % 2 != 0, |
| 6958 | _("even register required")); |
| 6959 | constraint (inst.operands[1].present |
| 6960 | && inst.operands[1].reg != inst.operands[0].reg + 1, |
| 6961 | _("can only load two consecutive registers")); |
| 6962 | /* If op 1 were present and equal to PC, this function wouldn't |
| 6963 | have been called in the first place. */ |
| 6964 | constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here")); |
| 6965 | |
| 6966 | inst.instruction |= inst.operands[0].reg << 12; |
| 6967 | inst.instruction |= inst.operands[2].reg << 16; |
| 6968 | } |
| 6969 | |
| 6970 | static void |
| 6971 | do_ldst (void) |
| 6972 | { |
| 6973 | inst.instruction |= inst.operands[0].reg << 12; |
| 6974 | if (!inst.operands[1].isreg) |
| 6975 | if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE)) |
| 6976 | return; |
| 6977 | encode_arm_addr_mode_2 (1, /*is_t=*/FALSE); |
| 6978 | } |
| 6979 | |
| 6980 | static void |
| 6981 | do_ldstt (void) |
| 6982 | { |
| 6983 | /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and |
| 6984 | reject [Rn,...]. */ |
| 6985 | if (inst.operands[1].preind) |
| 6986 | { |
| 6987 | constraint (inst.reloc.exp.X_op != O_constant || |
| 6988 | inst.reloc.exp.X_add_number != 0, |
| 6989 | _("this instruction requires a post-indexed address")); |
| 6990 | |
| 6991 | inst.operands[1].preind = 0; |
| 6992 | inst.operands[1].postind = 1; |
| 6993 | inst.operands[1].writeback = 1; |
| 6994 | } |
| 6995 | inst.instruction |= inst.operands[0].reg << 12; |
| 6996 | encode_arm_addr_mode_2 (1, /*is_t=*/TRUE); |
| 6997 | } |
| 6998 | |
| 6999 | /* Halfword and signed-byte load/store operations. */ |
| 7000 | |
| 7001 | static void |
| 7002 | do_ldstv4 (void) |
| 7003 | { |
| 7004 | inst.instruction |= inst.operands[0].reg << 12; |
| 7005 | if (!inst.operands[1].isreg) |
| 7006 | if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE)) |
| 7007 | return; |
| 7008 | encode_arm_addr_mode_3 (1, /*is_t=*/FALSE); |
| 7009 | } |
| 7010 | |
| 7011 | static void |
| 7012 | do_ldsttv4 (void) |
| 7013 | { |
| 7014 | /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and |
| 7015 | reject [Rn,...]. */ |
| 7016 | if (inst.operands[1].preind) |
| 7017 | { |
| 7018 | constraint (inst.reloc.exp.X_op != O_constant || |
| 7019 | inst.reloc.exp.X_add_number != 0, |
| 7020 | _("this instruction requires a post-indexed address")); |
| 7021 | |
| 7022 | inst.operands[1].preind = 0; |
| 7023 | inst.operands[1].postind = 1; |
| 7024 | inst.operands[1].writeback = 1; |
| 7025 | } |
| 7026 | inst.instruction |= inst.operands[0].reg << 12; |
| 7027 | encode_arm_addr_mode_3 (1, /*is_t=*/TRUE); |
| 7028 | } |
| 7029 | |
| 7030 | /* Co-processor register load/store. |
| 7031 | Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */ |
| 7032 | static void |
| 7033 | do_lstc (void) |
| 7034 | { |
| 7035 | inst.instruction |= inst.operands[0].reg << 8; |
| 7036 | inst.instruction |= inst.operands[1].reg << 12; |
| 7037 | encode_arm_cp_address (2, TRUE, TRUE, 0); |
| 7038 | } |
| 7039 | |
| 7040 | static void |
| 7041 | do_mlas (void) |
| 7042 | { |
| 7043 | /* This restriction does not apply to mls (nor to mla in v6, but |
| 7044 | that's hard to detect at present). */ |
| 7045 | if (inst.operands[0].reg == inst.operands[1].reg |
| 7046 | && !(inst.instruction & 0x00400000)) |
| 7047 | as_tsktsk (_("rd and rm should be different in mla")); |
| 7048 | |
| 7049 | inst.instruction |= inst.operands[0].reg << 16; |
| 7050 | inst.instruction |= inst.operands[1].reg; |
| 7051 | inst.instruction |= inst.operands[2].reg << 8; |
| 7052 | inst.instruction |= inst.operands[3].reg << 12; |
| 7053 | |
| 7054 | } |
| 7055 | |
/* Move instruction: Rd in bits 15:12; operand 1 is encoded as the
   shifter operand.  */
static void
do_mov (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
| 7062 | |
| 7063 | /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */ |
| 7064 | static void |
| 7065 | do_mov16 (void) |
| 7066 | { |
| 7067 | bfd_vma imm; |
| 7068 | bfd_boolean top; |
| 7069 | |
| 7070 | top = (inst.instruction & 0x00400000) != 0; |
| 7071 | constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW, |
| 7072 | _(":lower16: not allowed this instruction")); |
| 7073 | constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT, |
| 7074 | _(":upper16: not allowed instruction")); |
| 7075 | inst.instruction |= inst.operands[0].reg << 12; |
| 7076 | if (inst.reloc.type == BFD_RELOC_UNUSED) |
| 7077 | { |
| 7078 | imm = inst.reloc.exp.X_add_number; |
| 7079 | /* The value is in two pieces: 0:11, 16:19. */ |
| 7080 | inst.instruction |= (imm & 0x00000fff); |
| 7081 | inst.instruction |= (imm & 0x0000f000) << 4; |
| 7082 | } |
| 7083 | } |
| 7084 | |
| 7085 | static void do_vfp_nsyn_opcode (const char *); |
| 7086 | |
| 7087 | static int |
| 7088 | do_vfp_nsyn_mrs (void) |
| 7089 | { |
| 7090 | if (inst.operands[0].isvec) |
| 7091 | { |
| 7092 | if (inst.operands[1].reg != 1) |
| 7093 | first_error (_("operand 1 must be FPSCR")); |
| 7094 | memset (&inst.operands[0], '\0', sizeof (inst.operands[0])); |
| 7095 | memset (&inst.operands[1], '\0', sizeof (inst.operands[1])); |
| 7096 | do_vfp_nsyn_opcode ("fmstat"); |
| 7097 | } |
| 7098 | else if (inst.operands[1].isvec) |
| 7099 | do_vfp_nsyn_opcode ("fmrx"); |
| 7100 | else |
| 7101 | return FAIL; |
| 7102 | |
| 7103 | return SUCCESS; |
| 7104 | } |
| 7105 | |
| 7106 | static int |
| 7107 | do_vfp_nsyn_msr (void) |
| 7108 | { |
| 7109 | if (inst.operands[0].isvec) |
| 7110 | do_vfp_nsyn_opcode ("fmxr"); |
| 7111 | else |
| 7112 | return FAIL; |
| 7113 | |
| 7114 | return SUCCESS; |
| 7115 | } |
| 7116 | |
| 7117 | static void |
| 7118 | do_mrs (void) |
| 7119 | { |
| 7120 | if (do_vfp_nsyn_mrs () == SUCCESS) |
| 7121 | return; |
| 7122 | |
| 7123 | /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */ |
| 7124 | constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f)) |
| 7125 | != (PSR_c|PSR_f), |
| 7126 | _("'CPSR' or 'SPSR' expected")); |
| 7127 | inst.instruction |= inst.operands[0].reg << 12; |
| 7128 | inst.instruction |= (inst.operands[1].imm & SPSR_BIT); |
| 7129 | } |
| 7130 | |
| 7131 | /* Two possible forms: |
| 7132 | "{C|S}PSR_<field>, Rm", |
| 7133 | "{C|S}PSR_f, #expression". */ |
| 7134 | |
| 7135 | static void |
| 7136 | do_msr (void) |
| 7137 | { |
| 7138 | if (do_vfp_nsyn_msr () == SUCCESS) |
| 7139 | return; |
| 7140 | |
| 7141 | inst.instruction |= inst.operands[0].imm; |
| 7142 | if (inst.operands[1].isreg) |
| 7143 | inst.instruction |= inst.operands[1].reg; |
| 7144 | else |
| 7145 | { |
| 7146 | inst.instruction |= INST_IMMEDIATE; |
| 7147 | inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE; |
| 7148 | inst.reloc.pc_rel = 0; |
| 7149 | } |
| 7150 | } |
| 7151 | |
| 7152 | static void |
| 7153 | do_mul (void) |
| 7154 | { |
| 7155 | if (!inst.operands[2].present) |
| 7156 | inst.operands[2].reg = inst.operands[0].reg; |
| 7157 | inst.instruction |= inst.operands[0].reg << 16; |
| 7158 | inst.instruction |= inst.operands[1].reg; |
| 7159 | inst.instruction |= inst.operands[2].reg << 8; |
| 7160 | |
| 7161 | if (inst.operands[0].reg == inst.operands[1].reg) |
| 7162 | as_tsktsk (_("rd and rm should be different in mul")); |
| 7163 | } |
| 7164 | |
| 7165 | /* Long Multiply Parser |
| 7166 | UMULL RdLo, RdHi, Rm, Rs |
| 7167 | SMULL RdLo, RdHi, Rm, Rs |
| 7168 | UMLAL RdLo, RdHi, Rm, Rs |
| 7169 | SMLAL RdLo, RdHi, Rm, Rs. */ |
| 7170 | |
| 7171 | static void |
| 7172 | do_mull (void) |
| 7173 | { |
| 7174 | inst.instruction |= inst.operands[0].reg << 12; |
| 7175 | inst.instruction |= inst.operands[1].reg << 16; |
| 7176 | inst.instruction |= inst.operands[2].reg; |
| 7177 | inst.instruction |= inst.operands[3].reg << 8; |
| 7178 | |
| 7179 | /* rdhi, rdlo and rm must all be different. */ |
| 7180 | if (inst.operands[0].reg == inst.operands[1].reg |
| 7181 | || inst.operands[0].reg == inst.operands[2].reg |
| 7182 | || inst.operands[1].reg == inst.operands[2].reg) |
| 7183 | as_tsktsk (_("rdhi, rdlo and rm must all be different")); |
| 7184 | } |
| 7185 | |
| 7186 | static void |
| 7187 | do_nop (void) |
| 7188 | { |
| 7189 | if (inst.operands[0].present) |
| 7190 | { |
| 7191 | /* Architectural NOP hints are CPSR sets with no bits selected. */ |
| 7192 | inst.instruction &= 0xf0000000; |
| 7193 | inst.instruction |= 0x0320f000 + inst.operands[0].imm; |
| 7194 | } |
| 7195 | } |
| 7196 | |
| 7197 | /* ARM V6 Pack Halfword Bottom Top instruction (argument parse). |
| 7198 | PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>} |
| 7199 | Condition defaults to COND_ALWAYS. |
| 7200 | Error if Rd, Rn or Rm are R15. */ |
| 7201 | |
| 7202 | static void |
| 7203 | do_pkhbt (void) |
| 7204 | { |
| 7205 | inst.instruction |= inst.operands[0].reg << 12; |
| 7206 | inst.instruction |= inst.operands[1].reg << 16; |
| 7207 | inst.instruction |= inst.operands[2].reg; |
| 7208 | if (inst.operands[3].present) |
| 7209 | encode_arm_shift (3); |
| 7210 | } |
| 7211 | |
| 7212 | /* ARM V6 PKHTB (Argument Parse). */ |
| 7213 | |
| 7214 | static void |
| 7215 | do_pkhtb (void) |
| 7216 | { |
| 7217 | if (!inst.operands[3].present) |
| 7218 | { |
| 7219 | /* If the shift specifier is omitted, turn the instruction |
| 7220 | into pkhbt rd, rm, rn. */ |
| 7221 | inst.instruction &= 0xfff00010; |
| 7222 | inst.instruction |= inst.operands[0].reg << 12; |
| 7223 | inst.instruction |= inst.operands[1].reg; |
| 7224 | inst.instruction |= inst.operands[2].reg << 16; |
| 7225 | } |
| 7226 | else |
| 7227 | { |
| 7228 | inst.instruction |= inst.operands[0].reg << 12; |
| 7229 | inst.instruction |= inst.operands[1].reg << 16; |
| 7230 | inst.instruction |= inst.operands[2].reg; |
| 7231 | encode_arm_shift (3); |
| 7232 | } |
| 7233 | } |
| 7234 | |
| 7235 | /* ARMv5TE: Preload-Cache |
| 7236 | |
| 7237 | PLD <addr_mode> |
| 7238 | |
| 7239 | Syntactically, like LDR with B=1, W=0, L=1. */ |
| 7240 | |
static void
do_pld (void)
{
  /* PLD only accepts a plain pre-indexed address.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
| 7254 | |
| 7255 | /* ARMv7: PLI <addr_mode> */ |
static void
do_pli (void)
{
  /* Like PLD: only a plain pre-indexed address is acceptable.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is encoded with the pre-index bit clear, even though the
     syntax looks pre-indexed.  */
  inst.instruction &= ~PRE_INDEX;
}
| 7270 | |
| 7271 | static void |
| 7272 | do_push_pop (void) |
| 7273 | { |
| 7274 | inst.operands[1] = inst.operands[0]; |
| 7275 | memset (&inst.operands[0], 0, sizeof inst.operands[0]); |
| 7276 | inst.operands[0].isreg = 1; |
| 7277 | inst.operands[0].writeback = 1; |
| 7278 | inst.operands[0].reg = REG_SP; |
| 7279 | do_ldmstm (); |
| 7280 | } |
| 7281 | |
| 7282 | /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the |
| 7283 | word at the specified address and the following word |
| 7284 | respectively. |
| 7285 | Unconditionally executed. |
| 7286 | Error if Rn is R15. */ |
| 7287 | |
| 7288 | static void |
| 7289 | do_rfe (void) |
| 7290 | { |
| 7291 | inst.instruction |= inst.operands[0].reg << 16; |
| 7292 | if (inst.operands[0].writeback) |
| 7293 | inst.instruction |= WRITE_BACK; |
| 7294 | } |
| 7295 | |
| 7296 | /* ARM V6 ssat (argument parse). */ |
| 7297 | |
| 7298 | static void |
| 7299 | do_ssat (void) |
| 7300 | { |
| 7301 | inst.instruction |= inst.operands[0].reg << 12; |
| 7302 | inst.instruction |= (inst.operands[1].imm - 1) << 16; |
| 7303 | inst.instruction |= inst.operands[2].reg; |
| 7304 | |
| 7305 | if (inst.operands[3].present) |
| 7306 | encode_arm_shift (3); |
| 7307 | } |
| 7308 | |
| 7309 | /* ARM V6 usat (argument parse). */ |
| 7310 | |
| 7311 | static void |
| 7312 | do_usat (void) |
| 7313 | { |
| 7314 | inst.instruction |= inst.operands[0].reg << 12; |
| 7315 | inst.instruction |= inst.operands[1].imm << 16; |
| 7316 | inst.instruction |= inst.operands[2].reg; |
| 7317 | |
| 7318 | if (inst.operands[3].present) |
| 7319 | encode_arm_shift (3); |
| 7320 | } |
| 7321 | |
| 7322 | /* ARM V6 ssat16 (argument parse). */ |
| 7323 | |
| 7324 | static void |
| 7325 | do_ssat16 (void) |
| 7326 | { |
| 7327 | inst.instruction |= inst.operands[0].reg << 12; |
| 7328 | inst.instruction |= ((inst.operands[1].imm - 1) << 16); |
| 7329 | inst.instruction |= inst.operands[2].reg; |
| 7330 | } |
| 7331 | |
| 7332 | static void |
| 7333 | do_usat16 (void) |
| 7334 | { |
| 7335 | inst.instruction |= inst.operands[0].reg << 12; |
| 7336 | inst.instruction |= inst.operands[1].imm << 16; |
| 7337 | inst.instruction |= inst.operands[2].reg; |
| 7338 | } |
| 7339 | |
| 7340 | /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while |
| 7341 | preserving the other bits. |
| 7342 | |
| 7343 | setend <endian_specifier>, where <endian_specifier> is either |
| 7344 | BE or LE. */ |
| 7345 | |
| 7346 | static void |
| 7347 | do_setend (void) |
| 7348 | { |
| 7349 | if (inst.operands[0].imm) |
| 7350 | inst.instruction |= 0x200; |
| 7351 | } |
| 7352 | |
| 7353 | static void |
| 7354 | do_shift (void) |
| 7355 | { |
| 7356 | unsigned int Rm = (inst.operands[1].present |
| 7357 | ? inst.operands[1].reg |
| 7358 | : inst.operands[0].reg); |
| 7359 | |
| 7360 | inst.instruction |= inst.operands[0].reg << 12; |
| 7361 | inst.instruction |= Rm; |
| 7362 | if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */ |
| 7363 | { |
| 7364 | inst.instruction |= inst.operands[2].reg << 8; |
| 7365 | inst.instruction |= SHIFT_BY_REG; |
| 7366 | } |
| 7367 | else |
| 7368 | inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM; |
| 7369 | } |
| 7370 | |
| 7371 | static void |
| 7372 | do_smc (void) |
| 7373 | { |
| 7374 | inst.reloc.type = BFD_RELOC_ARM_SMC; |
| 7375 | inst.reloc.pc_rel = 0; |
| 7376 | } |
| 7377 | |
| 7378 | static void |
| 7379 | do_swi (void) |
| 7380 | { |
| 7381 | inst.reloc.type = BFD_RELOC_ARM_SWI; |
| 7382 | inst.reloc.pc_rel = 0; |
| 7383 | } |
| 7384 | |
| 7385 | /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse) |
| 7386 | SMLAxy{cond} Rd,Rm,Rs,Rn |
| 7387 | SMLAWy{cond} Rd,Rm,Rs,Rn |
| 7388 | Error if any register is R15. */ |
| 7389 | |
| 7390 | static void |
| 7391 | do_smla (void) |
| 7392 | { |
| 7393 | inst.instruction |= inst.operands[0].reg << 16; |
| 7394 | inst.instruction |= inst.operands[1].reg; |
| 7395 | inst.instruction |= inst.operands[2].reg << 8; |
| 7396 | inst.instruction |= inst.operands[3].reg << 12; |
| 7397 | } |
| 7398 | |
| 7399 | /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse) |
| 7400 | SMLALxy{cond} Rdlo,Rdhi,Rm,Rs |
| 7401 | Error if any register is R15. |
| 7402 | Warning if Rdlo == Rdhi. */ |
| 7403 | |
| 7404 | static void |
| 7405 | do_smlal (void) |
| 7406 | { |
| 7407 | inst.instruction |= inst.operands[0].reg << 12; |
| 7408 | inst.instruction |= inst.operands[1].reg << 16; |
| 7409 | inst.instruction |= inst.operands[2].reg; |
| 7410 | inst.instruction |= inst.operands[3].reg << 8; |
| 7411 | |
| 7412 | if (inst.operands[0].reg == inst.operands[1].reg) |
| 7413 | as_tsktsk (_("rdhi and rdlo must be different")); |
| 7414 | } |
| 7415 | |
| 7416 | /* ARM V5E (El Segundo) signed-multiply (argument parse) |
| 7417 | SMULxy{cond} Rd,Rm,Rs |
| 7418 | Error if any register is R15. */ |
| 7419 | |
| 7420 | static void |
| 7421 | do_smul (void) |
| 7422 | { |
| 7423 | inst.instruction |= inst.operands[0].reg << 16; |
| 7424 | inst.instruction |= inst.operands[1].reg; |
| 7425 | inst.instruction |= inst.operands[2].reg << 8; |
| 7426 | } |
| 7427 | |
| 7428 | /* ARM V6 srs (argument parse). */ |
| 7429 | |
| 7430 | static void |
| 7431 | do_srs (void) |
| 7432 | { |
| 7433 | inst.instruction |= inst.operands[0].imm; |
| 7434 | if (inst.operands[0].writeback) |
| 7435 | inst.instruction |= WRITE_BACK; |
| 7436 | } |
| 7437 | |
| 7438 | /* ARM V6 strex (argument parse). */ |
| 7439 | |
static void
do_strex (void)
{
  /* STREX Rd, Rm, [Rn]: only a bare, pre-indexed base register with a
     zero offset is a valid address form.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must not overlap the source or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset was verified to be zero above; discard the relocation
     that was created for it.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
| 7463 | |
| 7464 | static void |
| 7465 | do_strexd (void) |
| 7466 | { |
| 7467 | constraint (inst.operands[1].reg % 2 != 0, |
| 7468 | _("even register required")); |
| 7469 | constraint (inst.operands[2].present |
| 7470 | && inst.operands[2].reg != inst.operands[1].reg + 1, |
| 7471 | _("can only store two consecutive registers")); |
| 7472 | /* If op 2 were present and equal to PC, this function wouldn't |
| 7473 | have been called in the first place. */ |
| 7474 | constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here")); |
| 7475 | |
| 7476 | constraint (inst.operands[0].reg == inst.operands[1].reg |
| 7477 | || inst.operands[0].reg == inst.operands[1].reg + 1 |
| 7478 | || inst.operands[0].reg == inst.operands[3].reg, |
| 7479 | BAD_OVERLAP); |
| 7480 | |
| 7481 | inst.instruction |= inst.operands[0].reg << 12; |
| 7482 | inst.instruction |= inst.operands[1].reg; |
| 7483 | inst.instruction |= inst.operands[3].reg << 16; |
| 7484 | } |
| 7485 | |
| 7486 | /* ARM V6 SXTAH extracts a 16-bit value from a register, sign |
| 7487 | extends it to 32-bits, and adds the result to a value in another |
| 7488 | register. You can specify a rotation by 0, 8, 16, or 24 bits |
| 7489 | before extracting the 16-bit value. |
| 7490 | SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>} |
| 7491 | Condition defaults to COND_ALWAYS. |
| 7492 | Error if any register uses R15. */ |
| 7493 | |
| 7494 | static void |
| 7495 | do_sxtah (void) |
| 7496 | { |
| 7497 | inst.instruction |= inst.operands[0].reg << 12; |
| 7498 | inst.instruction |= inst.operands[1].reg << 16; |
| 7499 | inst.instruction |= inst.operands[2].reg; |
| 7500 | inst.instruction |= inst.operands[3].imm << 10; |
| 7501 | } |
| 7502 | |
| 7503 | /* ARM V6 SXTH. |
| 7504 | |
| 7505 | SXTH {<cond>} <Rd>, <Rm>{, <rotation>} |
| 7506 | Condition defaults to COND_ALWAYS. |
| 7507 | Error if any register uses R15. */ |
| 7508 | |
| 7509 | static void |
| 7510 | do_sxth (void) |
| 7511 | { |
| 7512 | inst.instruction |= inst.operands[0].reg << 12; |
| 7513 | inst.instruction |= inst.operands[1].reg; |
| 7514 | inst.instruction |= inst.operands[2].imm << 10; |
| 7515 | } |
| 7516 | \f |
| 7517 | /* VFP instructions. In a logical order: SP variant first, monad |
| 7518 | before dyad, arithmetic then move then load/store. */ |
| 7519 | |
/* Single-precision one-operand operation: encode Sd and Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
| 7526 | |
/* Single-precision two-operand operation: encode Sd, Sn and Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
| 7534 | |
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
| 7540 | |
/* Conversion with a double-precision destination and single-precision
   source: encode Dd and Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
| 7547 | |
/* Conversion with a single-precision destination and double-precision
   source: encode Sd and Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
| 7554 | |
/* Transfer from a VFP single register to a core register: Rd in bits
   15:12, source in the Sn field.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
| 7561 | |
/* Transfer a pair of VFP single registers to two core registers.
   Operand 2 is a register range; exactly two registers are allowed.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
| 7571 | |
/* Transfer from a core register to a VFP single register: destination
   in the Sn field, source core register in bits 15:12.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
| 7578 | |
/* Transfer two core registers to a pair of VFP single registers.
   Operand 0 is a register range; exactly two registers are allowed.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
| 7588 | |
/* Single-precision load/store: Sd plus a coprocessor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
| 7595 | |
| 7596 | static void |
| 7597 | do_vfp_dp_ldst (void) |
| 7598 | { |
| 7599 | encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| 7600 | encode_arm_cp_address (1, FALSE, TRUE, 0); |
| 7601 | } |
| 7602 | |
| 7603 | |
| 7604 | static void |
| 7605 | vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type) |
| 7606 | { |
| 7607 | if (inst.operands[0].writeback) |
| 7608 | inst.instruction |= WRITE_BACK; |
| 7609 | else |
| 7610 | constraint (ldstm_type != VFP_LDSTMIA, |
| 7611 | _("this addressing mode requires base-register writeback")); |
| 7612 | inst.instruction |= inst.operands[0].reg << 16; |
| 7613 | encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd); |
| 7614 | inst.instruction |= inst.operands[1].imm; |
| 7615 | } |
| 7616 | |
| 7617 | static void |
| 7618 | vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type) |
| 7619 | { |
| 7620 | int count; |
| 7621 | |
| 7622 | if (inst.operands[0].writeback) |
| 7623 | inst.instruction |= WRITE_BACK; |
| 7624 | else |
| 7625 | constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX, |
| 7626 | _("this addressing mode requires base-register writeback")); |
| 7627 | |
| 7628 | inst.instruction |= inst.operands[0].reg << 16; |
| 7629 | encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd); |
| 7630 | |
| 7631 | count = inst.operands[1].imm << 1; |
| 7632 | if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX) |
| 7633 | count += 1; |
| 7634 | |
| 7635 | inst.instruction |= count; |
| 7636 | } |
| 7637 | |
/* Single-precision load/store multiple, increment-after.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
| 7643 | |
/* Single-precision load/store multiple, decrement-before.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
| 7649 | |
/* Double-precision load/store multiple, increment-after.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
| 7655 | |
/* Double-precision load/store multiple, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
| 7661 | |
/* Extended (FLDMX/FSTMX-style) load/store multiple,
   increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
| 7667 | |
/* Extended (FLDMX/FSTMX-style) load/store multiple,
   decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
| 7673 | |
| 7674 | static void |
| 7675 | do_vfp_dp_rd_rm (void) |
| 7676 | { |
| 7677 | encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| 7678 | encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm); |
| 7679 | } |
| 7680 | |
| 7681 | static void |
| 7682 | do_vfp_dp_rn_rd (void) |
| 7683 | { |
| 7684 | encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn); |
| 7685 | encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd); |
| 7686 | } |
| 7687 | |
| 7688 | static void |
| 7689 | do_vfp_dp_rd_rn (void) |
| 7690 | { |
| 7691 | encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| 7692 | encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn); |
| 7693 | } |
| 7694 | |
| 7695 | static void |
| 7696 | do_vfp_dp_rd_rn_rm (void) |
| 7697 | { |
| 7698 | encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| 7699 | encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn); |
| 7700 | encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm); |
| 7701 | } |
| 7702 | |
/* Single-operand double-precision insn: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
| 7708 | |
| 7709 | static void |
| 7710 | do_vfp_dp_rm_rd_rn (void) |
| 7711 | { |
| 7712 | encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm); |
| 7713 | encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd); |
| 7714 | encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn); |
| 7715 | } |
| 7716 | |
| 7717 | /* VFPv3 instructions. */ |
| 7718 | static void |
| 7719 | do_vfp_sp_const (void) |
| 7720 | { |
| 7721 | encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd); |
| 7722 | inst.instruction |= (inst.operands[1].imm & 15) << 16; |
| 7723 | inst.instruction |= (inst.operands[1].imm >> 4); |
| 7724 | } |
| 7725 | |
| 7726 | static void |
| 7727 | do_vfp_dp_const (void) |
| 7728 | { |
| 7729 | encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd); |
| 7730 | inst.instruction |= (inst.operands[1].imm & 15) << 16; |
| 7731 | inst.instruction |= (inst.operands[1].imm >> 4); |
| 7732 | } |
| 7733 | |
| 7734 | static void |
| 7735 | vfp_conv (int srcsize) |
| 7736 | { |
| 7737 | unsigned immbits = srcsize - inst.operands[1].imm; |
| 7738 | inst.instruction |= (immbits & 1) << 5; |
| 7739 | inst.instruction |= (immbits >> 1); |
| 7740 | } |
| 7741 | |
/* Single-precision fixed-point conversion, 16-bit source size.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
| 7748 | |
/* Double-precision fixed-point conversion, 16-bit source size.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
| 7755 | |
/* Single-precision fixed-point conversion, 32-bit source size.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
| 7762 | |
/* Double-precision fixed-point conversion, 32-bit source size.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
| 7769 | |
| 7770 | \f |
| 7771 | /* FPA instructions. Also in a logical order. */ |
| 7772 | |
| 7773 | static void |
| 7774 | do_fpa_cmp (void) |
| 7775 | { |
| 7776 | inst.instruction |= inst.operands[0].reg << 16; |
| 7777 | inst.instruction |= inst.operands[1].reg; |
| 7778 | } |
| 7779 | |
/* FPA load/store multiple (LFM/SFM).  Operand 1 is the register
   count (1-4), encoded in the CP_T_X/CP_T_Y bits; operand 2 is the
   address.  The "ea"/"fd" stack forms are emulated by synthesizing
   a fixed offset, since the hardware has no true stacking mode.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Encode the transfer count: X and Y bits form a 2-bit field where
     a count of 4 is represented as 0.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes in memory.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending post-increment becomes post-indexed addressing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
| 7818 | |
| 7819 | \f |
| 7820 | /* iWMMXt instructions: strictly in alphabetical order. */ |
| 7821 | |
/* iWMMXt TANDC/TORC/TEXTRC-family check: the only legal destination
   is r15 (the condition flags).  Nothing else needs encoding.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
| 7827 | |
| 7828 | static void |
| 7829 | do_iwmmxt_textrc (void) |
| 7830 | { |
| 7831 | inst.instruction |= inst.operands[0].reg << 12; |
| 7832 | inst.instruction |= inst.operands[1].imm; |
| 7833 | } |
| 7834 | |
| 7835 | static void |
| 7836 | do_iwmmxt_textrm (void) |
| 7837 | { |
| 7838 | inst.instruction |= inst.operands[0].reg << 12; |
| 7839 | inst.instruction |= inst.operands[1].reg << 16; |
| 7840 | inst.instruction |= inst.operands[2].imm; |
| 7841 | } |
| 7842 | |
| 7843 | static void |
| 7844 | do_iwmmxt_tinsr (void) |
| 7845 | { |
| 7846 | inst.instruction |= inst.operands[0].reg << 16; |
| 7847 | inst.instruction |= inst.operands[1].reg << 12; |
| 7848 | inst.instruction |= inst.operands[2].imm; |
| 7849 | } |
| 7850 | |
| 7851 | static void |
| 7852 | do_iwmmxt_tmia (void) |
| 7853 | { |
| 7854 | inst.instruction |= inst.operands[0].reg << 5; |
| 7855 | inst.instruction |= inst.operands[1].reg; |
| 7856 | inst.instruction |= inst.operands[2].reg << 12; |
| 7857 | } |
| 7858 | |
| 7859 | static void |
| 7860 | do_iwmmxt_waligni (void) |
| 7861 | { |
| 7862 | inst.instruction |= inst.operands[0].reg << 12; |
| 7863 | inst.instruction |= inst.operands[1].reg << 16; |
| 7864 | inst.instruction |= inst.operands[2].reg; |
| 7865 | inst.instruction |= inst.operands[3].imm << 20; |
| 7866 | } |
| 7867 | |
| 7868 | static void |
| 7869 | do_iwmmxt_wmerge (void) |
| 7870 | { |
| 7871 | inst.instruction |= inst.operands[0].reg << 12; |
| 7872 | inst.instruction |= inst.operands[1].reg << 16; |
| 7873 | inst.instruction |= inst.operands[2].reg; |
| 7874 | inst.instruction |= inst.operands[3].imm << 21; |
| 7875 | } |
| 7876 | |
| 7877 | static void |
| 7878 | do_iwmmxt_wmov (void) |
| 7879 | { |
| 7880 | /* WMOV rD, rN is an alias for WOR rD, rN, rN. */ |
| 7881 | inst.instruction |= inst.operands[0].reg << 12; |
| 7882 | inst.instruction |= inst.operands[1].reg << 16; |
| 7883 | inst.instruction |= inst.operands[1].reg; |
| 7884 | } |
| 7885 | |
| 7886 | static void |
| 7887 | do_iwmmxt_wldstbh (void) |
| 7888 | { |
| 7889 | int reloc; |
| 7890 | inst.instruction |= inst.operands[0].reg << 12; |
| 7891 | if (thumb_mode) |
| 7892 | reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2; |
| 7893 | else |
| 7894 | reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2; |
| 7895 | encode_arm_cp_address (1, TRUE, FALSE, reloc); |
| 7896 | } |
| 7897 | |
/* iWMMXt word load/store.  The destination may be either a wR
   register or a control register; the control-register form is
   unconditional (0xF condition prefix).  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register. */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
| 7911 | |
/* iWMMXt doubleword load/store.  On iWMMXt2 a register-offset form
   exists; it is rewritten here from the generic coprocessor pattern
   into the unconditional (0xF-prefixed) encoding with the index
   register in bits 0-3.  Otherwise fall back to the standard
   coprocessor addressing encoder.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the coprocessor-form bits before building the
	 iWMMXt2-specific encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      /* NOTE(review): the constant offset is placed at bits 4+ here;
	 no range check is applied at this point — presumably handled
	 by the parser.  Verify against the iWMMXt2 encoding.  */
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
| 7934 | |
| 7935 | static void |
| 7936 | do_iwmmxt_wshufh (void) |
| 7937 | { |
| 7938 | inst.instruction |= inst.operands[0].reg << 12; |
| 7939 | inst.instruction |= inst.operands[1].reg << 16; |
| 7940 | inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16); |
| 7941 | inst.instruction |= (inst.operands[2].imm & 0x0f); |
| 7942 | } |
| 7943 | |
| 7944 | static void |
| 7945 | do_iwmmxt_wzero (void) |
| 7946 | { |
| 7947 | /* WZERO reg is an alias for WANDN reg, reg, reg. */ |
| 7948 | inst.instruction |= inst.operands[0].reg; |
| 7949 | inst.instruction |= inst.operands[0].reg << 12; |
| 7950 | inst.instruction |= inst.operands[0].reg << 16; |
| 7951 | } |
| 7952 | |
/* iWMMXt shift instructions that take either a third register
   (original iWMMXt) or a 5-bit immediate (iWMMXt2 only).  A zero
   immediate has no direct encoding, so it is rewritten as an
   equivalent full-width rotate (or a WOR for doubleword forms).  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation and element size.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled. */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  The immediate form is unconditional (0xF
       prefix), with bit 4 of the immediate in bit 8.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
| 8002 | \f |
| 8003 | /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register |
| 8004 | operations first, then control, shift, and load/store. */ |
| 8005 | |
| 8006 | /* Insns like "foo X,Y,Z". */ |
| 8007 | |
| 8008 | static void |
| 8009 | do_mav_triple (void) |
| 8010 | { |
| 8011 | inst.instruction |= inst.operands[0].reg << 16; |
| 8012 | inst.instruction |= inst.operands[1].reg; |
| 8013 | inst.instruction |= inst.operands[2].reg << 12; |
| 8014 | } |
| 8015 | |
| 8016 | /* Insns like "foo W,X,Y,Z". |
| 8017 | where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */ |
| 8018 | |
| 8019 | static void |
| 8020 | do_mav_quad (void) |
| 8021 | { |
| 8022 | inst.instruction |= inst.operands[0].reg << 5; |
| 8023 | inst.instruction |= inst.operands[1].reg << 12; |
| 8024 | inst.instruction |= inst.operands[2].reg << 16; |
| 8025 | inst.instruction |= inst.operands[3].reg; |
| 8026 | } |
| 8027 | |
| 8028 | /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */ |
| 8029 | static void |
| 8030 | do_mav_dspsc (void) |
| 8031 | { |
| 8032 | inst.instruction |= inst.operands[1].reg << 12; |
| 8033 | } |
| 8034 | |
| 8035 | /* Maverick shift immediate instructions. |
| 8036 | cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0]. |
| 8037 | cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */ |
| 8038 | |
| 8039 | static void |
| 8040 | do_mav_shift (void) |
| 8041 | { |
| 8042 | int imm = inst.operands[2].imm; |
| 8043 | |
| 8044 | inst.instruction |= inst.operands[0].reg << 12; |
| 8045 | inst.instruction |= inst.operands[1].reg << 16; |
| 8046 | |
| 8047 | /* Bits 0-3 of the insn should have bits 0-3 of the immediate. |
| 8048 | Bits 5-7 of the insn should have bits 4-6 of the immediate. |
| 8049 | Bit 4 should be 0. */ |
| 8050 | imm = (imm & 0xf) | ((imm & 0x70) << 1); |
| 8051 | |
| 8052 | inst.instruction |= imm; |
| 8053 | } |
| 8054 | \f |
| 8055 | /* XScale instructions. Also sorted arithmetic before move. */ |
| 8056 | |
| 8057 | /* Xscale multiply-accumulate (argument parse) |
| 8058 | MIAcc acc0,Rm,Rs |
| 8059 | MIAPHcc acc0,Rm,Rs |
| 8060 | MIAxycc acc0,Rm,Rs. */ |
| 8061 | |
| 8062 | static void |
| 8063 | do_xsc_mia (void) |
| 8064 | { |
| 8065 | inst.instruction |= inst.operands[1].reg; |
| 8066 | inst.instruction |= inst.operands[2].reg << 12; |
| 8067 | } |
| 8068 | |
| 8069 | /* Xscale move-accumulator-register (argument parse) |
| 8070 | |
| 8071 | MARcc acc0,RdLo,RdHi. */ |
| 8072 | |
| 8073 | static void |
| 8074 | do_xsc_mar (void) |
| 8075 | { |
| 8076 | inst.instruction |= inst.operands[1].reg << 12; |
| 8077 | inst.instruction |= inst.operands[2].reg << 16; |
| 8078 | } |
| 8079 | |
| 8080 | /* Xscale move-register-accumulator (argument parse) |
| 8081 | |
| 8082 | MRAcc RdLo,RdHi,acc0. */ |
| 8083 | |
| 8084 | static void |
| 8085 | do_xsc_mra (void) |
| 8086 | { |
| 8087 | constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP); |
| 8088 | inst.instruction |= inst.operands[0].reg << 12; |
| 8089 | inst.instruction |= inst.operands[1].reg << 16; |
| 8090 | } |
| 8091 | \f |
| 8092 | /* Encoding functions relevant only to Thumb. */ |
| 8093 | |
| 8094 | /* inst.operands[i] is a shifted-register operand; encode |
| 8095 | it into inst.instruction in the format used by Thumb32. */ |
| 8096 | |
/* Encode operand I (a shifted register) into inst.instruction in
   Thumb32 format: register in bits 0-3, shift type in bits 4-5,
   shift amount split across bits 12-14 and 6-7.  Register-specified
   shifts are not representable in this format.  */
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A shift of 0 is canonicalized to LSL; ASR/LSR by 32 are
	 encoded with a zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
| 8128 | |
| 8129 | |
| 8130 | /* inst.operands[i] was set up by parse_address. Encode it into a |
| 8131 | Thumb32 format load or store instruction. Reject forms that cannot |
| 8132 | be used with such instructions. If is_t is true, reject forms that |
| 8133 | cannot be used with a T instruction; if is_d is true, reject forms |
| 8134 | that cannot be used with a D instruction. */ |
| 8135 | |
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  */
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 is encodable (bits 4-5).  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — always writes back.  */
      assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
| 8206 | |
| 8207 | /* Table of Thumb instructions which exist in both 16- and 32-bit |
| 8208 | encodings (the latter only in post-V6T2 cores). The index is the |
| 8209 | value used in the insns table below. When there is more than one |
| 8210 | possible 16-bit encoding for the instruction, this table always |
| 8211 | holds variant (1). |
| 8212 | Also contains several pseudo-instructions used during relaxation. */ |
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* SEV.W is F3AF 8004; was mistyped f3af9004.  */

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
| 8310 | |
| 8311 | /* Thumb instruction encoders, in alphabetical order. */ |
| 8312 | |
| 8313 | /* ADDW or SUBW. */ |
| 8314 | static void |
| 8315 | do_t_add_sub_w (void) |
| 8316 | { |
| 8317 | int Rd, Rn; |
| 8318 | |
| 8319 | Rd = inst.operands[0].reg; |
| 8320 | Rn = inst.operands[1].reg; |
| 8321 | |
| 8322 | constraint (Rd == 15, _("PC not allowed as destination")); |
| 8323 | inst.instruction |= (Rn << 16) | (Rd << 8); |
| 8324 | inst.reloc.type = BFD_RELOC_ARM_T32_IMM12; |
| 8325 | } |
| 8326 | |
| 8327 | /* Parse an add or subtract instruction. We get here with inst.instruction |
| 8328 | equalling any of THUMB_OPCODE_add, adds, sub, or subs. */ |
| 8329 | |
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* In unified syntax the flag-setting forms are the narrow ones
	 outside an IT block, and the non-flag forms inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
	{
	  int add;

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate. */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    /* Allow relaxation to the 32-bit form later.  */
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rs == REG_PC)
		{
		  /* Always use addw/subw. */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	    }
	}
      else
	{
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction. */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add)
		{
		  if (Rd == Rs)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		  /* ... because addition is commutative! */
		  else if (Rd == Rn)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rs << 3;
		      return;
		    }
		}
	    }
	  /* If we get here, it can't be done in 16 bits. */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (old Thumb) syntax: only the 16-bit encodings
	 are available, and the "s" forms are rejected.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers. */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB. */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
| 8504 | |
| 8505 | static void |
| 8506 | do_t_adr (void) |
| 8507 | { |
| 8508 | if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7) |
| 8509 | { |
| 8510 | /* Defer to section relaxation. */ |
| 8511 | inst.relax = inst.instruction; |
| 8512 | inst.instruction = THUMB_OP16 (inst.instruction); |
| 8513 | inst.instruction |= inst.operands[0].reg << 4; |
| 8514 | } |
| 8515 | else if (unified_syntax && inst.size_req != 2) |
| 8516 | { |
| 8517 | /* Generate a 32-bit opcode. */ |
| 8518 | inst.instruction = THUMB_OP32 (inst.instruction); |
| 8519 | inst.instruction |= inst.operands[0].reg << 8; |
| 8520 | inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12; |
| 8521 | inst.reloc.pc_rel = 1; |
| 8522 | } |
| 8523 | else |
| 8524 | { |
| 8525 | /* Generate a 16-bit opcode. */ |
| 8526 | inst.instruction = THUMB_OP16 (inst.instruction); |
| 8527 | inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD; |
| 8528 | inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */ |
| 8529 | inst.reloc.pc_rel = 1; |
| 8530 | |
| 8531 | inst.instruction |= inst.operands[0].reg << 4; |
| 8532 | } |
| 8533 | } |
| 8534 | |
| 8535 | /* Arithmetic instructions for which there is just one 16-bit |
| 8536 | instruction encoding, and it allows only two low registers. |
| 8537 | For maximal compatibility with ARM syntax, we allow three register |
| 8538 | operands even when Thumb-32 instructions are not available, as long |
| 8539 | as the first two are identical. For instance, both "sbc r0,r1" and |
| 8540 | "sbc r0,r0,r1" are allowed. */ |
| 8541 | static void |
| 8542 | do_t_arit3 (void) |
| 8543 | { |
| 8544 | int Rd, Rs, Rn; |
| 8545 | |
| 8546 | Rd = inst.operands[0].reg; |
| 8547 | Rs = (inst.operands[1].present |
| 8548 | ? inst.operands[1].reg /* Rd, Rs, foo */ |
| 8549 | : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ |
| 8550 | Rn = inst.operands[2].reg; |
| 8551 | |
| 8552 | if (unified_syntax) |
| 8553 | { |
| 8554 | if (!inst.operands[2].isreg) |
| 8555 | { |
| 8556 | /* For an immediate, we always generate a 32-bit opcode; |
| 8557 | section relaxation will shrink it later if possible. */ |
| 8558 | inst.instruction = THUMB_OP32 (inst.instruction); |
| 8559 | inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; |
| 8560 | inst.instruction |= Rd << 8; |
| 8561 | inst.instruction |= Rs << 16; |
| 8562 | inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; |
| 8563 | } |
| 8564 | else |
| 8565 | { |
| 8566 | bfd_boolean narrow; |
| 8567 | |
| 8568 | /* See if we can do this with a 16-bit instruction. */ |
| 8569 | if (THUMB_SETS_FLAGS (inst.instruction)) |
| 8570 | narrow = current_it_mask == 0; |
| 8571 | else |
| 8572 | narrow = current_it_mask != 0; |
| 8573 | |
| 8574 | if (Rd > 7 || Rn > 7 || Rs > 7) |
| 8575 | narrow = FALSE; |
| 8576 | if (inst.operands[2].shifted) |
| 8577 | narrow = FALSE; |
| 8578 | if (inst.size_req == 4) |
| 8579 | narrow = FALSE; |
| 8580 | |
| 8581 | if (narrow |
| 8582 | && Rd == Rs) |
| 8583 | { |
| 8584 | inst.instruction = THUMB_OP16 (inst.instruction); |
| 8585 | inst.instruction |= Rd; |
| 8586 | inst.instruction |= Rn << 3; |
| 8587 | return; |
| 8588 | } |
| 8589 | |
| 8590 | /* If we get here, it can't be done in 16 bits. */ |
| 8591 | constraint (inst.operands[2].shifted |
| 8592 | && inst.operands[2].immisreg, |
| 8593 | _("shift must be constant")); |
| 8594 | inst.instruction = THUMB_OP32 (inst.instruction); |
| 8595 | inst.instruction |= Rd << 8; |
| 8596 | inst.instruction |= Rs << 16; |
| 8597 | encode_thumb32_shifted_operand (2); |
| 8598 | } |
| 8599 | } |
| 8600 | else |
| 8601 | { |
| 8602 | /* On its face this is a lie - the instruction does set the |
| 8603 | flags. However, the only supported mnemonic in this mode |
| 8604 | says it doesn't. */ |
| 8605 | constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); |
| 8606 | |
| 8607 | constraint (!inst.operands[2].isreg || inst.operands[2].shifted, |
| 8608 | _("unshifted register required")); |
| 8609 | constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG); |
| 8610 | constraint (Rd != Rs, |
| 8611 | _("dest and source1 must be the same register")); |
| 8612 | |
| 8613 | inst.instruction = THUMB_OP16 (inst.instruction); |
| 8614 | inst.instruction |= Rd; |
| 8615 | inst.instruction |= Rn << 3; |
| 8616 | } |
| 8617 | } |
| 8618 | |
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Clear the register-operand bits and select the
	     modified-immediate form of the encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     narrow form implicitly sets flags, so it is only usable
	     when that matches the requested mnemonic and IT state.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative operation: the destination may coincide
		 with either source register.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
| 8714 | |
| 8715 | static void |
| 8716 | do_t_barrier (void) |
| 8717 | { |
| 8718 | if (inst.operands[0].present) |
| 8719 | { |
| 8720 | constraint ((inst.instruction & 0xf0) != 0x40 |
| 8721 | && inst.operands[0].imm != 0xf, |
| 8722 | "bad barrier type"); |
| 8723 | inst.instruction |= inst.operands[0].imm; |
| 8724 | } |
| 8725 | else |
| 8726 | inst.instruction |= 0xf; |
| 8727 | } |
| 8728 | |
| 8729 | static void |
| 8730 | do_t_bfc (void) |
| 8731 | { |
| 8732 | unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; |
| 8733 | constraint (msb > 32, _("bit-field extends past end of register")); |
| 8734 | /* The instruction encoding stores the LSB and MSB, |
| 8735 | not the LSB and width. */ |
| 8736 | inst.instruction |= inst.operands[0].reg << 8; |
| 8737 | inst.instruction |= (inst.operands[1].imm & 0x1c) << 10; |
| 8738 | inst.instruction |= (inst.operands[1].imm & 0x03) << 6; |
| 8739 | inst.instruction |= msb - 1; |
| 8740 | } |
| 8741 | |
/* Encode Thumb-2 BFI (bit-field insert): Rd, Rm, #lsb, #width.  */
static void
do_t_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The LSB is split across two fields of the encoding.  */
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}
| 8762 | |
/* Encode a Thumb-2 bit-field extract: Rd, Rn, #lsb, #width.  */
static void
do_t_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The LSB is split across two fields of the encoding.  */
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  /* The width is stored as width minus one.  */
  inst.instruction |= inst.operands[3].imm - 1;
}
| 8774 | |
/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

static void
do_t_blx (void)
{
  /* A BLX may appear in an IT block only as its last instruction.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
#ifdef OBJ_ELF
      /* NOTE(review): for EABI v4+ a plain BRANCH23 reloc is emitted
	 instead of the BLX reloc — presumably the linker handles the
	 mode switch there; confirm against the EABI relocation docs.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
}
| 8805 | |
/* Encode a Thumb branch (B or B<cond>), choosing between the 16- and
   32-bit encodings and selecting the matching PC-relative reloc.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;

  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
         branches.  */
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      /* Explicitly-requested wide encoding.  */
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* 0xF is not a valid condition code at this point.  */
	  assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}
| 8857 | |
| 8858 | static void |
| 8859 | do_t_bkpt (void) |
| 8860 | { |
| 8861 | constraint (inst.cond != COND_ALWAYS, |
| 8862 | _("instruction is always unconditional")); |
| 8863 | if (inst.operands[0].present) |
| 8864 | { |
| 8865 | constraint (inst.operands[0].imm > 255, |
| 8866 | _("immediate value out of range")); |
| 8867 | inst.instruction |= inst.operands[0].imm; |
| 8868 | } |
| 8869 | } |
| 8870 | |
/* Encode a Thumb BL-style branch with a 23-bit displacement reloc.
   Permitted in an IT block only as its last instruction.  */
static void
do_t_branch23 (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}
| 8889 | |
/* Encode Thumb BX <Rm>.  Permitted in an IT block only as its last
   instruction.  */
static void
do_t_bx (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.	 This is
     because BX PC only works if the instruction is word aligned.  */
}
| 8899 | |
| 8900 | static void |
| 8901 | do_t_bxj (void) |
| 8902 | { |
| 8903 | constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH); |
| 8904 | if (inst.operands[0].reg == REG_PC) |
| 8905 | as_tsktsk (_("use of r15 in bxj is not really useful")); |
| 8906 | |
| 8907 | inst.instruction |= inst.operands[0].reg << 16; |
| 8908 | } |
| 8909 | |
/* Encode Thumb-2 CLZ: Rd goes in bits 11:8; the source register is
   deliberately encoded twice (bits 19:16 and 3:0).  */
static void
do_t_clz (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
| 8917 | |
/* Encode the simple Thumb CPS form.  Not permitted in an IT block.  */
static void
do_t_cps (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
}
| 8924 | |
| 8925 | static void |
| 8926 | do_t_cpsi (void) |
| 8927 | { |
| 8928 | constraint (current_it_mask, BAD_NOT_IT); |
| 8929 | if (unified_syntax |
| 8930 | && (inst.operands[1].present || inst.size_req == 4) |
| 8931 | && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm)) |
| 8932 | { |
| 8933 | unsigned int imod = (inst.instruction & 0x0030) >> 4; |
| 8934 | inst.instruction = 0xf3af8000; |
| 8935 | inst.instruction |= imod << 9; |
| 8936 | inst.instruction |= inst.operands[0].imm << 5; |
| 8937 | if (inst.operands[1].present) |
| 8938 | inst.instruction |= 0x100 | inst.operands[1].imm; |
| 8939 | } |
| 8940 | else |
| 8941 | { |
| 8942 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1) |
| 8943 | && (inst.operands[0].imm & 4), |
| 8944 | _("selected processor does not support 'A' form " |
| 8945 | "of this instruction")); |
| 8946 | constraint (inst.operands[1].present || inst.size_req == 4, |
| 8947 | _("Thumb does not support the 2-argument " |
| 8948 | "form of this instruction")); |
| 8949 | inst.instruction |= inst.operands[0].imm; |
| 8950 | } |
| 8951 | } |
| 8952 | |
| 8953 | /* THUMB CPY instruction (argument parse). */ |
| 8954 | |
| 8955 | static void |
| 8956 | do_t_cpy (void) |
| 8957 | { |
| 8958 | if (inst.size_req == 4) |
| 8959 | { |
| 8960 | inst.instruction = THUMB_OP32 (T_MNEM_mov); |
| 8961 | inst.instruction |= inst.operands[0].reg << 8; |
| 8962 | inst.instruction |= inst.operands[1].reg; |
| 8963 | } |
| 8964 | else |
| 8965 | { |
| 8966 | inst.instruction |= (inst.operands[0].reg & 0x8) << 4; |
| 8967 | inst.instruction |= (inst.operands[0].reg & 0x7); |
| 8968 | inst.instruction |= inst.operands[1].reg << 3; |
| 8969 | } |
| 8970 | } |
| 8971 | |
/* Encode CBZ/CBNZ: not permitted in an IT block, and the compared
   register must be a low register.  */
static void
do_t_cbz (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
| 8981 | |
/* Encode the DBG hint: the option immediate is OR-ed into the opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
| 8987 | |
| 8988 | static void |
| 8989 | do_t_div (void) |
| 8990 | { |
| 8991 | if (!inst.operands[1].present) |
| 8992 | inst.operands[1].reg = inst.operands[0].reg; |
| 8993 | inst.instruction |= inst.operands[0].reg << 8; |
| 8994 | inst.instruction |= inst.operands[1].reg << 16; |
| 8995 | inst.instruction |= inst.operands[2].reg; |
| 8996 | } |
| 8997 | |
| 8998 | static void |
| 8999 | do_t_hint (void) |
| 9000 | { |
| 9001 | if (unified_syntax && inst.size_req == 4) |
| 9002 | inst.instruction = THUMB_OP32 (inst.instruction); |
| 9003 | else |
| 9004 | inst.instruction = THUMB_OP16 (inst.instruction); |
| 9005 | } |
| 9006 | |
/* Encode an IT (if-then) instruction and record the IT state that
   subsequent instructions will be validated against.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  /* IT blocks may not nest.  */
  constraint (current_it_mask, BAD_NOT_IT);
  current_it_mask = (inst.instruction & 0xf) | 0x10;
  current_cc = cond;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The bits above the lowest set bit give the then/else pattern
	 of the extra slots; flip them to match the inverted
	 condition.  The lowest set bit itself just terminates the
	 pattern, so the number of bits to flip depends on where it
	 falls.  */
      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
| 9036 | |
/* Encode a Thumb load/store multiple.  Chooses between the 16- and
   32-bit encodings and warns about register lists whose behaviour is
   UNPREDICTABLE or deprecated.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      /* See if we can use a 16-bit instruction: low base register, a
	 list of only low registers, and writeback behaviour that
	 matches the narrow encoding's implicit rules.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && inst.operands[0].reg <= 7
	  && !(inst.operands[1].imm & ~0xff)
	  && (inst.instruction == T_MNEM_stmia
	      ? inst.operands[0].writeback
	      : (inst.operands[0].writeback
		 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
	{
	  /* Narrow STMIA storing the base register is only
	     predictable when the base is the lowest register in the
	     list.  */
	  if (inst.instruction == T_MNEM_stmia
	      && (inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);

	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].imm;
	}
      else
	{
	  /* 32-bit encoding: diagnose questionable register lists.  */
	  if (inst.operands[1].imm & (1 << 13))
	    as_warn (_("SP should not be in register list"));
	  if (inst.instruction == T_MNEM_stmia)
	    {
	      if (inst.operands[1].imm & (1 << 15))
		as_warn (_("PC should not be in register list"));
	      if (inst.operands[1].imm & (1 << inst.operands[0].reg))
		as_warn (_("value stored for r%d is UNPREDICTABLE"),
			 inst.operands[0].reg);
	    }
	  else
	    {
	      if (inst.operands[1].imm & (1 << 14)
		  && inst.operands[1].imm & (1 << 15))
		as_warn (_("LR and PC should not both be in register list"));
	      if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
		  && inst.operands[0].writeback)
		as_warn (_("base register should not be in register list "
			   "when written back"));
	    }
	  /* ldmdb/stmdb mnemonics are already 32-bit opcode values.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 16;
	  inst.instruction |= inst.operands[1].imm;
	  if (inst.operands[0].writeback)
	    inst.instruction |= WRITE_BACK;
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit forms exist, with their
	 implicit writeback rules.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
| 9126 | |
/* Encode Thumb-2 LDREX.  Only a simple pre-indexed, non-writeback,
   non-negative immediate address form is accepted; the offset is
   applied through the U8 relocation.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
| 9140 | |
/* Encode Thumb-2 LDREXD.  A missing second destination defaults to
   the register after the first; the two must not be identical.  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      /* r14 has no successor register to pair with.  */
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
| 9158 | |
/* Encode a Thumb single-register load/store.  In unified syntax the
   various 16-bit encodings are tried first (with relaxation to 32-bit
   where allowed); in non-unified syntax only 16-bit forms exist.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Non-register operand: try the literal pool / mov path.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Ri] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative accesses have dedicated
		     16-bit opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size requested: allow relaxation to a
		   32-bit encoding if the offset does not fit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- and SP-relative forms: word-sized loads (and SP-relative
	 stores) only, with an immediate offset.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert the immediate-offset opcode to its register-offset
     counterpart; ldrsb/ldrsh only exist in the register form.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
| 9313 | |
| 9314 | static void |
| 9315 | do_t_ldstd (void) |
| 9316 | { |
| 9317 | if (!inst.operands[1].present) |
| 9318 | { |
| 9319 | inst.operands[1].reg = inst.operands[0].reg + 1; |
| 9320 | constraint (inst.operands[0].reg == REG_LR, |
| 9321 | _("r14 not allowed here")); |
| 9322 | } |
| 9323 | inst.instruction |= inst.operands[0].reg << 12; |
| 9324 | inst.instruction |= inst.operands[1].reg << 8; |
| 9325 | encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE); |
| 9326 | |
| 9327 | } |
| 9328 | |
/* Encode an unprivileged ("translated") load/store; always 32-bit.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
| 9335 | |
| 9336 | static void |
| 9337 | do_t_mla (void) |
| 9338 | { |
| 9339 | inst.instruction |= inst.operands[0].reg << 8; |
| 9340 | inst.instruction |= inst.operands[1].reg << 16; |
| 9341 | inst.instruction |= inst.operands[2].reg; |
| 9342 | inst.instruction |= inst.operands[3].reg << 12; |
| 9343 | } |
| 9344 | |
| 9345 | static void |
| 9346 | do_t_mlal (void) |
| 9347 | { |
| 9348 | inst.instruction |= inst.operands[0].reg << 12; |
| 9349 | inst.instruction |= inst.operands[1].reg << 8; |
| 9350 | inst.instruction |= inst.operands[2].reg << 16; |
| 9351 | inst.instruction |= inst.operands[3].reg; |
| 9352 | } |
| 9353 | |
static void
do_t_mov_cmp (void)
{
  /* Encode MOV/MOVS/CMP, choosing between the 16-bit and 32-bit
     Thumb encodings.  */
  if (unified_syntax)
    {
      /* MOV/MOVS place Rd at bit 8; CMP places Rn at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
      opcode = inst.instruction;
      /* Narrow MOVS always sets flags, so it is unavailable inside an
	 IT block; outside one, narrow MOVS needs low registers.  */
      if (current_it_mask)
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      /* An explicit .w suffix or a shifted operand forces wide.  */
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (current_it_mask == 0 && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= inst.operands[0].reg << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		/* No explicit size: let relaxation pick later.  */
		inst.relax = opcode;
	    }
	  else
	    {
	      /* 32-bit modified-immediate form.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= inst.operands[0].reg << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (!narrow)
	{
	  /* Wide register form with optional shifted operand.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << r0off;
	  encode_thumb32_shifted_operand (1);
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* 16-bit hi-register MOV; Rd is split across bits 7 and
	       2..0.  */
	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
	    inst.instruction |= (inst.operands[0].reg & 0x7);
	    inst.instruction |= inst.operands[1].reg << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate ADD Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_ADD_I3;
	    inst.instruction |= inst.operands[0].reg;
	    inst.instruction |= inst.operands[1].reg << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= inst.operands[0].reg;
		inst.instruction |= inst.operands[1].reg << 3;
	      }
	    else
	      {
		/* Hi-register CMP; Rn split like hi-register MOV.  */
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
		inst.instruction |= (inst.operands[0].reg & 0x7);
		inst.instruction |= inst.operands[1].reg << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified (divided) syntax: only 16-bit encodings.  */
  inst.instruction = THUMB_OP16 (inst.instruction);
  if (inst.operands[1].isreg)
    {
      if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
| 9472 | |
static void
do_t_mov16 (void)
{
  /* MOVW/MOVT: 16-bit immediate move.  */
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 distinguishes MOVT (top half) from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      /* :lower16: only makes sense on MOVW.  */
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      /* :upper16: only makes sense on MOVT.  */
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  inst.instruction |= inst.operands[0].reg << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Constant known now: scatter imm16 across the i:imm4:imm3:imm8
	 fields of the T32 encoding.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
| 9501 | |
static void
do_t_mvn_tst (void)
{
  /* Encode MVN/MVNS/TST/TEQ/CMN-style two-operand instructions.  */
  if (unified_syntax)
    {
      /* MVN/MVNS place Rd at bit 8; the compare/test forms place
	 Rn at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= inst.operands[0].reg << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	  else
	    {
	      /* Wide form; register shifts must be by constant.  */
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit low-register form.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
| 9568 | |
| 9569 | static void |
| 9570 | do_t_mrs (void) |
| 9571 | { |
| 9572 | int flags; |
| 9573 | |
| 9574 | if (do_vfp_nsyn_mrs () == SUCCESS) |
| 9575 | return; |
| 9576 | |
| 9577 | flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT); |
| 9578 | if (flags == 0) |
| 9579 | { |
| 9580 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m), |
| 9581 | _("selected processor does not support " |
| 9582 | "requested special purpose register")); |
| 9583 | } |
| 9584 | else |
| 9585 | { |
| 9586 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1), |
| 9587 | _("selected processor does not support " |
| 9588 | "requested special purpose register %x")); |
| 9589 | /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */ |
| 9590 | constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f), |
| 9591 | _("'CPSR' or 'SPSR' expected")); |
| 9592 | } |
| 9593 | |
| 9594 | inst.instruction |= inst.operands[0].reg << 8; |
| 9595 | inst.instruction |= (flags & SPSR_BIT) >> 2; |
| 9596 | inst.instruction |= inst.operands[1].imm & 0xff; |
| 9597 | } |
| 9598 | |
static void
do_t_msr (void)
{
  /* Encode Thumb MSR.  */
  int flags;

  /* VFP-alias forms are handled by their own encoder.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      /* PSR field bits present: classic CPSR/SPSR form.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      /* Only the low byte: an M-profile special register.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
		  _("selected processor does not support "
		    "requested special purpose register"));
      flags |= PSR_f;
    }
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= inst.operands[1].reg << 16;
}
| 9628 | |
static void
do_t_mul (void)
{
  /* MUL/MULS.  Two-operand form: destination doubles as a source.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  /* There is no 32-bit MULS and no 16-bit MUL.  */
  if (unified_syntax && inst.instruction == T_MNEM_mul)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg << 0;
    }
  else
    {
      constraint (!unified_syntax
		  && inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;

      /* 16-bit MUL requires Rd to equal one of the sources; the other
	 source goes in bits 5..3.  */
      if (inst.operands[0].reg == inst.operands[1].reg)
	inst.instruction |= inst.operands[2].reg << 3;
      else if (inst.operands[0].reg == inst.operands[2].reg)
	inst.instruction |= inst.operands[1].reg << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
| 9661 | |
| 9662 | static void |
| 9663 | do_t_mull (void) |
| 9664 | { |
| 9665 | inst.instruction |= inst.operands[0].reg << 12; |
| 9666 | inst.instruction |= inst.operands[1].reg << 8; |
| 9667 | inst.instruction |= inst.operands[2].reg << 16; |
| 9668 | inst.instruction |= inst.operands[3].reg; |
| 9669 | |
| 9670 | if (inst.operands[0].reg == inst.operands[1].reg) |
| 9671 | as_tsktsk (_("rdhi and rdlo must be different")); |
| 9672 | } |
| 9673 | |
static void
do_t_nop (void)
{
  /* NOP and hint instructions.  */
  if (unified_syntax)
    {
      /* Hints above 15 do not fit the 16-bit encoding.  */
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm << 4;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      /* Traditional Thumb NOP: mov r8, r8.  */
      inst.instruction = 0x46c0;
    }
}
| 9697 | |
static void
do_t_neg (void)
{
  /* NEG/NEGS (rsb from zero).  */
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* The narrow form sets flags, so it is only usable where the
	 flag-setting behavior matches the IT context.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
| 9738 | |
static void
do_t_pkhbt (void)
{
  /* PKHBT Rd, Rn, Rm {, LSL #imm}.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      /* Shift amount splits into imm3 (bits 14..12) and imm2
	 (bits 7..6).  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
| 9754 | |
| 9755 | static void |
| 9756 | do_t_pkhtb (void) |
| 9757 | { |
| 9758 | if (!inst.operands[3].present) |
| 9759 | inst.instruction &= ~0x00000020; |
| 9760 | do_t_pkhbt (); |
| 9761 | } |
| 9762 | |
static void
do_t_pld (void)
{
  /* PLD: only an address operand; the common routine fills in the
     addressing-mode fields.  */
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
| 9768 | |
static void
do_t_push_pop (void)
{
  /* PUSH/POP: pick among the 16-bit form, the 16-bit form with
     LR/PC, a single-register LDR/STR, or the full 32-bit LDM/STM.  */
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction);
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* Low registers plus LR (push) or PC (pop): 16-bit form with
	 the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      mask &= 0xff;
    }
  else if (unified_syntax)
    {
      if (mask & (1 << 13))
	inst.error = _("SP not allowed in register list");
      if (inst.instruction == T_MNEM_push)
	{
	  if (mask & (1 << 15))
	    inst.error = _("PC not allowed in register list");
	}
      else
	{
	  if (mask & (1 << 14)
	      && mask & (1 << 15))
	    inst.error = _("LR and PC should not both be in register list");
	}
      if ((mask & (mask - 1)) == 0)
	{
	  /* Single register push/pop implemented as str/ldr.  */
	  if (inst.instruction == T_MNEM_push)
	    inst.instruction = 0xf84d0d04;	/* str reg, [sp, #-4]!  */
	  else
	    inst.instruction = 0xf85d0b04;	/* ldr reg, [sp], #4  */
	  /* Convert the one-bit mask into a register number in the
	     Rt field (bits 15..12).  */
	  mask = ffs(mask) - 1;
	  mask <<= 12;
	}
      else
	inst.instruction = THUMB_OP32 (inst.instruction);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }

  inst.instruction |= mask;
}
| 9827 | |
| 9828 | static void |
| 9829 | do_t_rbit (void) |
| 9830 | { |
| 9831 | inst.instruction |= inst.operands[0].reg << 8; |
| 9832 | inst.instruction |= inst.operands[1].reg << 16; |
| 9833 | } |
| 9834 | |
static void
do_t_rev (void)
{
  /* REV/REV16/REVSH.  */
  if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && inst.size_req != 4)
    {
      /* Low registers and no .w: 16-bit encoding.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      /* 32-bit form repeats Rm in both source fields.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    inst.error = BAD_HIREG;
}
| 9855 | |
static void
do_t_rsb (void)
{
  /* RSB (32-bit Thumb only).  */
  int Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      /* Immediate: switch to the modified-immediate form.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    encode_thumb32_shifted_operand (2);
}
| 9876 | |
| 9877 | static void |
| 9878 | do_t_setend (void) |
| 9879 | { |
| 9880 | constraint (current_it_mask, BAD_NOT_IT); |
| 9881 | if (inst.operands[0].imm) |
| 9882 | inst.instruction |= 0x8; |
| 9883 | } |
| 9884 | |
static void
do_t_shift (void)
{
  /* ASR/LSL/LSR/ROR, by register or by immediate.  Two-operand form:
     destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Narrow encodings set flags, so they are only usable where
	 that matches the IT context.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit shift-by-register requires Rd == Rn and a low Rm.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Shift-by-immediate is encoded as a MOV with a shifted
		 operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      /* The shift amount is applied by the fixup.  */
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only 16-bit, low-register encodings.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
| 10016 | |
| 10017 | static void |
| 10018 | do_t_simd (void) |
| 10019 | { |
| 10020 | inst.instruction |= inst.operands[0].reg << 8; |
| 10021 | inst.instruction |= inst.operands[1].reg << 16; |
| 10022 | inst.instruction |= inst.operands[2].reg; |
| 10023 | } |
| 10024 | |
static void
do_t_smc (void)
{
  /* SMC: the 16-bit immediate must be a constant; it is scattered
     across three fields of the encoding.  */
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* The value is encoded inline, so no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
}
| 10036 | |
static void
do_t_ssat (void)
{
  /* SSAT Rd, #sat, Rn {, shift}.  The saturation position is encoded
     as imm - 1.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit */
	  /* Shift amount splits into imm3 (bits 14..12) and imm2
	     (bits 7..6).  */
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}
| 10059 | |
| 10060 | static void |
| 10061 | do_t_ssat16 (void) |
| 10062 | { |
| 10063 | inst.instruction |= inst.operands[0].reg << 8; |
| 10064 | inst.instruction |= inst.operands[1].imm - 1; |
| 10065 | inst.instruction |= inst.operands[2].reg << 16; |
| 10066 | } |
| 10067 | |
static void
do_t_strex (void)
{
  /* STREX Rd, Rt, [Rn {, #imm}].  Only a plain register-plus-offset
     address is acceptable.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 8;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */
  /* The offset byte is applied by the fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
| 10082 | |
static void
do_t_strexd (void)
{
  /* STREXD Rd, Rt, Rt2, [Rn].  Rt2 defaults to Rt + 1.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap the transfer registers, and
     the two transfer registers must differ.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg
	      || inst.operands[1].reg == inst.operands[2].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;		/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rt2.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn.  */
}
| 10100 | |
| 10101 | static void |
| 10102 | do_t_sxtah (void) |
| 10103 | { |
| 10104 | inst.instruction |= inst.operands[0].reg << 8; |
| 10105 | inst.instruction |= inst.operands[1].reg << 16; |
| 10106 | inst.instruction |= inst.operands[2].reg; |
| 10107 | inst.instruction |= inst.operands[3].imm << 4; |
| 10108 | } |
| 10109 | |
static void
do_t_sxth (void)
{
  /* SXTH/SXTB/UXTH/UXTB: 16-bit form only with low registers and no
     rotation.  */
  if (inst.instruction <= 0xffff && inst.size_req != 4
      && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
| 10136 | |
static void
do_t_swi (void)
{
  /* SWI/SVC: the immediate field is applied by the fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
| 10142 | |
static void
do_t_tb (void)
{
  /* TBB/TBH: table branch.  */
  int half;

  /* Bit 4 distinguishes TBH (halfword table) from TBB.  */
  half = (inst.instruction & 0x10) != 0;
  /* Only permitted as the last instruction of an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));
  constraint (inst.operands[0].imm == 15,
	      _("PC is not a valid index register"));
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
}
| 10158 | |
static void
do_t_usat (void)
{
  /* USAT Rd, #sat, Rn {, shift}.  Unlike SSAT, the saturation
     position is encoded directly.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit */

	  /* Shift amount splits into imm3 (bits 14..12) and imm2
	     (bits 7..6).  */
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}
| 10181 | |
| 10182 | static void |
| 10183 | do_t_usat16 (void) |
| 10184 | { |
| 10185 | inst.instruction |= inst.operands[0].reg << 8; |
| 10186 | inst.instruction |= inst.operands[1].imm; |
| 10187 | inst.instruction |= inst.operands[2].reg << 16; |
| 10188 | } |
| 10189 | |
| 10190 | /* Neon instruction encoder helpers. */ |
| 10191 | |
| 10192 | /* Encodings for the different types for various Neon opcodes. */ |
| 10193 | |
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon opcode-overload table: base encoding bits for
   the integer, float-or-polynomial, and scalar-or-immediate variants
   of a mnemonic (N_INV where a variant does not exist).  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
| 10203 | |
/* Map overloaded Neon opcodes to their respective encodings.  Each
   X(opc, integer, float_or_poly, scalar_or_imm) entry supplies the
   three variant encodings for one mnemonic; the table is expanded
   twice below, once to build enum neon_opc and once to build
   neon_enc_tab.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed. */  	\
  X(vclt,	0x0000310, 0x1000e00, 0x1b10200),	\
  X(vcle,	0x0000300, 0x1200e00, 0x1b10180),	\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe000a40, 0xe000b40, N_INV),		\
  X(vnmls,      0xe100a40, 0xe100b40, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV)
| 10262 | |
/* Give each mnemonic in NEON_ENC_TAB an N_MNEM_<opc> enumerator.  These
   values are stored in inst.instruction as placeholders until the final
   encoding for the chosen variant is looked up in neon_enc_tab.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
  NEON_ENC_TAB
#undef X
};
| 10269 | |
/* Per-mnemonic encoding bits, indexed by the N_MNEM_* enumerators above.
   Which of the three fields applies, and what it means, depends on the
   instruction; see the comments in NEON_ENC_TAB.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
  NEON_ENC_TAB
#undef X
};
| 10276 | |
/* While an instruction is processed, the low 28 bits of inst.instruction
   hold its N_MNEM_* placeholder; these macros fetch the encoding bits for
   the wanted variant from neon_enc_tab.  Several accessors alias the same
   table field; the macro name records which variant is being selected.  */
#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The VFP single/double forms additionally preserve the top (condition)
   nibble of X.  */
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
| 10290 | |
| 10291 | /* Define shapes for instruction operands. The following mnemonic characters |
| 10292 | are used in this table: |
| 10293 | |
| 10294 | F - VFP S<n> register |
| 10295 | D - Neon D<n> register |
| 10296 | Q - Neon Q<n> register |
| 10297 | I - Immediate |
| 10298 | S - Scalar |
| 10299 | R - ARM register |
| 10300 | L - D<n> register list |
| 10301 | |
| 10302 | This table is used to generate various data: |
| 10303 | - enumerations of the form NS_DDR to be used as arguments to |
| 10304 | neon_select_shape. |
| 10305 | - a table classifying shapes into single, double, quad, mixed. |
| 10306 | - a table used to drive neon_select_shape. |
| 10307 | */ |
| 10308 | |
/* Each entry is X(<operand count>, (<operand kinds>), <shape class>), using
   the mnemonic characters listed above; the macro is expanded several ways
   below to build the shape enumeration and lookup tables.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)
| 10350 | |
/* Expand NEON_SHAPE_DEF into enum neon_shape: X(N, (A,B,...), class) becomes
   a name of the form NS_AB... via the arity-specific S<N> helpers.  NS_NULL
   terminates the shape argument list passed to neon_select_shape.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
| 10367 | |
/* Broad classification of a shape: which register file/width dominates.  */
enum neon_shape_class
{
  SC_SINGLE,	/* VFP single-precision (S<n>) operation.  */
  SC_DOUBLE,	/* Doubleword (D<n>) operation.  */
  SC_QUAD,	/* Quadword (Q<n>) operation.  */
  SC_MIXED	/* Operands of differing widths.  */
};
| 10375 | |
/* Expand NEON_SHAPE_DEF into the classification of each shape, indexed by
   enum neon_shape.  */
#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
| 10384 | |
/* The kinds of operand element a shape may contain; these mirror the
   mnemonic characters documented above NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_F,		/* VFP S<n> register.  */
  SE_D,		/* Neon D<n> register.  */
  SE_Q,		/* Neon Q<n> register.  */
  SE_I,		/* Immediate.  */
  SE_S,		/* Scalar.  */
  SE_R,		/* ARM core register.  */
  SE_L		/* D<n> register list.  */
};
| 10395 | |
/* Register width in bits of each operand-element kind, indexed by
   enum neon_shape_el.  Immediates (SE_I) and register lists (SE_L) have no
   single register width, so they are recorded as zero.  The table is only
   ever read (see the N_VFP width check in neon_check_type), hence const.  */
static const unsigned neon_shape_el_size[] =
{
  32,	/* SE_F: VFP S<n> register.  */
  64,	/* SE_D: Neon D<n> register.  */
  128,	/* SE_Q: Neon Q<n> register.  */
  0,	/* SE_I: immediate.  */
  32,	/* SE_S: scalar.  */
  32,	/* SE_R: ARM core register.  */
  0	/* SE_L: register list.  */
};
| 10407 | |
/* Decoded form of one instruction shape: how many operand elements it has
   and what kind each one is.  */
struct neon_shape_info
{
  unsigned els;		/* Number of operand elements in this shape.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];	/* Kind of each element.  */
};
| 10413 | |
/* Expand NEON_SHAPE_DEF once more, this time into a table of
   neon_shape_info records (indexed by enum neon_shape) giving the element
   count and element kinds of every shape; used to drive
   neon_select_shape.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
| 10429 | |
| 10430 | /* Bit masks used in type checking given instructions. |
| 10431 | 'N_EQK' means the type must be the same as (or based on in some way) the key |
| 10432 | type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is |
| 10433 | set, various other bits can be set as well in order to modify the meaning of |
| 10434 | the type constraint. */ |
| 10435 | |
enum neon_type_mask
{
  /* Element types with explicit signedness.  */
  N_S8   = 0x000001,
  N_S16  = 0x000002,
  N_S32  = 0x000004,
  N_S64  = 0x000008,
  N_U8   = 0x000010,
  N_U16  = 0x000020,
  N_U32  = 0x000040,
  N_U64  = 0x000080,
  /* Sign-insensitive integer element types.  */
  N_I8   = 0x000100,
  N_I16  = 0x000200,
  N_I32  = 0x000400,
  N_I64  = 0x000800,
  /* Untyped (size-only) elements.  */
  N_8    = 0x001000,
  N_16   = 0x002000,
  N_32   = 0x004000,
  N_64   = 0x008000,
  /* Polynomial and floating-point element types.  */
  N_P8   = 0x010000,
  N_P16  = 0x020000,
  N_F32  = 0x040000,
  N_F64  = 0x080000,
  N_KEY  = 0x100000, /* key element (main type specifier).  */
  N_EQK  = 0x200000, /* given operand has the same type & size as the key.  */
  N_VFP  = 0x400000, /* VFP mode: operand size must match register width.  */
  /* The modifier bits below deliberately reuse the low bit values; they are
     only interpreted when N_EQK is also set, so there is no ambiguity.  */
  N_DBL  = 0x000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF  = 0x000002, /* if N_EQK, this operand is half the size.  */
  N_SGN  = 0x000004, /* if N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x000008, /* if N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x000010, /* if N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x000020, /* if N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x000040, /* if N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,	     /* no type information available.  */
  N_MAX_NONSPECIAL = N_F64  /* highest bit that denotes an absolute type.  */
};
| 10471 | |
/* Every modifier bit that may accompany N_EQK.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groups of element types, for use in per-instruction type masks.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
| 10484 | |
| 10485 | /* Select a "shape" for the current instruction (describing register types or |
| 10486 | sizes) from a list of alternatives. Return NS_NULL if the current instruction |
| 10487 | doesn't fit. For non-polymorphic shapes, checking is usually done as a |
| 10488 | function of operand parsing, so this function doesn't need to be called. |
| 10489 | Shapes should be listed in order of decreasing length. */ |
| 10490 | |
| 10491 | static enum neon_shape |
| 10492 | neon_select_shape (enum neon_shape shape, ...) |
| 10493 | { |
| 10494 | va_list ap; |
| 10495 | enum neon_shape first_shape = shape; |
| 10496 | |
| 10497 | /* Fix missing optional operands. FIXME: we don't know at this point how |
| 10498 | many arguments we should have, so this makes the assumption that we have |
| 10499 | > 1. This is true of all current Neon opcodes, I think, but may not be |
| 10500 | true in the future. */ |
| 10501 | if (!inst.operands[1].present) |
| 10502 | inst.operands[1] = inst.operands[0]; |
| 10503 | |
| 10504 | va_start (ap, shape); |
| 10505 | |
| 10506 | for (; shape != NS_NULL; shape = va_arg (ap, int)) |
| 10507 | { |
| 10508 | unsigned j; |
| 10509 | int matches = 1; |
| 10510 | |
| 10511 | for (j = 0; j < neon_shape_tab[shape].els; j++) |
| 10512 | { |
| 10513 | if (!inst.operands[j].present) |
| 10514 | { |
| 10515 | matches = 0; |
| 10516 | break; |
| 10517 | } |
| 10518 | |
| 10519 | switch (neon_shape_tab[shape].el[j]) |
| 10520 | { |
| 10521 | case SE_F: |
| 10522 | if (!(inst.operands[j].isreg |
| 10523 | && inst.operands[j].isvec |
| 10524 | && inst.operands[j].issingle |
| 10525 | && !inst.operands[j].isquad)) |
| 10526 | matches = 0; |
| 10527 | break; |
| 10528 | |
| 10529 | case SE_D: |
| 10530 | if (!(inst.operands[j].isreg |
| 10531 | && inst.operands[j].isvec |
| 10532 | && !inst.operands[j].isquad |
| 10533 | && !inst.operands[j].issingle)) |
| 10534 | matches = 0; |
| 10535 | break; |
| 10536 | |
| 10537 | case SE_R: |
| 10538 | if (!(inst.operands[j].isreg |
| 10539 | && !inst.operands[j].isvec)) |
| 10540 | matches = 0; |
| 10541 | break; |
| 10542 | |
| 10543 | case SE_Q: |
| 10544 | if (!(inst.operands[j].isreg |
| 10545 | && inst.operands[j].isvec |
| 10546 | && inst.operands[j].isquad |
| 10547 | && !inst.operands[j].issingle)) |
| 10548 | matches = 0; |
| 10549 | break; |
| 10550 | |
| 10551 | case SE_I: |
| 10552 | if (!(!inst.operands[j].isreg |
| 10553 | && !inst.operands[j].isscalar)) |
| 10554 | matches = 0; |
| 10555 | break; |
| 10556 | |
| 10557 | case SE_S: |
| 10558 | if (!(!inst.operands[j].isreg |
| 10559 | && inst.operands[j].isscalar)) |
| 10560 | matches = 0; |
| 10561 | break; |
| 10562 | |
| 10563 | case SE_L: |
| 10564 | break; |
| 10565 | } |
| 10566 | } |
| 10567 | if (matches) |
| 10568 | break; |
| 10569 | } |
| 10570 | |
| 10571 | va_end (ap); |
| 10572 | |
| 10573 | if (shape == NS_NULL && first_shape != NS_NULL) |
| 10574 | first_error (_("invalid instruction shape")); |
| 10575 | |
| 10576 | return shape; |
| 10577 | } |
| 10578 | |
| 10579 | /* True if SHAPE is predominantly a quadword operation (most of the time, this |
| 10580 | means the Q bit should be set). */ |
| 10581 | |
| 10582 | static int |
| 10583 | neon_quad (enum neon_shape shape) |
| 10584 | { |
| 10585 | return neon_shape_class[shape] == SC_QUAD; |
| 10586 | } |
| 10587 | |
| 10588 | static void |
| 10589 | neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type, |
| 10590 | unsigned *g_size) |
| 10591 | { |
| 10592 | /* Allow modification to be made to types which are constrained to be |
| 10593 | based on the key element, based on bits set alongside N_EQK. */ |
| 10594 | if ((typebits & N_EQK) != 0) |
| 10595 | { |
| 10596 | if ((typebits & N_HLF) != 0) |
| 10597 | *g_size /= 2; |
| 10598 | else if ((typebits & N_DBL) != 0) |
| 10599 | *g_size *= 2; |
| 10600 | if ((typebits & N_SGN) != 0) |
| 10601 | *g_type = NT_signed; |
| 10602 | else if ((typebits & N_UNS) != 0) |
| 10603 | *g_type = NT_unsigned; |
| 10604 | else if ((typebits & N_INT) != 0) |
| 10605 | *g_type = NT_integer; |
| 10606 | else if ((typebits & N_FLT) != 0) |
| 10607 | *g_type = NT_float; |
| 10608 | else if ((typebits & N_SIZ) != 0) |
| 10609 | *g_type = NT_untyped; |
| 10610 | } |
| 10611 | } |
| 10612 | |
| 10613 | /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key" |
| 10614 | operand type, i.e. the single type specified in a Neon instruction when it |
| 10615 | is the only one given. */ |
| 10616 | |
| 10617 | static struct neon_type_el |
| 10618 | neon_type_promote (struct neon_type_el *key, unsigned thisarg) |
| 10619 | { |
| 10620 | struct neon_type_el dest = *key; |
| 10621 | |
| 10622 | assert ((thisarg & N_EQK) != 0); |
| 10623 | |
| 10624 | neon_modify_type_size (thisarg, &dest.type, &dest.size); |
| 10625 | |
| 10626 | return dest; |
| 10627 | } |
| 10628 | |
| 10629 | /* Convert Neon type and size into compact bitmask representation. */ |
| 10630 | |
| 10631 | static enum neon_type_mask |
| 10632 | type_chk_of_el_type (enum neon_el_type type, unsigned size) |
| 10633 | { |
| 10634 | switch (type) |
| 10635 | { |
| 10636 | case NT_untyped: |
| 10637 | switch (size) |
| 10638 | { |
| 10639 | case 8: return N_8; |
| 10640 | case 16: return N_16; |
| 10641 | case 32: return N_32; |
| 10642 | case 64: return N_64; |
| 10643 | default: ; |
| 10644 | } |
| 10645 | break; |
| 10646 | |
| 10647 | case NT_integer: |
| 10648 | switch (size) |
| 10649 | { |
| 10650 | case 8: return N_I8; |
| 10651 | case 16: return N_I16; |
| 10652 | case 32: return N_I32; |
| 10653 | case 64: return N_I64; |
| 10654 | default: ; |
| 10655 | } |
| 10656 | break; |
| 10657 | |
| 10658 | case NT_float: |
| 10659 | switch (size) |
| 10660 | { |
| 10661 | case 32: return N_F32; |
| 10662 | case 64: return N_F64; |
| 10663 | default: ; |
| 10664 | } |
| 10665 | break; |
| 10666 | |
| 10667 | case NT_poly: |
| 10668 | switch (size) |
| 10669 | { |
| 10670 | case 8: return N_P8; |
| 10671 | case 16: return N_P16; |
| 10672 | default: ; |
| 10673 | } |
| 10674 | break; |
| 10675 | |
| 10676 | case NT_signed: |
| 10677 | switch (size) |
| 10678 | { |
| 10679 | case 8: return N_S8; |
| 10680 | case 16: return N_S16; |
| 10681 | case 32: return N_S32; |
| 10682 | case 64: return N_S64; |
| 10683 | default: ; |
| 10684 | } |
| 10685 | break; |
| 10686 | |
| 10687 | case NT_unsigned: |
| 10688 | switch (size) |
| 10689 | { |
| 10690 | case 8: return N_U8; |
| 10691 | case 16: return N_U16; |
| 10692 | case 32: return N_U32; |
| 10693 | case 64: return N_U64; |
| 10694 | default: ; |
| 10695 | } |
| 10696 | break; |
| 10697 | |
| 10698 | default: ; |
| 10699 | } |
| 10700 | |
| 10701 | return N_UTYP; |
| 10702 | } |
| 10703 | |
| 10704 | /* Convert compact Neon bitmask type representation to a type and size. Only |
| 10705 | handles the case where a single bit is set in the mask. */ |
| 10706 | |
| 10707 | static int |
| 10708 | el_type_of_type_chk (enum neon_el_type *type, unsigned *size, |
| 10709 | enum neon_type_mask mask) |
| 10710 | { |
| 10711 | if ((mask & N_EQK) != 0) |
| 10712 | return FAIL; |
| 10713 | |
| 10714 | if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0) |
| 10715 | *size = 8; |
| 10716 | else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0) |
| 10717 | *size = 16; |
| 10718 | else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0) |
| 10719 | *size = 32; |
| 10720 | else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0) |
| 10721 | *size = 64; |
| 10722 | else |
| 10723 | return FAIL; |
| 10724 | |
| 10725 | if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0) |
| 10726 | *type = NT_signed; |
| 10727 | else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0) |
| 10728 | *type = NT_unsigned; |
| 10729 | else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0) |
| 10730 | *type = NT_integer; |
| 10731 | else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0) |
| 10732 | *type = NT_untyped; |
| 10733 | else if ((mask & (N_P8 | N_P16)) != 0) |
| 10734 | *type = NT_poly; |
| 10735 | else if ((mask & (N_F32 | N_F64)) != 0) |
| 10736 | *type = NT_float; |
| 10737 | else |
| 10738 | return FAIL; |
| 10739 | |
| 10740 | return SUCCESS; |
| 10741 | } |
| 10742 | |
| 10743 | /* Modify a bitmask of allowed types. This is only needed for type |
| 10744 | relaxation. */ |
| 10745 | |
| 10746 | static unsigned |
| 10747 | modify_types_allowed (unsigned allowed, unsigned mods) |
| 10748 | { |
| 10749 | unsigned size; |
| 10750 | enum neon_el_type type; |
| 10751 | unsigned destmask; |
| 10752 | int i; |
| 10753 | |
| 10754 | destmask = 0; |
| 10755 | |
| 10756 | for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1) |
| 10757 | { |
| 10758 | if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS) |
| 10759 | { |
| 10760 | neon_modify_type_size (mods, &type, &size); |
| 10761 | destmask |= type_chk_of_el_type (type, size); |
| 10762 | } |
| 10763 | } |
| 10764 | |
| 10765 | return destmask; |
| 10766 | } |
| 10767 | |
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands).  All
   Neon instructions should call it before performing bit encoding.
   ELS is the number of operand elements; NS is the shape already selected for
   the instruction; the varargs are one neon_type_mask per element.  Returns
   the type of the key element, or {NT_invtype, -1} on failure (after calling
   first_error).  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure, after the error has been reported.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE as the first mask skips checking entirely.  */
      if (thisarg == N_IGNORE_TYPE)
        {
          va_end (ap);
          return badtype;
        }
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
        key_el = i;
    }
  va_end (ap);

  /* A type suffix on the mnemonic and types on the operands are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
        {
          first_error (_("types specified in both the mnemonic and operands"));
          return badtype;
        }

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      /* One type given for several elements: it names the key element;
	 derive the rest from it via their N_EQK modifiers.  */
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
        if (j != key_el)
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
         after each operand. We allow some flexibility here; as long as the
         "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
        if (inst.operands[j].vectype.type != NT_invtype)
          inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
        {
          for (j = 0; j < els; j++)
            if (inst.operands[j].vectype.type == NT_invtype)
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                      types[j]);
        }
      else
        {
          first_error (_("operand types can't be inferred"));
          return badtype;
        }
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: the first records the key element's type/size and allowed
     mask; the second checks every element against the mask (or against the
     key, for N_EQK-relative elements).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
        {
          unsigned thisarg = types[i];
	  /* On pass 1, N_EQK elements are checked against the key's allowed
	     mask, transformed by their own modifier bits.  */
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
          enum neon_el_type g_type = inst.vectype.el[i].type;
          unsigned g_size = inst.vectype.el[i].size;

          /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
          if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

          /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
          if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

          if (pass == 0)
            {
              if ((thisarg & N_KEY) != 0)
                {
                  k_type = g_type;
                  k_size = g_size;
                  key_allowed = thisarg & ~N_KEY;
                }
            }
          else
            {
              if ((thisarg & N_VFP) != 0)
                {
                  enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
                  unsigned regwidth = neon_shape_el_size[regshape], match;

                  /* In VFP mode, operands must match register widths. If we
                     have a key operand, use its width, else use the width of
                     the current operand.  */
                  if (k_size != -1u)
                    match = k_size;
                  else
                    match = g_size;

                  if (regwidth != match)
                    {
                      first_error (_("operand size must match register width"));
                      return badtype;
                    }
                }

              if ((thisarg & N_EQK) == 0)
                {
		  /* Absolute constraint: the element's own type must appear
		     in the allowed mask.  */
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);

                  if ((given_type & types_allowed) == 0)
                    {
                      first_error (_("bad type in Neon instruction"));
                      return badtype;
                    }
                }
              else
                {
		  /* Relative constraint: the element must equal the key
		     after the key has been modified by this element's
		     modifier bits.  */
                  enum neon_el_type mod_k_type = k_type;
                  unsigned mod_k_size = k_size;
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
                  if (g_type != mod_k_type || g_size != mod_k_size)
                    {
                      first_error (_("inconsistent types in Neon instruction"));
                      return badtype;
                    }
                }
            }
        }
    }

  return inst.vectype.el[key_el];
}
| 10946 | |
| 10947 | /* Neon-style VFP instruction forwarding. */ |
| 10948 | |
| 10949 | /* Thumb VFP instructions have 0xE in the condition field. */ |
| 10950 | |
| 10951 | static void |
| 10952 | do_vfp_cond_or_thumb (void) |
| 10953 | { |
| 10954 | if (thumb_mode) |
| 10955 | inst.instruction |= 0xe0000000; |
| 10956 | else |
| 10957 | inst.instruction |= inst.cond << 28; |
| 10958 | } |
| 10959 | |
| 10960 | /* Look up and encode a simple mnemonic, for use as a helper function for the |
| 10961 | Neon-style VFP syntax. This avoids duplication of bits of the insns table, |
| 10962 | etc. It is assumed that operand parsing has already been done, and that the |
| 10963 | operands are in the form expected by the given opcode (this isn't necessarily |
| 10964 | the same as the form in which they were parsed, hence some massaging must |
| 10965 | take place before this function is called). |
| 10966 | Checks current arch version against that in the looked-up opcode. */ |
| 10967 | |
| 10968 | static void |
| 10969 | do_vfp_nsyn_opcode (const char *opname) |
| 10970 | { |
| 10971 | const struct asm_opcode *opcode; |
| 10972 | |
| 10973 | opcode = hash_find (arm_ops_hsh, opname); |
| 10974 | |
| 10975 | if (!opcode) |
| 10976 | abort (); |
| 10977 | |
| 10978 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, |
| 10979 | thumb_mode ? *opcode->tvariant : *opcode->avariant), |
| 10980 | _(BAD_FPU)); |
| 10981 | |
| 10982 | if (thumb_mode) |
| 10983 | { |
| 10984 | inst.instruction = opcode->tvalue; |
| 10985 | opcode->tencode (); |
| 10986 | } |
| 10987 | else |
| 10988 | { |
| 10989 | inst.instruction = (inst.cond << 28) | opcode->avalue; |
| 10990 | opcode->aencode (); |
| 10991 | } |
| 10992 | } |
| 10993 | |
| 10994 | static void |
| 10995 | do_vfp_nsyn_add_sub (enum neon_shape rs) |
| 10996 | { |
| 10997 | int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd; |
| 10998 | |
| 10999 | if (rs == NS_FFF) |
| 11000 | { |
| 11001 | if (is_add) |
| 11002 | do_vfp_nsyn_opcode ("fadds"); |
| 11003 | else |
| 11004 | do_vfp_nsyn_opcode ("fsubs"); |
| 11005 | } |
| 11006 | else |
| 11007 | { |
| 11008 | if (is_add) |
| 11009 | do_vfp_nsyn_opcode ("faddd"); |
| 11010 | else |
| 11011 | do_vfp_nsyn_opcode ("fsubd"); |
| 11012 | } |
| 11013 | } |
| 11014 | |
| 11015 | /* Check operand types to see if this is a VFP instruction, and if so call |
| 11016 | PFN (). */ |
| 11017 | |
| 11018 | static int |
| 11019 | try_vfp_nsyn (int args, void (*pfn) (enum neon_shape)) |
| 11020 | { |
| 11021 | enum neon_shape rs; |
| 11022 | struct neon_type_el et; |
| 11023 | |
| 11024 | switch (args) |
| 11025 | { |
| 11026 | case 2: |
| 11027 | rs = neon_select_shape (NS_FF, NS_DD, NS_NULL); |
| 11028 | et = neon_check_type (2, rs, |
| 11029 | N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); |
| 11030 | break; |
| 11031 | |
| 11032 | case 3: |
| 11033 | rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); |
| 11034 | et = neon_check_type (3, rs, |
| 11035 | N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); |
| 11036 | break; |
| 11037 | |
| 11038 | default: |
| 11039 | abort (); |
| 11040 | } |
| 11041 | |
| 11042 | if (et.type != NT_invtype) |
| 11043 | { |
| 11044 | pfn (rs); |
| 11045 | return SUCCESS; |
| 11046 | } |
| 11047 | else |
| 11048 | inst.error = NULL; |
| 11049 | |
| 11050 | return FAIL; |
| 11051 | } |
| 11052 | |
| 11053 | static void |
| 11054 | do_vfp_nsyn_mla_mls (enum neon_shape rs) |
| 11055 | { |
| 11056 | int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla; |
| 11057 | |
| 11058 | if (rs == NS_FFF) |
| 11059 | { |
| 11060 | if (is_mla) |
| 11061 | do_vfp_nsyn_opcode ("fmacs"); |
| 11062 | else |
| 11063 | do_vfp_nsyn_opcode ("fmscs"); |
| 11064 | } |
| 11065 | else |
| 11066 | { |
| 11067 | if (is_mla) |
| 11068 | do_vfp_nsyn_opcode ("fmacd"); |
| 11069 | else |
| 11070 | do_vfp_nsyn_opcode ("fmscd"); |
| 11071 | } |
| 11072 | } |
| 11073 | |
| 11074 | static void |
| 11075 | do_vfp_nsyn_mul (enum neon_shape rs) |
| 11076 | { |
| 11077 | if (rs == NS_FFF) |
| 11078 | do_vfp_nsyn_opcode ("fmuls"); |
| 11079 | else |
| 11080 | do_vfp_nsyn_opcode ("fmuld"); |
| 11081 | } |
| 11082 | |
| 11083 | static void |
| 11084 | do_vfp_nsyn_abs_neg (enum neon_shape rs) |
| 11085 | { |
| 11086 | int is_neg = (inst.instruction & 0x80) != 0; |
| 11087 | neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY); |
| 11088 | |
| 11089 | if (rs == NS_FF) |
| 11090 | { |
| 11091 | if (is_neg) |
| 11092 | do_vfp_nsyn_opcode ("fnegs"); |
| 11093 | else |
| 11094 | do_vfp_nsyn_opcode ("fabss"); |
| 11095 | } |
| 11096 | else |
| 11097 | { |
| 11098 | if (is_neg) |
| 11099 | do_vfp_nsyn_opcode ("fnegd"); |
| 11100 | else |
| 11101 | do_vfp_nsyn_opcode ("fabsd"); |
| 11102 | } |
| 11103 | } |
| 11104 | |
| 11105 | /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision |
| 11106 | insns belong to Neon, and are handled elsewhere. */ |
| 11107 | |
| 11108 | static void |
| 11109 | do_vfp_nsyn_ldm_stm (int is_dbmode) |
| 11110 | { |
| 11111 | int is_ldm = (inst.instruction & (1 << 20)) != 0; |
| 11112 | if (is_ldm) |
| 11113 | { |
| 11114 | if (is_dbmode) |
| 11115 | do_vfp_nsyn_opcode ("fldmdbs"); |
| 11116 | else |
| 11117 | do_vfp_nsyn_opcode ("fldmias"); |
| 11118 | } |
| 11119 | else |
| 11120 | { |
| 11121 | if (is_dbmode) |
| 11122 | do_vfp_nsyn_opcode ("fstmdbs"); |
| 11123 | else |
| 11124 | do_vfp_nsyn_opcode ("fstmias"); |
| 11125 | } |
| 11126 | } |
| 11127 | |
| 11128 | static void |
| 11129 | do_vfp_nsyn_sqrt (void) |
| 11130 | { |
| 11131 | enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL); |
| 11132 | neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); |
| 11133 | |
| 11134 | if (rs == NS_FF) |
| 11135 | do_vfp_nsyn_opcode ("fsqrts"); |
| 11136 | else |
| 11137 | do_vfp_nsyn_opcode ("fsqrtd"); |
| 11138 | } |
| 11139 | |
| 11140 | static void |
| 11141 | do_vfp_nsyn_div (void) |
| 11142 | { |
| 11143 | enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); |
| 11144 | neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP, |
| 11145 | N_F32 | N_F64 | N_KEY | N_VFP); |
| 11146 | |
| 11147 | if (rs == NS_FFF) |
| 11148 | do_vfp_nsyn_opcode ("fdivs"); |
| 11149 | else |
| 11150 | do_vfp_nsyn_opcode ("fdivd"); |
| 11151 | } |
| 11152 | |
| 11153 | static void |
| 11154 | do_vfp_nsyn_nmul (void) |
| 11155 | { |
| 11156 | enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); |
| 11157 | neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP, |
| 11158 | N_F32 | N_F64 | N_KEY | N_VFP); |
| 11159 | |
| 11160 | if (rs == NS_FFF) |
| 11161 | { |
| 11162 | inst.instruction = NEON_ENC_SINGLE (inst.instruction); |
| 11163 | do_vfp_sp_dyadic (); |
| 11164 | } |
| 11165 | else |
| 11166 | { |
| 11167 | inst.instruction = NEON_ENC_DOUBLE (inst.instruction); |
| 11168 | do_vfp_dp_rd_rn_rm (); |
| 11169 | } |
| 11170 | do_vfp_cond_or_thumb (); |
| 11171 | } |
| 11172 | |
/* Encode Neon-syntax vcmp/vcmpe.  A register second operand selects the
   two-register compare; otherwise the compare-with-zero (#0) form is used,
   which has its own entries (vcmpz/vcmpez) in neon_enc_tab.  */

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
        {
          inst.instruction = NEON_ENC_SINGLE (inst.instruction);
          do_vfp_sp_monadic ();
        }
      else
        {
          inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
          do_vfp_dp_rd_rm ();
        }
    }
  else
    {
      /* Compare with zero: switch the placeholder mnemonic to the
	 corresponding -with-zero entry before encoding.  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
        {
        case N_MNEM_vcmp:
          inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
          break;
        case N_MNEM_vcmpe:
          inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
          break;
        default:
          abort ();
        }

      if (rs == NS_FI)
        {
          inst.instruction = NEON_ENC_SINGLE (inst.instruction);
          do_vfp_sp_compare_z ();
        }
      else
        {
          inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
          do_vfp_dp_rd ();
        }
    }
  do_vfp_cond_or_thumb ();
}
| 11222 | |
| 11223 | static void |
| 11224 | nsyn_insert_sp (void) |
| 11225 | { |
| 11226 | inst.operands[1] = inst.operands[0]; |
| 11227 | memset (&inst.operands[0], '\0', sizeof (inst.operands[0])); |
| 11228 | inst.operands[0].reg = 13; |
| 11229 | inst.operands[0].isreg = 1; |
| 11230 | inst.operands[0].writeback = 1; |
| 11231 | inst.operands[0].present = 1; |
| 11232 | } |
| 11233 | |
| 11234 | static void |
| 11235 | do_vfp_nsyn_push (void) |
| 11236 | { |
| 11237 | nsyn_insert_sp (); |
| 11238 | if (inst.operands[1].issingle) |
| 11239 | do_vfp_nsyn_opcode ("fstmdbs"); |
| 11240 | else |
| 11241 | do_vfp_nsyn_opcode ("fstmdbd"); |
| 11242 | } |
| 11243 | |
| 11244 | static void |
| 11245 | do_vfp_nsyn_pop (void) |
| 11246 | { |
| 11247 | nsyn_insert_sp (); |
| 11248 | if (inst.operands[1].issingle) |
| 11249 | do_vfp_nsyn_opcode ("fldmdbs"); |
| 11250 | else |
| 11251 | do_vfp_nsyn_opcode ("fldmdbd"); |
| 11252 | } |
| 11253 | |
| 11254 | /* Fix up Neon data-processing instructions, ORing in the correct bits for |
| 11255 | ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */ |
| 11256 | |
| 11257 | static unsigned |
| 11258 | neon_dp_fixup (unsigned i) |
| 11259 | { |
| 11260 | if (thumb_mode) |
| 11261 | { |
| 11262 | /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */ |
| 11263 | if (i & (1 << 24)) |
| 11264 | i |= 1 << 28; |
| 11265 | |
| 11266 | i &= ~(1 << 24); |
| 11267 | |
| 11268 | i |= 0xef000000; |
| 11269 | } |
| 11270 | else |
| 11271 | i |= 0xf2000000; |
| 11272 | |
| 11273 | return i; |
| 11274 | } |
| 11275 | |
| 11276 | /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3 |
| 11277 | (0, 1, 2, 3). */ |
| 11278 | |
static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based position of the least significant set bit, so
     for the power-of-two element sizes 8/16/32/64 this yields 0/1/2/3.
     NOTE(review): callers must not pass zero -- ffs (0) is 0, which would
     wrap to a huge unsigned value here.  */
  return ffs (x) - 4;
}
| 11284 | |
/* Low four bits of a 5-bit Neon register number.  */
#define LOW4(R) ((R) & 0xf)
/* Top (fifth) bit of a 5-bit Neon register number.  */
#define HI1(R) (((R) >> 4) & 1)
| 11287 | |
| 11288 | /* Encode insns with bit pattern: |
| 11289 | |
| 11290 | |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0| |
| 11291 | | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm | |
| 11292 | |
| 11293 | SIZE is passed in bits. -1 means size field isn't changed, in case it has a |
| 11294 | different meaning for some instruction. */ |
| 11295 | |
static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd in bits 12-15, with its high bit as D at bit 22.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rn in bits 16-19, with its high bit as N at bit 7.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Rm in bits 0-3, with its high bit as M at bit 5.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  /* SIZE == -1 means the size field has a different meaning for this
     instruction and was already set from the opcode bitmask.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
| 11312 | |
| 11313 | /* Encode instructions of the form: |
| 11314 | |
| 11315 | |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0| |
| 11316 | | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm | |
| 11317 | |
| 11318 | Don't write size if SIZE == -1. */ |
| 11319 | |
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Rd in bits 12-15 plus D at bit 22; Rm in bits 0-3 plus M at bit 5.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* SIZE == -1 leaves the size field as the opcode bitmask supplied it.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
| 11335 | |
| 11336 | /* Neon instruction encoders, in approximate order of appearance. */ |
| 11337 | |
| 11338 | static void |
| 11339 | do_neon_dyadic_i_su (void) |
| 11340 | { |
| 11341 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 11342 | struct neon_type_el et = neon_check_type (3, rs, |
| 11343 | N_EQK, N_EQK, N_SU_32 | N_KEY); |
| 11344 | neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); |
| 11345 | } |
| 11346 | |
| 11347 | static void |
| 11348 | do_neon_dyadic_i64_su (void) |
| 11349 | { |
| 11350 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 11351 | struct neon_type_el et = neon_check_type (3, rs, |
| 11352 | N_EQK, N_EQK, N_SU_ALL | N_KEY); |
| 11353 | neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); |
| 11354 | } |
| 11355 | |
/* Encode a Neon shift-by-immediate.  ET is the element type and IMMBITS
   the (pre-biased) immediate field; the element size is encoded with its
   low three bits at 19-21 and its top bit (64-bit elements) at bit 7.
   If WRITE_UBIT is set, UVAL supplies the U bit.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
                unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
| 11374 | |
| 11375 | static void |
| 11376 | do_neon_shl_imm (void) |
| 11377 | { |
| 11378 | if (!inst.operands[2].isreg) |
| 11379 | { |
| 11380 | enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); |
| 11381 | struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL); |
| 11382 | inst.instruction = NEON_ENC_IMMED (inst.instruction); |
| 11383 | neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm); |
| 11384 | } |
| 11385 | else |
| 11386 | { |
| 11387 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 11388 | struct neon_type_el et = neon_check_type (3, rs, |
| 11389 | N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN); |
| 11390 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 11391 | neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); |
| 11392 | } |
| 11393 | } |
| 11394 | |
| 11395 | static void |
| 11396 | do_neon_qshl_imm (void) |
| 11397 | { |
| 11398 | if (!inst.operands[2].isreg) |
| 11399 | { |
| 11400 | enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); |
| 11401 | struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY); |
| 11402 | inst.instruction = NEON_ENC_IMMED (inst.instruction); |
| 11403 | neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, |
| 11404 | inst.operands[2].imm); |
| 11405 | } |
| 11406 | else |
| 11407 | { |
| 11408 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 11409 | struct neon_type_el et = neon_check_type (3, rs, |
| 11410 | N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN); |
| 11411 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 11412 | neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); |
| 11413 | } |
| 11414 | } |
| 11415 | |
/* Pick a CMODE encoding for a logic-immediate (VBIC/VORR class).  On
   success, store the 8-bit payload through IMMBITS and return the cmode
   value; return FAIL if IMMEDIATE cannot be encoded at SIZE.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit forms: a single non-zero byte in any of the four byte
	 positions (cmodes 1/3/5/7).  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* A repeated 16-bit pattern can fall through to the 16-bit forms;
	 anything else is unencodable.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit forms: a non-zero byte in either halfword position
     (cmodes 9/b).  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
| 11471 | |
| 11472 | /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits |
| 11473 | A, B, C, D. */ |
| 11474 | |
/* True if every byte of IMM is either all-zeros or all-ones, i.e. IMM has
   the form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;
      if (field != 0 && field != 0xff)
	return 0;
    }
  return 1;
}
| 11483 | |
| 11484 | /* For immediate of above form, return 0bABCD. */ |
| 11485 | |
/* For an immediate of the form accepted by neon_bits_same_in_bytes,
   collapse each byte to one bit, returning 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;
  return result;
}
| 11492 | |
| 11493 | /* Compress quarter-float representation to 0b...000 abcdefgh. */ |
| 11494 | |
/* Compress a quarter-float representation to 0b...000 abcdefgh: the sign
   bit plus the seven bits below the top two exponent bits.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
| 11500 | |
| 11501 | /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into |
| 11502 | the instruction. *OP is passed as the initial value of the op field, and |
| 11503 | may be set to a different value depending on the constant (i.e. |
| 11504 | "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not |
MVN). If the immediate looks like a repeated pattern then also
| 11506 | try smaller element sizes. */ |
| 11507 | |
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
                         int *op, int size, enum neon_el_type type)
{
  /* Quarter-float immediates are only available for 32-bit MOV (not MVN,
     i.e. *OP == 1), and use cmode 0xf.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
        return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* The only true 64-bit encoding: each byte all-ones or all-zeroes,
         always written with OP = 1, so reject an explicit MVN.  */
      if (neon_bits_same_in_bytes (immhi)
          && neon_bits_same_in_bytes (immlo))
        {
          if (*op == 1)
            return FAIL;
          *immbits = (neon_squash_bits (immhi) << 4)
                     | neon_squash_bits (immlo);
          *op = 1;
          return 0xe;
        }

      /* Otherwise only a repeated 32-bit pattern can be encoded; fall
         through to the 32-bit cases on IMMLO.  */
      if (immhi != immlo)
        return FAIL;
    }

  if (size >= 32)
    {
      /* 32-bit forms: one non-zero byte (cmodes 0/2/4/6), or a byte with
         trailing ones (cmodes c/d).  */
      if (immlo == (immlo & 0x000000ff))
        {
          *immbits = immlo;
          return 0x0;
        }
      else if (immlo == (immlo & 0x0000ff00))
        {
          *immbits = immlo >> 8;
          return 0x2;
        }
      else if (immlo == (immlo & 0x00ff0000))
        {
          *immbits = immlo >> 16;
          return 0x4;
        }
      else if (immlo == (immlo & 0xff000000))
        {
          *immbits = immlo >> 24;
          return 0x6;
        }
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
        {
          *immbits = (immlo >> 8) & 0xff;
          return 0xc;
        }
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
        {
          *immbits = (immlo >> 16) & 0xff;
          return 0xd;
        }

      /* Try again at 16 bits if the value is a repeated halfword.  */
      if ((immlo & 0xffff) != (immlo >> 16))
        return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* 16-bit forms: one non-zero byte (cmodes 8/a).  */
      if (immlo == (immlo & 0x000000ff))
        {
          *immbits = immlo;
          return 0x8;
        }
      else if (immlo == (immlo & 0x0000ff00))
        {
          *immbits = immlo >> 8;
          return 0xa;
        }

      /* Try again at 8 bits if the value is a repeated byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
        return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
        return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
| 11604 | |
| 11605 | /* Write immediate bits [7:0] to the following locations: |
| 11606 | |
| 11607 | |28/24|23 19|18 16|15 4|3 0| |
| 11608 | | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h| |
| 11609 | |
| 11610 | This function is used by VMOV/VMVN/VORR/VBIC. */ |
| 11611 | |
static void
neon_write_immbits (unsigned immbits)
{
  /* Bits [3:0] -> instruction bits 0-3, bits [6:4] -> bits 16-18,
     bit [7] -> bit 24 (see the field diagram above).  */
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
}
| 11619 | |
| 11620 | /* Invert low-order SIZE bits of XHI:XLO. */ |
| 11621 | |
/* Invert the low-order SIZE bits of the value XHI:XLO, in place.  Either
   pointer may be NULL, in which case that half is treated as zero and not
   written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* Fall through: the low word is inverted for both 32 and 64.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
| 11656 | |
/* Encode VAND/VBIC/VORR/VORN, either as a register three-same operation
   (type is ignored), or as an immediate form.  VAND and VORN immediates
   are pseudo-instructions, implemented by inverting the immediate and
   encoding VBIC/VORR respectively.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* Mask off the condition to recover the mnemonic value.  */
      enum neon_opc opcode = inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      inst.instruction = NEON_ENC_IMMED (inst.instruction);

      immbits = inst.operands[1].imm;
      if (et.size == 64)
        {
          /* .i64 is a pseudo-op, so the immediate must be a repeating
             pattern.  */
          if (immbits != (inst.operands[1].regisimm ?
                          inst.operands[1].reg : 0))
            {
              /* Set immbits to an invalid constant.  */
              immbits = 0xdeadbeef;
            }
        }

      switch (opcode)
        {
        case N_MNEM_vbic:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorr:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vand:
          /* Pseudo-instruction for VBIC.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorn:
          /* Pseudo-instruction for VORR.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        default:
          abort ();
        }

      /* neon_cmode_for_logic_imm has already recorded the error.  */
      if (cmode == FAIL)
        return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
| 11733 | |
| 11734 | static void |
| 11735 | do_neon_bitfield (void) |
| 11736 | { |
| 11737 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 11738 | neon_check_type (3, rs, N_IGNORE_TYPE); |
| 11739 | neon_three_same (neon_quad (rs), 0, -1); |
| 11740 | } |
| 11741 | |
| 11742 | static void |
| 11743 | neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types, |
| 11744 | unsigned destbits) |
| 11745 | { |
| 11746 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 11747 | struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK, |
| 11748 | types | N_KEY); |
| 11749 | if (et.type == NT_float) |
| 11750 | { |
| 11751 | inst.instruction = NEON_ENC_FLOAT (inst.instruction); |
| 11752 | neon_three_same (neon_quad (rs), 0, -1); |
| 11753 | } |
| 11754 | else |
| 11755 | { |
| 11756 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 11757 | neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size); |
| 11758 | } |
| 11759 | } |
| 11760 | |
/* Dyadic operation on signed/unsigned integer or float elements; the U
   bit is set for unsigned integer types.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
| 11766 | |
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
| 11774 | |
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
| 11782 | |
/* Bitmask values for vfp_or_neon_is_neon's CHECK argument, below.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Validate (and possibly fix up) the condition.  */
  NEON_CHECK_ARCH = 2	/* Require Neon support in the target arch.  */
};
| 11788 | |
| 11789 | /* Call this function if an instruction which may have belonged to the VFP or |
| 11790 | Neon instruction sets, but turned out to be a Neon instruction (due to the |
| 11791 | operand types involved, etc.). We have to check and/or fix-up a couple of |
| 11792 | things: |
| 11793 | |
| 11794 | - Make sure the user hasn't attempted to make a Neon instruction |
| 11795 | conditional. |
| 11796 | - Alter the value in the condition code field if necessary. |
| 11797 | - Make sure that the arch supports Neon instructions. |
| 11798 | |
| 11799 | Which of these operations take place depends on bits from enum |
| 11800 | vfp_or_neon_is_neon_bits. |
| 11801 | |
| 11802 | WARNING: This function has side effects! If NEON_CHECK_CC is used and the |
| 11803 | current instruction's condition is COND_ALWAYS, the condition field is |
| 11804 | changed to inst.uncond_value. This is necessary because instructions shared |
| 11805 | between VFP and Neon may be conditional for the VFP variants only, and the |
| 11806 | unconditional Neon version must have, e.g., 0xF in the condition field. */ |
| 11807 | |
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Substitute the recorded "unconditional" condition value (see the
	 function comment above) into the top four bits.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  if ((check & NEON_CHECK_ARCH)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
| 11832 | |
/* Add/subtract: try the VFP encoding first; otherwise validate and
   encode as a Neon integer or float operation.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
| 11846 | |
| 11847 | /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the |
| 11848 | result to be: |
| 11849 | V<op> A,B (A is operand 0, B is operand 2) |
| 11850 | to mean: |
| 11851 | V<op> A,B,A |
| 11852 | not: |
| 11853 | V<op> A,B,B |
| 11854 | so handle that case specially. */ |
| 11855 | |
| 11856 | static void |
| 11857 | neon_exchange_operands (void) |
| 11858 | { |
| 11859 | void *scratch = alloca (sizeof (inst.operands[0])); |
| 11860 | if (inst.operands[1].present) |
| 11861 | { |
| 11862 | /* Swap operands[1] and operands[2]. */ |
| 11863 | memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0])); |
| 11864 | inst.operands[1] = inst.operands[2]; |
| 11865 | memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0])); |
| 11866 | } |
| 11867 | else |
| 11868 | { |
| 11869 | inst.operands[1] = inst.operands[2]; |
| 11870 | inst.operands[2] = inst.operands[0]; |
| 11871 | } |
| 11872 | } |
| 11873 | |
/* Encode a Neon comparison.  REGTYPES are the element types allowed for
   the register-register form, IMMTYPES those for the compare-with-zero
   (immediate) form.  If INVERT is set, the source operands are exchanged
   first (for comparisons that only exist in one direction).  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against #0: two-register form with the float flag at
         bit 10 and the element size at bits 18-19.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
| 11901 | |
/* Comparison in operand order (no operand exchange needed).  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
| 11907 | |
/* Comparison with the source operands exchanged (INVERT set).  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
| 11913 | |
/* Equality comparison: integer or float element types.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
| 11919 | |
| 11920 | /* For multiply instructions, we have the possibility of 16-bit or 32-bit |
| 11921 | scalars, which are encoded in 5 bits, M : Rm. |
| 11922 | For 16-bit scalars, the register is encoded in Rm[2:0] and the index in |
| 11923 | M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the |
| 11924 | index in M. */ |
| 11925 | |
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      /* Register in Rm[2:0], index in M:Rm[3] (see comment above).  */
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      /* Register in Rm[3:0], index in M.  */
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  /* Error path: first_error has recorded the problem; 0 is a dummy.  */
  return 0;
}
| 11951 | |
| 11952 | /* Encode multiply / multiply-accumulate scalar instructions. */ |
| 11953 | |
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Scalar operand: 5-bit M:Rm encoding (see neon_scalar_for_mul).  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
| 11976 | |
/* Multiply-accumulate family: try the VFP encoding first; otherwise use
   the scalar form if operand 2 is a scalar, else the three-same form.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
         affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
| 12001 | |
| 12002 | static void |
| 12003 | do_neon_tst (void) |
| 12004 | { |
| 12005 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 12006 | struct neon_type_el et = neon_check_type (3, rs, |
| 12007 | N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY); |
| 12008 | neon_three_same (neon_quad (rs), 0, et.size); |
| 12009 | } |
| 12010 | |
| 12011 | /* VMUL with 3 registers allows the P8 type. The scalar version supports the |
| 12012 | same types as the MAC equivalents. The polynomial type for this instruction |
| 12013 | is encoded the same as the integer type. */ |
| 12014 | |
static void
do_neon_mul (void)
{
  /* Try the VFP multiply encoding first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* Scalar form shares its encoding path with multiply-accumulate; the
     three-register form additionally admits P8 (see comment above).  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
| 12029 | |
/* Saturating doubling multiply (high half): scalar or three-same form,
   on signed 16/32-bit elements only.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
| 12051 | |
| 12052 | static void |
| 12053 | do_neon_fcmp_absolute (void) |
| 12054 | { |
| 12055 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 12056 | neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY); |
| 12057 | /* Size field comes from bit mask. */ |
| 12058 | neon_three_same (neon_quad (rs), 1, -1); |
| 12059 | } |
| 12060 | |
/* As do_neon_fcmp_absolute, but with the source operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
| 12067 | |
| 12068 | static void |
| 12069 | do_neon_step (void) |
| 12070 | { |
| 12071 | enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); |
| 12072 | neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY); |
| 12073 | neon_three_same (neon_quad (rs), 0, -1); |
| 12074 | } |
| 12075 | |
/* Absolute value / negate: try the VFP encoding first; otherwise encode
   the Neon two-register form on signed or F32 elements.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  /* Rd in 12-15/22, Rm in 0-3/5; float flag at bit 10, size at 18-19.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
| 12101 | |
/* Encode VSLI (vector shift left and insert).  The shift immediate must be
   in the range 0 .. element size - 1, and is encoded directly.  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
| 12113 | |
/* Encode VSRI (vector shift right and insert).  The shift immediate must be
   in the range 1 .. element size; right shifts are encoded as
   size - immediate.  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
| 12125 | |
/* Encode VQSHLU (saturating shift left, unsigned result, signed input) with
   an immediate shift in the range 0 .. element size - 1.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
| 12142 | |
| 12143 | static void |
| 12144 | do_neon_qmovn (void) |
| 12145 | { |
| 12146 | struct neon_type_el et = neon_check_type (2, NS_DQ, |
| 12147 | N_EQK | N_HLF, N_SU_16_64 | N_KEY); |
| 12148 | /* Saturating move where operands can be signed or unsigned, and the |
| 12149 | destination has the same signedness. */ |
| 12150 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 12151 | if (et.type == NT_unsigned) |
| 12152 | inst.instruction |= 0xc0; |
| 12153 | else |
| 12154 | inst.instruction |= 0x80; |
| 12155 | neon_two_same (0, 1, et.size / 2); |
| 12156 | } |
| 12157 | |
| 12158 | static void |
| 12159 | do_neon_qmovun (void) |
| 12160 | { |
| 12161 | struct neon_type_el et = neon_check_type (2, NS_DQ, |
| 12162 | N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY); |
| 12163 | /* Saturating move with unsigned results. Operands must be signed. */ |
| 12164 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 12165 | neon_two_same (0, 1, et.size / 2); |
| 12166 | } |
| 12167 | |
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero shift
   is emitted as the equivalent VQMOVN instead.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* Right shifts are encoded as size - immediate.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
| 12194 | |
/* Encode VQSHRUN/VQRSHRUN (saturating shift right and narrow, unsigned
   result from signed input).  A zero shift is emitted as VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
| 12224 | |
| 12225 | static void |
| 12226 | do_neon_movn (void) |
| 12227 | { |
| 12228 | struct neon_type_el et = neon_check_type (2, NS_DQ, |
| 12229 | N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY); |
| 12230 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 12231 | neon_two_same (0, 1, et.size / 2); |
| 12232 | } |
| 12233 | |
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero shift is emitted as
   the equivalent VMOVN instead.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for narrowing operation"));
  /* Right shifts are encoded as size - immediate.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
| 12258 | |
/* Encode VSHLL (shift left long).  The maximum-shift form (imm == size) has
   a distinct encoding from the general immediate-shift form.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
        N_EQK | N_DBL, N_SU_32 | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
| 12288 | |
| 12289 | /* Check the various types for the VCVT instruction, and return which version |
| 12290 | the current instruction is. */ |
| 12291 | |
static int
neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y) \
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (C); \
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
                        || rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Flavours 0-3: Neon (or single-precision VFP) int <-> f32.  Each
     CVT_VAR returns its flavour number on the first type match.  */
  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);

  whole_reg = N_VFP;

  /* VFP instructions.  */
  CVT_VAR (4, N_F32, N_F64);
  CVT_VAR (5, N_F64, N_F32);
  CVT_VAR (6, N_S32, N_F64 | key);
  CVT_VAR (7, N_U32, N_F64 | key);
  CVT_VAR (8, N_F64 | key, N_S32);
  CVT_VAR (9, N_F64 | key, N_U32);
  /* VFP instructions with bitshift.  */
  CVT_VAR (10, N_F32 | key, N_S16);
  CVT_VAR (11, N_F32 | key, N_U16);
  CVT_VAR (12, N_F64 | key, N_S16);
  CVT_VAR (13, N_F64 | key, N_U16);
  CVT_VAR (14, N_S16, N_F32 | key);
  CVT_VAR (15, N_U16, N_F32 | key);
  CVT_VAR (16, N_S16, N_F64 | key);
  CVT_VAR (17, N_U16, N_F64 | key);

  /* No variant matched the operand types.  */
  return -1;
#undef CVT_VAR
}
| 12338 | |
| 12339 | /* Neon-syntax VFP conversions. */ |
| 12340 | |
| 12341 | static void |
| 12342 | do_vfp_nsyn_cvt (enum neon_shape rs, int flavour) |
| 12343 | { |
| 12344 | const char *opname = 0; |
| 12345 | |
| 12346 | if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI) |
| 12347 | { |
| 12348 | /* Conversions with immediate bitshift. */ |
| 12349 | const char *enc[] = |
| 12350 | { |
| 12351 | "ftosls", |
| 12352 | "ftouls", |
| 12353 | "fsltos", |
| 12354 | "fultos", |
| 12355 | NULL, |
| 12356 | NULL, |
| 12357 | "ftosld", |
| 12358 | "ftould", |
| 12359 | "fsltod", |
| 12360 | "fultod", |
| 12361 | "fshtos", |
| 12362 | "fuhtos", |
| 12363 | "fshtod", |
| 12364 | "fuhtod", |
| 12365 | "ftoshs", |
| 12366 | "ftouhs", |
| 12367 | "ftoshd", |
| 12368 | "ftouhd" |
| 12369 | }; |
| 12370 | |
| 12371 | if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) |
| 12372 | { |
| 12373 | opname = enc[flavour]; |
| 12374 | constraint (inst.operands[0].reg != inst.operands[1].reg, |
| 12375 | _("operands 0 and 1 must be the same register")); |
| 12376 | inst.operands[1] = inst.operands[2]; |
| 12377 | memset (&inst.operands[2], '\0', sizeof (inst.operands[2])); |
| 12378 | } |
| 12379 | } |
| 12380 | else |
| 12381 | { |
| 12382 | /* Conversions without bitshift. */ |
| 12383 | const char *enc[] = |
| 12384 | { |
| 12385 | "ftosis", |
| 12386 | "ftouis", |
| 12387 | "fsitos", |
| 12388 | "fuitos", |
| 12389 | "fcvtsd", |
| 12390 | "fcvtds", |
| 12391 | "ftosid", |
| 12392 | "ftouid", |
| 12393 | "fsitod", |
| 12394 | "fuitod" |
| 12395 | }; |
| 12396 | |
| 12397 | if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) |
| 12398 | opname = enc[flavour]; |
| 12399 | } |
| 12400 | |
| 12401 | if (opname) |
| 12402 | do_vfp_nsyn_opcode (opname); |
| 12403 | } |
| 12404 | |
| 12405 | static void |
| 12406 | do_vfp_nsyn_cvtz (void) |
| 12407 | { |
| 12408 | enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL); |
| 12409 | int flavour = neon_cvt_flavour (rs); |
| 12410 | const char *enc[] = |
| 12411 | { |
| 12412 | "ftosizs", |
| 12413 | "ftouizs", |
| 12414 | NULL, |
| 12415 | NULL, |
| 12416 | NULL, |
| 12417 | NULL, |
| 12418 | "ftosizd", |
| 12419 | "ftouizd" |
| 12420 | }; |
| 12421 | |
| 12422 | if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour]) |
| 12423 | do_vfp_nsyn_opcode (enc[flavour]); |
| 12424 | } |
| 12425 | |
| 12426 | static void |
| 12427 | do_neon_cvt (void) |
| 12428 | { |
| 12429 | enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ, |
| 12430 | NS_FD, NS_DF, NS_FF, NS_NULL); |
| 12431 | int flavour = neon_cvt_flavour (rs); |
| 12432 | |
| 12433 | /* VFP rather than Neon conversions. */ |
| 12434 | if (flavour >= 4) |
| 12435 | { |
| 12436 | do_vfp_nsyn_cvt (rs, flavour); |
| 12437 | return; |
| 12438 | } |
| 12439 | |
| 12440 | switch (rs) |
| 12441 | { |
| 12442 | case NS_DDI: |
| 12443 | case NS_QQI: |
| 12444 | { |
| 12445 | if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) |
| 12446 | return; |
| 12447 | |
| 12448 | /* Fixed-point conversion with #0 immediate is encoded as an |
| 12449 | integer conversion. */ |
| 12450 | if (inst.operands[2].present && inst.operands[2].imm == 0) |
| 12451 | goto int_encode; |
| 12452 | unsigned immbits = 32 - inst.operands[2].imm; |
| 12453 | unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 }; |
| 12454 | inst.instruction = NEON_ENC_IMMED (inst.instruction); |
| 12455 | if (flavour != -1) |
| 12456 | inst.instruction |= enctab[flavour]; |
| 12457 | inst.instruction |= LOW4 (inst.operands[0].reg) << 12; |
| 12458 | inst.instruction |= HI1 (inst.operands[0].reg) << 22; |
| 12459 | inst.instruction |= LOW4 (inst.operands[1].reg); |
| 12460 | inst.instruction |= HI1 (inst.operands[1].reg) << 5; |
| 12461 | inst.instruction |= neon_quad (rs) << 6; |
| 12462 | inst.instruction |= 1 << 21; |
| 12463 | inst.instruction |= immbits << 16; |
| 12464 | |
| 12465 | inst.instruction = neon_dp_fixup (inst.instruction); |
| 12466 | } |
| 12467 | break; |
| 12468 | |
| 12469 | case NS_DD: |
| 12470 | case NS_QQ: |
| 12471 | int_encode: |
| 12472 | { |
| 12473 | unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 }; |
| 12474 | |
| 12475 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 12476 | |
| 12477 | if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) |
| 12478 | return; |
| 12479 | |
| 12480 | if (flavour != -1) |
| 12481 | inst.instruction |= enctab[flavour]; |
| 12482 | |
| 12483 | inst.instruction |= LOW4 (inst.operands[0].reg) << 12; |
| 12484 | inst.instruction |= HI1 (inst.operands[0].reg) << 22; |
| 12485 | inst.instruction |= LOW4 (inst.operands[1].reg); |
| 12486 | inst.instruction |= HI1 (inst.operands[1].reg) << 5; |
| 12487 | inst.instruction |= neon_quad (rs) << 6; |
| 12488 | inst.instruction |= 2 << 18; |
| 12489 | |
| 12490 | inst.instruction = neon_dp_fixup (inst.instruction); |
| 12491 | } |
| 12492 | break; |
| 12493 | |
| 12494 | default: |
| 12495 | /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */ |
| 12496 | do_vfp_nsyn_cvt (rs, flavour); |
| 12497 | } |
| 12498 | } |
| 12499 | |
/* Encode the immediate forms of VMOV/VMVN.  Searches for a cmode encoding
   of the immediate; if none exists, inverts the immediate and flips
   VMOV <-> VMVN before trying again.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode;

  constraint (et.type == NT_invtype,
              _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate is split across imm (low) and reg (high).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
              _("immediate has bits set outside the operand size"));

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                        et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
         with one or the other; those cases are caught by
         neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                            et.size, et.type)) == FAIL)
        {
          first_error (_("immediate out of range"));
          return;
        }
    }

  /* Rewrite the OP bit, which neon_cmode_for_move_imm may have flipped.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
| 12549 | |
/* Encode VMVN: a register form (two-register move-not) or an immediate
   form, chosen by the type of operand 1.  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_move_immediate ();
    }

  inst.instruction = neon_dp_fixup (inst.instruction);
}
| 12572 | |
| 12573 | /* Encode instructions of form: |
| 12574 | |
| 12575 | |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0| |
| 12576 | | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | |
| 12577 | |
| 12578 | */ |
| 12579 | |
/* Encode a mixed-length (long/wide/narrow) three-register operation: place
   the three register operands, the U bit (from ET's signedness) and the
   size field (log2 of SIZE) into inst.instruction, then apply the
   data-processing fixup.  See the encoding diagram above.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* U bit distinguishes unsigned element types.  */
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
| 12594 | |
| 12595 | static void |
| 12596 | do_neon_dyadic_long (void) |
| 12597 | { |
| 12598 | /* FIXME: Type checking for lengthening op. */ |
| 12599 | struct neon_type_el et = neon_check_type (3, NS_QDD, |
| 12600 | N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY); |
| 12601 | neon_mixed_length (et, et.size); |
| 12602 | } |
| 12603 | |
| 12604 | static void |
| 12605 | do_neon_abal (void) |
| 12606 | { |
| 12607 | struct neon_type_el et = neon_check_type (3, NS_QDD, |
| 12608 | N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY); |
| 12609 | neon_mixed_length (et, et.size); |
| 12610 | } |
| 12611 | |
/* Encode a long multiply(-accumulate) which may take a scalar as the final
   operand.  The scalar branch checks against REGTYPES and the vector branch
   against SCALARTYPES.  NOTE(review): the parameter names appear swapped
   relative to their use -- confirm intent against the callers before
   renaming.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* Scalar (Dm[x]) form.  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Vector form.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_mixed_length (et, et.size);
    }
}
| 12630 | |
/* Encode VMLAL/VMLSL/VMULL-style long multiply(-accumulate), vector or
   scalar form, with the type sets these mnemonics accept.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
| 12636 | |
| 12637 | static void |
| 12638 | do_neon_dyadic_wide (void) |
| 12639 | { |
| 12640 | struct neon_type_el et = neon_check_type (3, NS_QQD, |
| 12641 | N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY); |
| 12642 | neon_mixed_length (et, et.size); |
| 12643 | } |
| 12644 | |
/* Encode a narrowing dyadic operation: Dd = op (Qn, Qm), with result
   elements half the source width.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
| 12655 | |
/* Encode VQDMLAL/VQDMLSL/VQDMULL-style saturating long multiply, vector or
   scalar form; only signed 16/32-bit elements are accepted.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
| 12661 | |
| 12662 | static void |
| 12663 | do_neon_vmull (void) |
| 12664 | { |
| 12665 | if (inst.operands[2].isscalar) |
| 12666 | do_neon_mac_maybe_scalar_long (); |
| 12667 | else |
| 12668 | { |
| 12669 | struct neon_type_el et = neon_check_type (3, NS_QDD, |
| 12670 | N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY); |
| 12671 | if (et.type == NT_poly) |
| 12672 | inst.instruction = NEON_ENC_POLY (inst.instruction); |
| 12673 | else |
| 12674 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 12675 | /* For polynomial encoding, size field must be 0b00 and the U bit must be |
| 12676 | zero. Should be OK as-is. */ |
| 12677 | neon_mixed_length (et, et.size); |
| 12678 | } |
| 12679 | } |
| 12680 | |
/* Encode VEXT (vector extract).  The element-count immediate is converted
   to a byte offset before encoding.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Immediate is in elements; the instruction encodes bytes.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
| 12699 | |
/* Encode VREV16/VREV32/VREV64.  The reversal region width is implied by the
   opcode bits; the element size must be strictly smaller than it.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  assert (elsize != 0);
  constraint (et.size >= elsize,
              _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
| 12716 | |
/* Encode VDUP: duplicate either a scalar (Dm[x]) or an ARM core register
   into all lanes of a vector.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP<q>.<size> <Dd/Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, shifted into position above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
        return;

      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      inst.instruction = NEON_ENC_ARMREG (inst.instruction);
      /* The b/e bits select the element size.  */
      switch (et.size)
        {
        case 8:  inst.instruction |= 0x400000; break;
        case 16: inst.instruction |= 0x000020; break;
        case 32: inst.instruction |= 0x000000; break;
        default: break;
        }
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
         variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
| 12767 | |
| 12768 | /* VMOV has particularly many variations. It can be one of: |
| 12769 | 0. VMOV<c><q> <Qd>, <Qm> |
| 12770 | 1. VMOV<c><q> <Dd>, <Dm> |
| 12771 | (Register operations, which are VORR with Rm = Rn.) |
| 12772 | 2. VMOV<c><q>.<dt> <Qd>, #<imm> |
| 12773 | 3. VMOV<c><q>.<dt> <Dd>, #<imm> |
| 12774 | (Immediate loads.) |
| 12775 | 4. VMOV<c><q>.<size> <Dn[x]>, <Rd> |
| 12776 | (ARM register to scalar.) |
| 12777 | 5. VMOV<c><q> <Dm>, <Rd>, <Rn> |
| 12778 | (Two ARM registers to vector.) |
| 12779 | 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]> |
| 12780 | (Scalar to ARM register.) |
| 12781 | 7. VMOV<c><q> <Rd>, <Rn>, <Dm> |
| 12782 | (Vector to two ARM registers.) |
| 12783 | 8. VMOV.F32 <Sd>, <Sm> |
| 12784 | 9. VMOV.F64 <Dd>, <Dm> |
| 12785 | (VFP register moves.) |
| 12786 | 10. VMOV.F32 <Sd>, #imm |
| 12787 | 11. VMOV.F64 <Dd>, #imm |
| 12788 | (VFP float immediate load.) |
| 12789 | 12. VMOV <Rd>, <Sm> |
| 12790 | (VFP single to ARM reg.) |
| 12791 | 13. VMOV <Sd>, <Rm> |
| 12792 | (ARM reg to VFP single.) |
| 12793 | 14. VMOV <Rd>, <Re>, <Sn>, <Sm> |
| 12794 | (Two ARM regs to two VFP singles.) |
| 12795 | 15. VMOV <Sd>, <Se>, <Rn>, <Rm> |
| 12796 | (Two VFP singles to two ARM regs.) |
| 12797 | |
| 12798 | These cases can be disambiguated using neon_select_shape, except cases 1/9 |
| 12799 | and 3/11 which depend on the operand type too. |
| 12800 | |
| 12801 | All the encoded bits are hardcoded by this function. |
| 12802 | |
| 12803 | Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!). |
| 12804 | Cases 5, 7 may be used with VFPv2 and above. |
| 12805 | |
| 12806 | FIXME: Some of the checking may be a bit sloppy (in a couple of cases you |
| 12807 | can specify a type where it doesn't make sense to, and is ignored). |
| 12808 | */ |
| 12809 | |
| 12810 | static void |
| 12811 | do_neon_mov (void) |
| 12812 | { |
| 12813 | enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD, |
| 12814 | NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR, |
| 12815 | NS_NULL); |
| 12816 | struct neon_type_el et; |
| 12817 | const char *ldconst = 0; |
| 12818 | |
| 12819 | switch (rs) |
| 12820 | { |
| 12821 | case NS_DD: /* case 1/9. */ |
| 12822 | et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY); |
| 12823 | /* It is not an error here if no type is given. */ |
| 12824 | inst.error = NULL; |
| 12825 | if (et.type == NT_float && et.size == 64) |
| 12826 | { |
| 12827 | do_vfp_nsyn_opcode ("fcpyd"); |
| 12828 | break; |
| 12829 | } |
| 12830 | /* fall through. */ |
| 12831 | |
| 12832 | case NS_QQ: /* case 0/1. */ |
| 12833 | { |
| 12834 | if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) |
| 12835 | return; |
| 12836 | /* The architecture manual I have doesn't explicitly state which |
| 12837 | value the U bit should have for register->register moves, but |
| 12838 | the equivalent VORR instruction has U = 0, so do that. */ |
| 12839 | inst.instruction = 0x0200110; |
| 12840 | inst.instruction |= LOW4 (inst.operands[0].reg) << 12; |
| 12841 | inst.instruction |= HI1 (inst.operands[0].reg) << 22; |
| 12842 | inst.instruction |= LOW4 (inst.operands[1].reg); |
| 12843 | inst.instruction |= HI1 (inst.operands[1].reg) << 5; |
| 12844 | inst.instruction |= LOW4 (inst.operands[1].reg) << 16; |
| 12845 | inst.instruction |= HI1 (inst.operands[1].reg) << 7; |
| 12846 | inst.instruction |= neon_quad (rs) << 6; |
| 12847 | |
| 12848 | inst.instruction = neon_dp_fixup (inst.instruction); |
| 12849 | } |
| 12850 | break; |
| 12851 | |
| 12852 | case NS_DI: /* case 3/11. */ |
| 12853 | et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY); |
| 12854 | inst.error = NULL; |
| 12855 | if (et.type == NT_float && et.size == 64) |
| 12856 | { |
| 12857 | /* case 11 (fconstd). */ |
| 12858 | ldconst = "fconstd"; |
| 12859 | goto encode_fconstd; |
| 12860 | } |
| 12861 | /* fall through. */ |
| 12862 | |
| 12863 | case NS_QI: /* case 2/3. */ |
| 12864 | if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) |
| 12865 | return; |
| 12866 | inst.instruction = 0x0800010; |
| 12867 | neon_move_immediate (); |
| 12868 | inst.instruction = neon_dp_fixup (inst.instruction); |
| 12869 | break; |
| 12870 | |
| 12871 | case NS_SR: /* case 4. */ |
| 12872 | { |
| 12873 | unsigned bcdebits = 0; |
| 12874 | struct neon_type_el et = neon_check_type (2, NS_NULL, |
| 12875 | N_8 | N_16 | N_32 | N_KEY, N_EQK); |
| 12876 | int logsize = neon_logbits (et.size); |
| 12877 | unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg); |
| 12878 | unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg); |
| 12879 | |
| 12880 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1), |
| 12881 | _(BAD_FPU)); |
| 12882 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1) |
| 12883 | && et.size != 32, _(BAD_FPU)); |
| 12884 | constraint (et.type == NT_invtype, _("bad type for scalar")); |
| 12885 | constraint (x >= 64 / et.size, _("scalar index out of range")); |
| 12886 | |
| 12887 | switch (et.size) |
| 12888 | { |
| 12889 | case 8: bcdebits = 0x8; break; |
| 12890 | case 16: bcdebits = 0x1; break; |
| 12891 | case 32: bcdebits = 0x0; break; |
| 12892 | default: ; |
| 12893 | } |
| 12894 | |
| 12895 | bcdebits |= x << logsize; |
| 12896 | |
| 12897 | inst.instruction = 0xe000b10; |
| 12898 | do_vfp_cond_or_thumb (); |
| 12899 | inst.instruction |= LOW4 (dn) << 16; |
| 12900 | inst.instruction |= HI1 (dn) << 7; |
| 12901 | inst.instruction |= inst.operands[1].reg << 12; |
| 12902 | inst.instruction |= (bcdebits & 3) << 5; |
| 12903 | inst.instruction |= (bcdebits >> 2) << 21; |
| 12904 | } |
| 12905 | break; |
| 12906 | |
| 12907 | case NS_DRR: /* case 5 (fmdrr). */ |
| 12908 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2), |
| 12909 | _(BAD_FPU)); |
| 12910 | |
| 12911 | inst.instruction = 0xc400b10; |
| 12912 | do_vfp_cond_or_thumb (); |
| 12913 | inst.instruction |= LOW4 (inst.operands[0].reg); |
| 12914 | inst.instruction |= HI1 (inst.operands[0].reg) << 5; |
| 12915 | inst.instruction |= inst.operands[1].reg << 12; |
| 12916 | inst.instruction |= inst.operands[2].reg << 16; |
| 12917 | break; |
| 12918 | |
| 12919 | case NS_RS: /* case 6. */ |
| 12920 | { |
| 12921 | struct neon_type_el et = neon_check_type (2, NS_NULL, |
| 12922 | N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY); |
| 12923 | unsigned logsize = neon_logbits (et.size); |
| 12924 | unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg); |
| 12925 | unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg); |
| 12926 | unsigned abcdebits = 0; |
| 12927 | |
| 12928 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1), |
| 12929 | _(BAD_FPU)); |
| 12930 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1) |
| 12931 | && et.size != 32, _(BAD_FPU)); |
| 12932 | constraint (et.type == NT_invtype, _("bad type for scalar")); |
| 12933 | constraint (x >= 64 / et.size, _("scalar index out of range")); |
| 12934 | |
| 12935 | switch (et.size) |
| 12936 | { |
| 12937 | case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break; |
| 12938 | case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break; |
| 12939 | case 32: abcdebits = 0x00; break; |
| 12940 | default: ; |
| 12941 | } |
| 12942 | |
| 12943 | abcdebits |= x << logsize; |
| 12944 | inst.instruction = 0xe100b10; |
| 12945 | do_vfp_cond_or_thumb (); |
| 12946 | inst.instruction |= LOW4 (dn) << 16; |
| 12947 | inst.instruction |= HI1 (dn) << 7; |
| 12948 | inst.instruction |= inst.operands[0].reg << 12; |
| 12949 | inst.instruction |= (abcdebits & 3) << 5; |
| 12950 | inst.instruction |= (abcdebits >> 2) << 21; |
| 12951 | } |
| 12952 | break; |
| 12953 | |
| 12954 | case NS_RRD: /* case 7 (fmrrd). */ |
| 12955 | constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2), |
| 12956 | _(BAD_FPU)); |
| 12957 | |
| 12958 | inst.instruction = 0xc500b10; |
| 12959 | do_vfp_cond_or_thumb (); |
| 12960 | inst.instruction |= inst.operands[0].reg << 12; |
| 12961 | inst.instruction |= inst.operands[1].reg << 16; |
| 12962 | inst.instruction |= LOW4 (inst.operands[2].reg); |
| 12963 | inst.instruction |= HI1 (inst.operands[2].reg) << 5; |
| 12964 | break; |
| 12965 | |
| 12966 | case NS_FF: /* case 8 (fcpys). */ |
| 12967 | do_vfp_nsyn_opcode ("fcpys"); |
| 12968 | break; |
| 12969 | |
| 12970 | case NS_FI: /* case 10 (fconsts). */ |
| 12971 | ldconst = "fconsts"; |
| 12972 | encode_fconstd: |
| 12973 | if (is_quarter_float (inst.operands[1].imm)) |
| 12974 | { |
| 12975 | inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm); |
| 12976 | do_vfp_nsyn_opcode (ldconst); |
| 12977 | } |
| 12978 | else |
| 12979 | first_error (_("immediate out of range")); |
| 12980 | break; |
| 12981 | |
| 12982 | case NS_RF: /* case 12 (fmrs). */ |
| 12983 | do_vfp_nsyn_opcode ("fmrs"); |
| 12984 | break; |
| 12985 | |
| 12986 | case NS_FR: /* case 13 (fmsr). */ |
| 12987 | do_vfp_nsyn_opcode ("fmsr"); |
| 12988 | break; |
| 12989 | |
| 12990 | /* The encoders for the fmrrs and fmsrr instructions expect three operands |
| 12991 | (one of which is a list), but we have parsed four. Do some fiddling to |
| 12992 | make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2 |
| 12993 | expect. */ |
| 12994 | case NS_RRFF: /* case 14 (fmrrs). */ |
| 12995 | constraint (inst.operands[3].reg != inst.operands[2].reg + 1, |
| 12996 | _("VFP registers must be adjacent")); |
| 12997 | inst.operands[2].imm = 2; |
| 12998 | memset (&inst.operands[3], '\0', sizeof (inst.operands[3])); |
| 12999 | do_vfp_nsyn_opcode ("fmrrs"); |
| 13000 | break; |
| 13001 | |
| 13002 | case NS_FFRR: /* case 15 (fmsrr). */ |
| 13003 | constraint (inst.operands[1].reg != inst.operands[0].reg + 1, |
| 13004 | _("VFP registers must be adjacent")); |
| 13005 | inst.operands[1] = inst.operands[2]; |
| 13006 | inst.operands[2] = inst.operands[3]; |
| 13007 | inst.operands[0].imm = 2; |
| 13008 | memset (&inst.operands[3], '\0', sizeof (inst.operands[3])); |
| 13009 | do_vfp_nsyn_opcode ("fmsrr"); |
| 13010 | break; |
| 13011 | |
| 13012 | default: |
| 13013 | abort (); |
| 13014 | } |
| 13015 | } |
| 13016 | |
| 13017 | static void |
| 13018 | do_neon_rshift_round_imm (void) |
| 13019 | { |
| 13020 | enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); |
| 13021 | struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY); |
| 13022 | int imm = inst.operands[2].imm; |
| 13023 | |
| 13024 | /* imm == 0 case is encoded as VMOV for V{R}SHR. */ |
| 13025 | if (imm == 0) |
| 13026 | { |
| 13027 | inst.operands[2].present = 0; |
| 13028 | do_neon_mov (); |
| 13029 | return; |
| 13030 | } |
| 13031 | |
| 13032 | constraint (imm < 1 || (unsigned)imm > et.size, |
| 13033 | _("immediate out of range for shift")); |
| 13034 | neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, |
| 13035 | et.size - imm); |
| 13036 | } |
| 13037 | |
| 13038 | static void |
| 13039 | do_neon_movl (void) |
| 13040 | { |
| 13041 | struct neon_type_el et = neon_check_type (2, NS_QD, |
| 13042 | N_EQK | N_DBL, N_SU_32 | N_KEY); |
| 13043 | unsigned sizebits = et.size >> 3; |
| 13044 | inst.instruction |= sizebits << 19; |
| 13045 | neon_two_same (0, et.type == NT_unsigned, -1); |
| 13046 | } |
| 13047 | |
| 13048 | static void |
| 13049 | do_neon_trn (void) |
| 13050 | { |
| 13051 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13052 | struct neon_type_el et = neon_check_type (2, rs, |
| 13053 | N_EQK, N_8 | N_16 | N_32 | N_KEY); |
| 13054 | inst.instruction = NEON_ENC_INTEGER (inst.instruction); |
| 13055 | neon_two_same (neon_quad (rs), 1, et.size); |
| 13056 | } |
| 13057 | |
| 13058 | static void |
| 13059 | do_neon_zip_uzp (void) |
| 13060 | { |
| 13061 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13062 | struct neon_type_el et = neon_check_type (2, rs, |
| 13063 | N_EQK, N_8 | N_16 | N_32 | N_KEY); |
| 13064 | if (rs == NS_DD && et.size == 32) |
| 13065 | { |
| 13066 | /* Special case: encode as VTRN.32 <Dd>, <Dm>. */ |
| 13067 | inst.instruction = N_MNEM_vtrn; |
| 13068 | do_neon_trn (); |
| 13069 | return; |
| 13070 | } |
| 13071 | neon_two_same (neon_quad (rs), 1, et.size); |
| 13072 | } |
| 13073 | |
| 13074 | static void |
| 13075 | do_neon_sat_abs_neg (void) |
| 13076 | { |
| 13077 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13078 | struct neon_type_el et = neon_check_type (2, rs, |
| 13079 | N_EQK, N_S8 | N_S16 | N_S32 | N_KEY); |
| 13080 | neon_two_same (neon_quad (rs), 1, et.size); |
| 13081 | } |
| 13082 | |
| 13083 | static void |
| 13084 | do_neon_pair_long (void) |
| 13085 | { |
| 13086 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13087 | struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY); |
| 13088 | /* Unsigned is encoded in OP field (bit 7) for these instruction. */ |
| 13089 | inst.instruction |= (et.type == NT_unsigned) << 7; |
| 13090 | neon_two_same (neon_quad (rs), 1, et.size); |
| 13091 | } |
| 13092 | |
| 13093 | static void |
| 13094 | do_neon_recip_est (void) |
| 13095 | { |
| 13096 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13097 | struct neon_type_el et = neon_check_type (2, rs, |
| 13098 | N_EQK | N_FLT, N_F32 | N_U32 | N_KEY); |
| 13099 | inst.instruction |= (et.type == NT_float) << 8; |
| 13100 | neon_two_same (neon_quad (rs), 1, et.size); |
| 13101 | } |
| 13102 | |
| 13103 | static void |
| 13104 | do_neon_cls (void) |
| 13105 | { |
| 13106 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13107 | struct neon_type_el et = neon_check_type (2, rs, |
| 13108 | N_EQK, N_S8 | N_S16 | N_S32 | N_KEY); |
| 13109 | neon_two_same (neon_quad (rs), 1, et.size); |
| 13110 | } |
| 13111 | |
| 13112 | static void |
| 13113 | do_neon_clz (void) |
| 13114 | { |
| 13115 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13116 | struct neon_type_el et = neon_check_type (2, rs, |
| 13117 | N_EQK, N_I8 | N_I16 | N_I32 | N_KEY); |
| 13118 | neon_two_same (neon_quad (rs), 1, et.size); |
| 13119 | } |
| 13120 | |
| 13121 | static void |
| 13122 | do_neon_cnt (void) |
| 13123 | { |
| 13124 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13125 | struct neon_type_el et = neon_check_type (2, rs, |
| 13126 | N_EQK | N_INT, N_8 | N_KEY); |
| 13127 | neon_two_same (neon_quad (rs), 1, et.size); |
| 13128 | } |
| 13129 | |
| 13130 | static void |
| 13131 | do_neon_swp (void) |
| 13132 | { |
| 13133 | enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); |
| 13134 | neon_two_same (neon_quad (rs), 1, -1); |
| 13135 | } |
| 13136 | |
| 13137 | static void |
| 13138 | do_neon_tbl_tbx (void) |
| 13139 | { |
| 13140 | unsigned listlenbits; |
| 13141 | neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY); |
| 13142 | |
| 13143 | if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4) |
| 13144 | { |
| 13145 | first_error (_("bad list length for table lookup")); |
| 13146 | return; |
| 13147 | } |
| 13148 | |
| 13149 | listlenbits = inst.operands[1].imm - 1; |
| 13150 | inst.instruction |= LOW4 (inst.operands[0].reg) << 12; |
| 13151 | inst.instruction |= HI1 (inst.operands[0].reg) << 22; |
| 13152 | inst.instruction |= LOW4 (inst.operands[1].reg) << 16; |
| 13153 | inst.instruction |= HI1 (inst.operands[1].reg) << 7; |
| 13154 | inst.instruction |= LOW4 (inst.operands[2].reg); |
| 13155 | inst.instruction |= HI1 (inst.operands[2].reg) << 5; |
| 13156 | inst.instruction |= listlenbits << 8; |
| 13157 | |
| 13158 | inst.instruction = neon_dp_fixup (inst.instruction); |
| 13159 | } |
| 13160 | |
| 13161 | static void |
| 13162 | do_neon_ldm_stm (void) |
| 13163 | { |
| 13164 | /* P, U and L bits are part of bitmask. */ |
| 13165 | int is_dbmode = (inst.instruction & (1 << 24)) != 0; |
| 13166 | unsigned offsetbits = inst.operands[1].imm * 2; |
| 13167 | |
| 13168 | if (inst.operands[1].issingle) |
| 13169 | { |
| 13170 | do_vfp_nsyn_ldm_stm (is_dbmode); |
| 13171 | return; |
| 13172 | } |
| 13173 | |
| 13174 | constraint (is_dbmode && !inst.operands[0].writeback, |
| 13175 | _("writeback (!) must be used for VLDMDB and VSTMDB")); |
| 13176 | |
| 13177 | constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16, |
| 13178 | _("register list must contain at least 1 and at most 16 " |
| 13179 | "registers")); |
| 13180 | |
| 13181 | inst.instruction |= inst.operands[0].reg << 16; |
| 13182 | inst.instruction |= inst.operands[0].writeback << 21; |
| 13183 | inst.instruction |= LOW4 (inst.operands[1].reg) << 12; |
| 13184 | inst.instruction |= HI1 (inst.operands[1].reg) << 22; |
| 13185 | |
| 13186 | inst.instruction |= offsetbits; |
| 13187 | |
| 13188 | do_vfp_cond_or_thumb (); |
| 13189 | } |
| 13190 | |
| 13191 | static void |
| 13192 | do_neon_ldr_str (void) |
| 13193 | { |
| 13194 | int is_ldr = (inst.instruction & (1 << 20)) != 0; |
| 13195 | |
| 13196 | if (inst.operands[0].issingle) |
| 13197 | { |
| 13198 | if (is_ldr) |
| 13199 | do_vfp_nsyn_opcode ("flds"); |
| 13200 | else |
| 13201 | do_vfp_nsyn_opcode ("fsts"); |
| 13202 | } |
| 13203 | else |
| 13204 | { |
| 13205 | if (is_ldr) |
| 13206 | do_vfp_nsyn_opcode ("fldd"); |
| 13207 | else |
| 13208 | do_vfp_nsyn_opcode ("fstd"); |
| 13209 | } |
| 13210 | } |
| 13211 | |
| 13212 | /* "interleave" version also handles non-interleaving register VLD1/VST1 |
| 13213 | instructions. */ |
| 13214 | |
static void
do_neon_ld_st_interleave (void)
{
  /* Only the element size matters here; the register-list shape was
     established during parsing.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
                                            N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
      -1, -1, 0x8, 0x9, -1, -1, 0x3, -1,  /* VLD2 / VST2.  */
      -1, -1, -1, -1, 0x4, 0x5, -1, -1,   /* VLD3 / VST3.  */
      -1, -1, -1, -1, -1, -1, 0x0, 0x1    /* VLD4 / VST4.  */
    };
  int typebits;

  /* A type error was already reported by neon_check_type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate an explicit alignment qualifier (stored in bits, shifted
     left by 8 in operand 1's immediate) into the two-bit "align" field.
     128- and 256-bit alignment is not available for 3-register lists.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  /* Element size field (bits [7:6]) holds log2 (size in bytes).  */
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  /* -1 marks list-shape/<n> combinations with no valid encoding.  */
  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
| 13277 | |
| 13278 | /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup. |
| 13279 | *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0 |
| 13280 | otherwise. The variable arguments are a list of pairs of legal (size, align) |
| 13281 | values, terminated with -1. */ |
| 13282 | |
| 13283 | static int |
| 13284 | neon_alignment_bit (int size, int align, int *do_align, ...) |
| 13285 | { |
| 13286 | va_list ap; |
| 13287 | int result = FAIL, thissize, thisalign; |
| 13288 | |
| 13289 | if (!inst.operands[1].immisalign) |
| 13290 | { |
| 13291 | *do_align = 0; |
| 13292 | return SUCCESS; |
| 13293 | } |
| 13294 | |
| 13295 | va_start (ap, do_align); |
| 13296 | |
| 13297 | do |
| 13298 | { |
| 13299 | thissize = va_arg (ap, int); |
| 13300 | if (thissize == -1) |
| 13301 | break; |
| 13302 | thisalign = va_arg (ap, int); |
| 13303 | |
| 13304 | if (size == thissize && align == thisalign) |
| 13305 | result = SUCCESS; |
| 13306 | } |
| 13307 | while (result != SUCCESS); |
| 13308 | |
| 13309 | va_end (ap); |
| 13310 | |
| 13311 | if (result == SUCCESS) |
| 13312 | *do_align = 1; |
| 13313 | else |
| 13314 | first_error (_("unsupported alignment for instruction")); |
| 13315 | |
| 13316 | return result; |
| 13317 | } |
| 13318 | |
static void
do_neon_ld_st_lane (void)
{
  /* Encode single-lane VLD<n>/VST<n>.  Only the element size matters;
     the register list was validated during parsing.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment qualifier is stored in bits, shifted left by 8.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, from bits [9:8] of the opcode bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of elements in one D register.  */
  int max_el = 64 / et.size;

  /* A type error was already reported by neon_check_type.  */
  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own set of legal (size, alignment) pairs and its own
     encoding of the alignment bits.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
                                       32, 32, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
                                       32, 64, -1);
      if (align_good == FAIL)
        return;
      /* VLD2/VST2 use a single alignment bit.  */
      if (do_align)
        inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      /* 3-element structures never take an alignment qualifier.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8:  alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            /* 32-bit elements distinguish 64- from 128-bit alignment.  */
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* The lane number sits above the size-dependent encoding bits.  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
| 13403 | |
| 13404 | /* Encode single n-element structure to all lanes VLD<n> instructions. */ |
| 13405 | |
static void
do_neon_ld_dup (void)
{
  /* Only the element size matters; the list shape was validated during
     parsing.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  /* A type error was already reported by neon_check_type.  */
  if (et.type == NT_invtype)
    return;

  /* Dispatch on <n> minus one, held in bits [9:8] of the opcode
     bitmask.  Each <n> has its own legal list lengths and (size,
     alignment) pairs.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      /* The parser should never produce a stride of 2 for VLD1.  */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      /* Bit 5 ("T") distinguishes a one- from a two-register list.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      /* Bit 5 ("T") encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      /* 3-element structures never take an alignment qualifier.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        /* 32-bit elements at 128-bit alignment use the reserved size
           encoding 0b11.  */
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Set the alignment bit if neon_alignment_bit asked for it.  */
  inst.instruction |= do_align << 4;
}
| 13477 | |
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
| 13480 | |
| 13481 | static void |
| 13482 | do_neon_ldx_stx (void) |
| 13483 | { |
| 13484 | switch (NEON_LANE (inst.operands[0].imm)) |
| 13485 | { |
| 13486 | case NEON_INTERLEAVE_LANES: |
| 13487 | inst.instruction = NEON_ENC_INTERLV (inst.instruction); |
| 13488 | do_neon_ld_st_interleave (); |
| 13489 | break; |
| 13490 | |
| 13491 | case NEON_ALL_LANES: |
| 13492 | inst.instruction = NEON_ENC_DUP (inst.instruction); |
| 13493 | do_neon_ld_dup (); |
| 13494 | break; |
| 13495 | |
| 13496 | default: |
| 13497 | inst.instruction = NEON_ENC_LANE (inst.instruction); |
| 13498 | do_neon_ld_st_lane (); |
| 13499 | } |
| 13500 | |
| 13501 | /* L bit comes from bit mask. */ |
| 13502 | inst.instruction |= LOW4 (inst.operands[0].reg) << 12; |
| 13503 | inst.instruction |= HI1 (inst.operands[0].reg) << 22; |
| 13504 | inst.instruction |= inst.operands[1].reg << 16; |
| 13505 | |
| 13506 | if (inst.operands[1].postind) |
| 13507 | { |
| 13508 | int postreg = inst.operands[1].imm & 0xf; |
| 13509 | constraint (!inst.operands[1].immisreg, |
| 13510 | _("post-index must be a register")); |
| 13511 | constraint (postreg == 0xd || postreg == 0xf, |
| 13512 | _("bad register for post-index")); |
| 13513 | inst.instruction |= postreg; |
| 13514 | } |
| 13515 | else if (inst.operands[1].writeback) |
| 13516 | { |
| 13517 | inst.instruction |= 0xd; |
| 13518 | } |
| 13519 | else |
| 13520 | inst.instruction |= 0xf; |
| 13521 | |
| 13522 | if (thumb_mode) |
| 13523 | inst.instruction |= 0xf9000000; |
| 13524 | else |
| 13525 | inst.instruction |= 0xf4000000; |
| 13526 | } |
| 13527 | |
| 13528 | \f |
| 13529 | /* Overall per-instruction processing. */ |
| 13530 | |
| 13531 | /* We need to be able to fix up arbitrary expressions in some statements. |
| 13532 | This is so that we can handle symbols that are an arbitrary distance from |
| 13533 | the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask), |
| 13534 | which returns part of an address in a form which will be valid for |
| 13535 | a data instruction. We do this by pushing the expression into a symbol |
| 13536 | in the expr_section, and creating a fix for that. */ |
| 13537 | |
| 13538 | static void |
| 13539 | fix_new_arm (fragS * frag, |
| 13540 | int where, |
| 13541 | short int size, |
| 13542 | expressionS * exp, |
| 13543 | int pc_rel, |
| 13544 | int reloc) |
| 13545 | { |
| 13546 | fixS * new_fix; |
| 13547 | |
| 13548 | switch (exp->X_op) |
| 13549 | { |
| 13550 | case O_constant: |
| 13551 | case O_symbol: |
| 13552 | case O_add: |
| 13553 | case O_subtract: |
| 13554 | new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc); |
| 13555 | break; |
| 13556 | |
| 13557 | default: |
| 13558 | new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0, |
| 13559 | pc_rel, reloc); |
| 13560 | break; |
| 13561 | } |
| 13562 | |
| 13563 | /* Mark whether the fix is to a THUMB instruction, or an ARM |
| 13564 | instruction. */ |
| 13565 | new_fix->tc_fix_data = thumb_mode; |
| 13566 | } |
| 13567 | |
/* Create a frag for an instruction requiring relaxation.  */
| 13569 | static void |
| 13570 | output_relax_insn (void) |
| 13571 | { |
| 13572 | char * to; |
| 13573 | symbolS *sym; |
| 13574 | int offset; |
| 13575 | |
| 13576 | /* The size of the instruction is unknown, so tie the debug info to the |
| 13577 | start of the instruction. */ |
| 13578 | dwarf2_emit_insn (0); |
| 13579 | |
| 13580 | switch (inst.reloc.exp.X_op) |
| 13581 | { |
| 13582 | case O_symbol: |
| 13583 | sym = inst.reloc.exp.X_add_symbol; |
| 13584 | offset = inst.reloc.exp.X_add_number; |
| 13585 | break; |
| 13586 | case O_constant: |
| 13587 | sym = NULL; |
| 13588 | offset = inst.reloc.exp.X_add_number; |
| 13589 | break; |
| 13590 | default: |
| 13591 | sym = make_expr_symbol (&inst.reloc.exp); |
| 13592 | offset = 0; |
| 13593 | break; |
| 13594 | } |
| 13595 | to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE, |
| 13596 | inst.relax, sym, offset, NULL/*offset, opcode*/); |
| 13597 | md_number_to_chars (to, inst.instruction, THUMB_SIZE); |
| 13598 | } |
| 13599 | |
| 13600 | /* Write a 32-bit thumb instruction to buf. */ |
| 13601 | static void |
| 13602 | put_thumb32_insn (char * buf, unsigned long insn) |
| 13603 | { |
| 13604 | md_number_to_chars (buf, insn >> 16, THUMB_SIZE); |
| 13605 | md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE); |
| 13606 | } |
| 13607 | |
| 13608 | static void |
| 13609 | output_inst (const char * str) |
| 13610 | { |
| 13611 | char * to = NULL; |
| 13612 | |
| 13613 | if (inst.error) |
| 13614 | { |
| 13615 | as_bad ("%s -- `%s'", inst.error, str); |
| 13616 | return; |
| 13617 | } |
| 13618 | if (inst.relax) { |
| 13619 | output_relax_insn(); |
| 13620 | return; |
| 13621 | } |
| 13622 | if (inst.size == 0) |
| 13623 | return; |
| 13624 | |
| 13625 | to = frag_more (inst.size); |
| 13626 | |
| 13627 | if (thumb_mode && (inst.size > THUMB_SIZE)) |
| 13628 | { |
| 13629 | assert (inst.size == (2 * THUMB_SIZE)); |
| 13630 | put_thumb32_insn (to, inst.instruction); |
| 13631 | } |
| 13632 | else if (inst.size > INSN_SIZE) |
| 13633 | { |
| 13634 | assert (inst.size == (2 * INSN_SIZE)); |
| 13635 | md_number_to_chars (to, inst.instruction, INSN_SIZE); |
| 13636 | md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE); |
| 13637 | } |
| 13638 | else |
| 13639 | md_number_to_chars (to, inst.instruction, inst.size); |
| 13640 | |
| 13641 | if (inst.reloc.type != BFD_RELOC_UNUSED) |
| 13642 | fix_new_arm (frag_now, to - frag_now->fr_literal, |
| 13643 | inst.size, & inst.reloc.exp, inst.reloc.pc_rel, |
| 13644 | inst.reloc.type); |
| 13645 | |
| 13646 | dwarf2_emit_insn (inst.size); |
| 13647 | } |
| 13648 | |
| 13649 | /* Tag values used in struct asm_opcode's tag field. */ |
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
| 13682 | |
| 13683 | /* Subroutine of md_assemble, responsible for looking up the primary |
| 13684 | opcode from the mnemonic the user wrote. STR points to the |
| 13685 | beginning of the mnemonic. |
| 13686 | |
| 13687 | This is not simply a hash table lookup, because of conditional |
| 13688 | variants. Most instructions have conditional variants, which are |
| 13689 | expressed with a _conditional affix_ to the mnemonic. If we were |
| 13690 | to encode each conditional variant as a literal string in the opcode |
| 13691 | table, it would have approximately 20,000 entries. |
| 13692 | |
| 13693 | Most mnemonics take this affix as a suffix, and in unified syntax, |
| 13694 | 'most' is upgraded to 'all'. However, in the divided syntax, some |
| 13695 | instructions take the affix as an infix, notably the s-variants of |
| 13696 | the arithmetic instructions. Of those instructions, all but six |
| 13697 | have the infix appear after the third character of the mnemonic. |
| 13698 | |
| 13699 | Accordingly, the algorithm for looking up primary opcodes given |
| 13700 | an identifier is: |
| 13701 | |
| 13702 | 1. Look up the identifier in the opcode table. |
| 13703 | If we find a match, go to step U. |
| 13704 | |
| 13705 | 2. Look up the last two characters of the identifier in the |
| 13706 | conditions table. If we find a match, look up the first N-2 |
| 13707 | characters of the identifier in the opcode table. If we |
| 13708 | find a match, go to step CE. |
| 13709 | |
| 13710 | 3. Look up the fourth and fifth characters of the identifier in |
| 13711 | the conditions table. If we find a match, extract those |
| 13712 | characters from the identifier, and look up the remaining |
| 13713 | characters in the opcode table. If we find a match, go |
| 13714 | to step CM. |
| 13715 | |
| 13716 | 4. Fail. |
| 13717 | |
| 13718 | U. Examine the tag field of the opcode structure, in case this is |
| 13719 | one of the six instructions with its conditional infix in an |
| 13720 | unusual place. If it is, the tag tells us where to find the |
| 13721 | infix; look it up in the conditions table and set inst.cond |
| 13722 | accordingly. Otherwise, this is an unconditional instruction. |
| 13723 | Again set inst.cond accordingly. Return the opcode structure. |
| 13724 | |
| 13725 | CE. Examine the tag field to make sure this is an instruction that |
| 13726 | should receive a conditional suffix. If it is not, fail. |
| 13727 | Otherwise, set inst.cond from the suffix we already looked up, |
| 13728 | and return the opcode structure. |
| 13729 | |
| 13730 | CM. Examine the tag field to make sure this is an instruction that |
| 13731 | should receive a conditional infix after the third character. |
| 13732 | If it is not, fail. Otherwise, undo the edits to the current |
| 13733 | line of input and proceed as for case CE. */ |
| 13734 | |
/* Look up the mnemonic at *STR, implementing the algorithm described in
   the comment above: try the unaffixed form, then a conditional suffix,
   then a conditional infix after the third character.  Handles the
   unified-syntax .w/.n width suffixes and Neon type suffixes.  On
   success, advances *STR past the mnemonic (and any suffixes), sets
   inst.cond (and possibly inst.size_req / inst.vectype), and returns
   the opcode table entry; returns 0 on failure.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];
  bfd_boolean neon_supported;

  neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon instructions), or end of string. */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
      break;

  /* Empty mnemonic.  */
  if (end == base)
    return 0;

  /* Handle a possible width suffix and/or Neon type suffix. */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
         use. */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
             non-unified ARM syntax mode). */
          if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return 0;
        }
      else if (end[offset] != '\0' && end[offset] != ' ')
        return 0;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic. */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  /* Ordinary entry: no unusual infix, so the whole mnemonic
	     matched and it is unconditional.  */
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      /* The tag encodes the offset of the condition infix within the
	 mnemonic (relative to OT_odd_infix_0); it must name a valid
	 condition or the table is corrupt.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);
      assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters (one base character plus the two-character suffix). */
  if (end - base < 3)
    return 0;

  /* Look for suffixed mnemonic. */
  affix = end - 2;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics. */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    {
	      inst.cond = cond->value;
	    }
	  else
	    {
	      /* delayed diagnostic */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return 0;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix). */
  if (end - base < 6)
    return 0;

  /* Look for infixed mnemonic in the usual position. */
  affix = base + 3;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return 0;

  /* Temporarily splice the two condition characters out of the input
     line so the remainder can be looked up as a mnemonic, then restore
     the line to its original form (see step CM in the comment above).  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* step CM */
      if (unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return 0;
}
| 13893 | |
| 13894 | void |
| 13895 | md_assemble (char *str) |
| 13896 | { |
| 13897 | char *p = str; |
| 13898 | const struct asm_opcode * opcode; |
| 13899 | |
| 13900 | /* Align the previous label if needed. */ |
| 13901 | if (last_label_seen != NULL) |
| 13902 | { |
| 13903 | symbol_set_frag (last_label_seen, frag_now); |
| 13904 | S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ()); |
| 13905 | S_SET_SEGMENT (last_label_seen, now_seg); |
| 13906 | } |
| 13907 | |
| 13908 | memset (&inst, '\0', sizeof (inst)); |
| 13909 | inst.reloc.type = BFD_RELOC_UNUSED; |
| 13910 | |
| 13911 | opcode = opcode_lookup (&p); |
| 13912 | if (!opcode) |
| 13913 | { |
| 13914 | /* It wasn't an instruction, but it might be a register alias of |
| 13915 | the form alias .req reg, or a Neon .dn/.qn directive. */ |
| 13916 | if (!create_register_alias (str, p) |
| 13917 | && !create_neon_reg_alias (str, p)) |
| 13918 | as_bad (_("bad instruction `%s'"), str); |
| 13919 | |
| 13920 | return; |
| 13921 | } |
| 13922 | |
| 13923 | if (opcode->tag == OT_cinfix3_deprecated) |
| 13924 | as_warn (_("s suffix on comparison instruction is deprecated")); |
| 13925 | |
| 13926 | /* The value which unconditional instructions should have in place of the |
| 13927 | condition field. */ |
| 13928 | inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1; |
| 13929 | |
| 13930 | if (thumb_mode) |
| 13931 | { |
| 13932 | arm_feature_set variant; |
| 13933 | |
| 13934 | variant = cpu_variant; |
| 13935 | /* Only allow coprocessor instructions on Thumb-2 capable devices. */ |
| 13936 | if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2)) |
| 13937 | ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard); |
| 13938 | /* Check that this instruction is supported for this CPU. */ |
| 13939 | if (!opcode->tvariant |
| 13940 | || (thumb_mode == 1 |
| 13941 | && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant))) |
| 13942 | { |
| 13943 | as_bad (_("selected processor does not support `%s'"), str); |
| 13944 | return; |
| 13945 | } |
| 13946 | if (inst.cond != COND_ALWAYS && !unified_syntax |
| 13947 | && opcode->tencode != do_t_branch) |
| 13948 | { |
| 13949 | as_bad (_("Thumb does not support conditional execution")); |
| 13950 | return; |
| 13951 | } |
| 13952 | |
| 13953 | /* Check conditional suffixes. */ |
| 13954 | if (current_it_mask) |
| 13955 | { |
| 13956 | int cond; |
| 13957 | cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1; |
| 13958 | current_it_mask <<= 1; |
| 13959 | current_it_mask &= 0x1f; |
| 13960 | /* The BKPT instruction is unconditional even in an IT block. */ |
| 13961 | if (!inst.error |
| 13962 | && cond != inst.cond && opcode->tencode != do_t_bkpt) |
| 13963 | { |
| 13964 | as_bad (_("incorrect condition in IT block")); |
| 13965 | return; |
| 13966 | } |
| 13967 | } |
| 13968 | else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch) |
| 13969 | { |
| 13970 | as_bad (_("thumb conditional instrunction not in IT block")); |
| 13971 | return; |
| 13972 | } |
| 13973 | |
| 13974 | mapping_state (MAP_THUMB); |
| 13975 | inst.instruction = opcode->tvalue; |
| 13976 | |
| 13977 | if (!parse_operands (p, opcode->operands)) |
| 13978 | opcode->tencode (); |
| 13979 | |
| 13980 | /* Clear current_it_mask at the end of an IT block. */ |
| 13981 | if (current_it_mask == 0x10) |
| 13982 | current_it_mask = 0; |
| 13983 | |
| 13984 | if (!(inst.error || inst.relax)) |
| 13985 | { |
| 13986 | assert (inst.instruction < 0xe800 || inst.instruction > 0xffff); |
| 13987 | inst.size = (inst.instruction > 0xffff ? 4 : 2); |
| 13988 | if (inst.size_req && inst.size_req != inst.size) |
| 13989 | { |
| 13990 | as_bad (_("cannot honor width suffix -- `%s'"), str); |
| 13991 | return; |
| 13992 | } |
| 13993 | } |
| 13994 | ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, |
| 13995 | *opcode->tvariant); |
| 13996 | /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly |
| 13997 | set those bits when Thumb-2 32-bit instructions are seen. ie. |
| 13998 | anything other than bl/blx. |
| 13999 | This is overly pessimistic for relaxable instructions. */ |
| 14000 | if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800) |
| 14001 | || inst.relax) |
| 14002 | ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, |
| 14003 | arm_ext_v6t2); |
| 14004 | } |
| 14005 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) |
| 14006 | { |
| 14007 | /* Check that this instruction is supported for this CPU. */ |
| 14008 | if (!opcode->avariant || |
| 14009 | !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)) |
| 14010 | { |
| 14011 | as_bad (_("selected processor does not support `%s'"), str); |
| 14012 | return; |
| 14013 | } |
| 14014 | if (inst.size_req) |
| 14015 | { |
| 14016 | as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str); |
| 14017 | return; |
| 14018 | } |
| 14019 | |
| 14020 | mapping_state (MAP_ARM); |
| 14021 | inst.instruction = opcode->avalue; |
| 14022 | if (opcode->tag == OT_unconditionalF) |
| 14023 | inst.instruction |= 0xF << 28; |
| 14024 | else |
| 14025 | inst.instruction |= inst.cond << 28; |
| 14026 | inst.size = INSN_SIZE; |
| 14027 | if (!parse_operands (p, opcode->operands)) |
| 14028 | opcode->aencode (); |
| 14029 | /* Arm mode bx is marked as both v4T and v5 because it's still required |
| 14030 | on a hypothetical non-thumb v5 core. */ |
| 14031 | if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t) |
| 14032 | || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5)) |
| 14033 | ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t); |
| 14034 | else |
| 14035 | ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, |
| 14036 | *opcode->avariant); |
| 14037 | } |
| 14038 | else |
| 14039 | { |
| 14040 | as_bad (_("attempt to use an ARM instruction on a Thumb-only processor " |
| 14041 | "-- `%s'"), str); |
| 14042 | return; |
| 14043 | } |
| 14044 | output_inst (str); |
| 14045 | } |
| 14046 | |
| 14047 | /* Various frobbings of labels and their addresses. */ |
| 14048 | |
/* Hook run at the start of each input line (presumably wired up as
   md_start_line_hook -- confirm in tc-arm.h).  Forget any label seen on
   a previous line so md_assemble only re-anchors labels defined on the
   current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
| 14054 | |
/* Hook called when the label SYM is defined.  Records it in
   last_label_seen (md_assemble later re-points it at the frag of the
   instruction that follows), tags it with the current Thumb/ARM and
   interworking state, and marks non-local code labels as Thumb
   functions when requested via .thumb_func.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* Note - do not allow local symbols (.Lxxx) to be labeled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* .thumb_func applies to the next label only.  */
      label_is_thumb_function_name = FALSE;
    }

  /* Let the DWARF-2 machinery know about the label as well.  */
  dwarf2_emit_label (sym);
}
| 14111 | |
| 14112 | int |
| 14113 | arm_data_in_code (void) |
| 14114 | { |
| 14115 | if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5)) |
| 14116 | { |
| 14117 | *input_line_pointer = '/'; |
| 14118 | input_line_pointer += 5; |
| 14119 | *input_line_pointer = 0; |
| 14120 | return 1; |
| 14121 | } |
| 14122 | |
| 14123 | return 0; |
| 14124 | } |
| 14125 | |
| 14126 | char * |
| 14127 | arm_canonicalize_symbol_name (char * name) |
| 14128 | { |
| 14129 | int len; |
| 14130 | |
| 14131 | if (thumb_mode && (len = strlen (name)) > 5 |
| 14132 | && streq (name + len - 5, "/data")) |
| 14133 | *(name + len - 5) = 0; |
| 14134 | |
| 14135 | return name; |
| 14136 | } |
| 14137 | \f |
| 14138 | /* Table of all register names defined by default. The user can |
| 14139 | define additional names with .req. Note that all register names |
| 14140 | should appear in both upper and lowercase variants. Some registers |
| 14141 | also have mixed-case names. */ |
| 14142 | |
/* REGDEF defines a single register entry: stringized name S, register
   number N, and type REG_TYPE_T.  The trailing TRUE, 0 initialize the
   remaining struct reg_entry fields (declared elsewhere in this file).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* REGNUM pastes prefix and number to form the name, e.g. REGNUM(r,0,RN)
   defines "r0" as register 0.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGNUM2 names the register p<n> but gives it number 2*n -- used below
   for Neon Q registers, which appear to be numbered in terms of the
   D registers they overlay (NOTE(review): confirm).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* REGSET defines registers p0..p15; REGSETH extends with p16..p31;
   REGSET2 defines q0..q15 with doubled numbers via REGNUM2.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
| 14161 | |
/* The built-in register name table.  Each name is defined in both
   upper- and lower-case forms (and some in mixed case) because lookup
   is case-sensitive.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers. */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms. */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases. */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers. */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers. The "cr" variants are for backward
     compatibility. */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers. */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers. */
  REGSET(s,VFS), REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers. */
  REGSET(d,VFD), REGSET(D,VFD),
  /* Extra Neon DP registers. */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers. */
  REGSET2(q,NQ), REGSET2(Q,NQ),

  /* VFP control registers. */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),

  /* Maverick DSP coprocessor registers. */
  REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
  REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15. */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3. */
  REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
  REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers. */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): REGNUM2, REGSETH and REGSET2 are left defined here --
   confirm nothing later in the file relies on them before adding the
   matching #undefs.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
| 14247 | |
| 14248 | /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled |
| 14249 | within psr_required_here. */ |
/* PSR field-mask suffixes for MSR/MRS.  Every permutation of each
   subset of the four flag letters {f,s,x,c} is listed explicitly so
   the flags may be written in any order; each entry maps to the OR of
   the corresponding PSR_* mask bits.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation. Note that "all" is no longer
     truly all possible PSR bits. */
  {"all", PSR_c | PSR_f},
  {"flg", PSR_f},
  {"ctl", PSR_c},

  /* Individual flags. */
  {"f", PSR_f},
  {"c", PSR_c},
  {"x", PSR_x},
  {"s", PSR_s},
  /* Combinations of flags. */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
| 14325 | |
| 14326 | /* Table of V7M psr names. */ |
/* Table of V7M psr names. */
/* The numeric values appear to be the special-register encodings used
   by the v7-M MRS/MSR instructions (SYSm field) -- NOTE(review):
   confirm against the ARMv7-M Architecture Reference Manual.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr", 0 },
  {"iapsr", 1 },
  {"eapsr", 2 },
  {"psr", 3 },
  {"ipsr", 5 },
  {"epsr", 6 },
  {"iepsr", 7 },
  {"msp", 8 },
  {"psp", 9 },
  {"primask", 16},
  {"basepri", 17},
  {"basepri_max", 18},
  {"faultmask", 19},
  {"control", 20}
};
| 14344 | |
| 14345 | /* Table of all shift-in-operand names. */ |
/* Table of all shift-in-operand names.  Note that "asl" is accepted as
   a synonym for "lsl" (both map to SHIFT_LSL).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
};
| 14355 | |
| 14356 | /* Table of all explicit relocation names. */ |
| 14357 | #ifdef OBJ_ELF |
/* Maps explicit relocation specifier names (accepted in both cases) to
   their BFD relocation codes.  ELF only.  */
static struct reloc_entry reloc_names[] =
{
  { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
  { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
  { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
  { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
};
| 14372 | #endif |
| 14373 | |
| 14374 | /* Table of all conditional affixes. 0xF is not defined as a condition code. */ |
/* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* Some encodings have several accepted spellings: cs/hs share 0x2 and
   cc/ul/lo share 0x3.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
| 14393 | |
/* Barrier (DMB/DSB-style) option names.  The values appear to be the
   4-bit barrier option encodings (sy = full system, st = store-only,
   un/unst = non-shareable variants) -- NOTE(review): confirm against
   the ARM ARM barrier option table.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy", 0xf },
  { "un", 0x7 },
  { "st", 0xe },
  { "unst", 0x6 }
};
| 14401 | |
| 14402 | /* Table of ARM-format instructions. */ |
| 14403 | |
/* Macros for gluing together operand strings. N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   OPS<n> builds the operand-parse-code array for an entry taking n
   operands; the table macros below select one via OPS##nops.  */
#define OPS0() { OP_stop, }
#define OPS1(a) { OP_##a, }
#define OPS2(a,b) { OP_##a,OP_##b, }
#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
| 14414 | |
| 14415 | /* These macros abstract out the exact format of the mnemonic table and |
| 14416 | save some repeated characters. */ |
| 14417 | |
/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.
   Parameters: mnemonic, ARM opcode, Thumb opcode, operand count, operand
   list, ARM encoder (do_##ae) and Thumb encoder (do_##te).  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator. */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  The "w" (TxC3w/TC3w/tC3w) forms
   additionally mark the entry OT_cinfix3_deprecated, which makes
   md_assemble warn about the s-suffix spelling.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
| 14446 | |
/* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
   appear in the condition table.  The m2 argument is the condition
   spelled out; an empty m2 produces the unconditional entry
   (OT_odd_infix_unc), otherwise the tag records where the infix sits
   (OT_odd_infix_0 + offset of m2 within the mnemonic).  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* TxCM expands to one table entry per conditional variant (plus the
   bare form) -- this is why such mnemonics are enumerated rather than
   decoded algorithmically.  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
      TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
      TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized. The ARM condition-code
   field is still 0xE. Many of the Thumb variants can be executed
   conditionally, so this is checked separately. */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field. */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above. */
#define CE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character. */
#define CL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions. Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM opcode with the 0xe condition prefixed.  */
#define cCE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous. For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones. */
#define cCL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case). */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* ARM-only analogue of TxCM_: odd-position conditional infix, no Thumb
   variant.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
| 14526 | |
| 14527 | #define CM(m1, m2, op, nops, ops, ae) \ |
| 14528 | xCM_(m1, , m2, op, nops, ops, ae), \ |
| 14529 | xCM_(m1, eq, m2, op, nops, ops, ae), \ |
| 14530 | xCM_(m1, ne, m2, op, nops, ops, ae), \ |
| 14531 | xCM_(m1, cs, m2, op, nops, ops, ae), \ |
| 14532 | xCM_(m1, hs, m2, op, nops, ops, ae), \ |
| 14533 | xCM_(m1, cc, m2, op, nops, ops, ae), \ |
| 14534 | xCM_(m1, ul, m2, op, nops, ops, ae), \ |
| 14535 | xCM_(m1, lo, m2, op, nops, ops, ae), \ |
| 14536 | xCM_(m1, mi, m2, op, nops, ops, ae), \ |
| 14537 | xCM_(m1, pl, m2, op, nops, ops, ae), \ |
| 14538 | xCM_(m1, vs, m2, op, nops, ops, ae), \ |
| 14539 | xCM_(m1, vc, m2, op, nops, ops, ae), \ |
| 14540 | xCM_(m1, hi, m2, op, nops, ops, ae), \ |
| 14541 | xCM_(m1, ls, m2, op, nops, ops, ae), \ |
| 14542 | xCM_(m1, ge, m2, op, nops, ops, ae), \ |
| 14543 | xCM_(m1, lt, m2, op, nops, ops, ae), \ |
| 14544 | xCM_(m1, gt, m2, op, nops, ops, ae), \ |
| 14545 | xCM_(m1, le, m2, op, nops, ops, ae), \ |
| 14546 | xCM_(m1, al, m2, op, nops, ops, ae) |
| 14547 | |
| 14548 | #define UE(mnem, op, nops, ops, ae) \ |
| 14549 | { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL } |
| 14550 | |
| 14551 | #define UF(mnem, op, nops, ops, ae) \ |
| 14552 | { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL } |
| 14553 | |
| 14554 | /* Neon data-processing. ARM versions are unconditional with cond=0xf. |
| 14555 | The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we |
| 14556 | use the same encoding function for each. */ |
| 14557 | #define NUF(mnem, op, nops, ops, enc) \ |
| 14558 | { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \ |
| 14559 | ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc } |
| 14560 | |
| 14561 | /* Neon data processing, version which indirects through neon_enc_tab for |
| 14562 | the various overloaded versions of opcodes. */ |
| 14563 | #define nUF(mnem, op, nops, ops, enc) \ |
| 14564 | { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \ |
| 14565 | ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc } |
| 14566 | |
| 14567 | /* Neon insn with conditional suffix for the ARM version, non-overloaded |
| 14568 | version. */ |
| 14569 | #define NCE_tag(mnem, op, nops, ops, enc, tag) \ |
| 14570 | { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \ |
| 14571 | THUMB_VARIANT, do_##enc, do_##enc } |
| 14572 | |
| 14573 | #define NCE(mnem, op, nops, ops, enc) \ |
| 14574 | NCE_tag(mnem, op, nops, ops, enc, OT_csuffix) |
| 14575 | |
| 14576 | #define NCEF(mnem, op, nops, ops, enc) \ |
| 14577 | NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF) |
| 14578 | |
| 14579 | /* Neon insn with conditional suffix for the ARM version, overloaded types. */ |
| 14580 | #define nCE_tag(mnem, op, nops, ops, enc, tag) \ |
| 14581 | { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \ |
| 14582 | ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc } |
| 14583 | |
| 14584 | #define nCE(mnem, op, nops, ops, enc) \ |
| 14585 | nCE_tag(mnem, op, nops, ops, enc, OT_csuffix) |
| 14586 | |
| 14587 | #define nCEF(mnem, op, nops, ops, enc) \ |
| 14588 | nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF) |
| 14589 | |
| 14590 | #define do_0 0 |
| 14591 | |
| 14592 | /* Thumb-only, unconditional. */ |
| 14593 | #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te) |
| 14594 | |
| 14595 | static const struct asm_opcode insns[] = |
| 14596 | { |
| 14597 | #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */ |
| 14598 | #define THUMB_VARIANT &arm_ext_v4t |
| 14599 | tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c), |
| 14600 | tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c), |
| 14601 | tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c), |
| 14602 | tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c), |
| 14603 | tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub), |
| 14604 | tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub), |
| 14605 | tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub), |
| 14606 | tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub), |
| 14607 | tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c), |
| 14608 | tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c), |
| 14609 | tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3), |
| 14610 | tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3), |
| 14611 | tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c), |
| 14612 | tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c), |
| 14613 | tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3), |
| 14614 | tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3), |
| 14615 | |
| 14616 | /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism |
| 14617 | for setting PSR flag bits. They are obsolete in V6 and do not |
| 14618 | have Thumb equivalents. */ |
| 14619 | tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst), |
| 14620 | tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst), |
| 14621 | CL(tstp, 110f000, 2, (RR, SH), cmp), |
| 14622 | tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp), |
| 14623 | tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp), |
| 14624 | CL(cmpp, 150f000, 2, (RR, SH), cmp), |
| 14625 | tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst), |
| 14626 | tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst), |
| 14627 | CL(cmnp, 170f000, 2, (RR, SH), cmp), |
| 14628 | |
| 14629 | tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp), |
| 14630 | tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp), |
| 14631 | tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst), |
| 14632 | tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst), |
| 14633 | |
| 14634 | tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst), |
| 14635 | tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst), |
| 14636 | tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst), |
| 14637 | tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst), |
| 14638 | |
| 14639 | tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14640 | tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14641 | tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14642 | tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14643 | tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14644 | tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14645 | |
| 14646 | TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi), |
| 14647 | TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi), |
| 14648 | tCE(b, a000000, b, 1, (EXPr), branch, t_branch), |
| 14649 | TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23), |
| 14650 | |
| 14651 | /* Pseudo ops. */ |
| 14652 | tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr), |
| 14653 | C3(adrl, 28f0000, 2, (RR, EXP), adrl), |
| 14654 | tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop), |
| 14655 | |
| 14656 | /* Thumb-compatibility pseudo ops. */ |
| 14657 | tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift), |
| 14658 | tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift), |
| 14659 | tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift), |
| 14660 | tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift), |
| 14661 | tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift), |
| 14662 | tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift), |
| 14663 | tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift), |
| 14664 | tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift), |
| 14665 | tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg), |
| 14666 | tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg), |
| 14667 | tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop), |
| 14668 | tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop), |
| 14669 | |
| 14670 | #undef THUMB_VARIANT |
| 14671 | #define THUMB_VARIANT &arm_ext_v6 |
| 14672 | TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy), |
| 14673 | |
| 14674 | /* V1 instructions with no Thumb analogue prior to V6T2. */ |
| 14675 | #undef THUMB_VARIANT |
| 14676 | #define THUMB_VARIANT &arm_ext_v6t2 |
| 14677 | TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb), |
| 14678 | TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb), |
| 14679 | TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst), |
| 14680 | TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst), |
| 14681 | CL(teqp, 130f000, 2, (RR, SH), cmp), |
| 14682 | |
| 14683 | TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt), |
| 14684 | TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt), |
| 14685 | TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt), |
| 14686 | TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt), |
| 14687 | |
| 14688 | TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14689 | TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14690 | |
| 14691 | TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14692 | TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm), |
| 14693 | |
| 14694 | /* V1 instructions with no Thumb analogue at all. */ |
| 14695 | CE(rsc, 0e00000, 3, (RR, oRR, SH), arit), |
| 14696 | C3(rscs, 0f00000, 3, (RR, oRR, SH), arit), |
| 14697 | |
| 14698 | C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm), |
| 14699 | C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm), |
| 14700 | C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm), |
| 14701 | C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm), |
| 14702 | C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm), |
| 14703 | C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm), |
| 14704 | C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm), |
| 14705 | C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm), |
| 14706 | |
| 14707 | #undef ARM_VARIANT |
| 14708 | #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */ |
| 14709 | #undef THUMB_VARIANT |
| 14710 | #define THUMB_VARIANT &arm_ext_v4t |
| 14711 | tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul), |
| 14712 | tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul), |
| 14713 | |
| 14714 | #undef THUMB_VARIANT |
| 14715 | #define THUMB_VARIANT &arm_ext_v6t2 |
| 14716 | TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla), |
| 14717 | C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas), |
| 14718 | |
| 14719 | /* Generic coprocessor instructions. */ |
| 14720 | TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp), |
| 14721 | TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), |
| 14722 | TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), |
| 14723 | TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), |
| 14724 | TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), |
| 14725 | TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), |
| 14726 | TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), |
| 14727 | |
| 14728 | #undef ARM_VARIANT |
| 14729 | #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */ |
| 14730 | CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn), |
| 14731 | C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn), |
| 14732 | |
| 14733 | #undef ARM_VARIANT |
| 14734 | #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */ |
| 14735 | TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs), |
| 14736 | TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr), |
| 14737 | |
| 14738 | #undef ARM_VARIANT |
| 14739 | #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */ |
| 14740 | TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), |
| 14741 | CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), |
| 14742 | TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), |
| 14743 | CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), |
| 14744 | TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), |
| 14745 | CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), |
| 14746 | TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), |
| 14747 | CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), |
| 14748 | |
| 14749 | #undef ARM_VARIANT |
| 14750 | #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */ |
| 14751 | #undef THUMB_VARIANT |
| 14752 | #define THUMB_VARIANT &arm_ext_v4t |
| 14753 | tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), |
| 14754 | tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), |
| 14755 | tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), |
| 14756 | tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), |
| 14757 | tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), |
| 14758 | tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), |
| 14759 | |
| 14760 | #undef ARM_VARIANT |
| 14761 | #define ARM_VARIANT &arm_ext_v4t_5 |
| 14762 | /* ARM Architecture 4T. */ |
| 14763 | /* Note: bx (and blx) are required on V5, even if the processor does |
| 14764 | not support Thumb. */ |
| 14765 | TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx), |
| 14766 | |
| 14767 | #undef ARM_VARIANT |
| 14768 | #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */ |
| 14769 | #undef THUMB_VARIANT |
| 14770 | #define THUMB_VARIANT &arm_ext_v5t |
| 14771 | /* Note: blx has 2 variants; the .value coded here is for |
| 14772 | BLX(2). Only this variant has conditional execution. */ |
| 14773 | TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx), |
| 14774 | TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt), |
| 14775 | |
| 14776 | #undef THUMB_VARIANT |
| 14777 | #define THUMB_VARIANT &arm_ext_v6t2 |
| 14778 | TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz), |
| 14779 | TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), |
| 14780 | TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), |
| 14781 | TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), |
| 14782 | TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), |
| 14783 | TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp), |
| 14784 | TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), |
| 14785 | TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), |
| 14786 | |
| 14787 | #undef ARM_VARIANT |
| 14788 | #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */ |
| 14789 | TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), |
| 14790 | TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), |
| 14791 | TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), |
| 14792 | TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), |
| 14793 | |
| 14794 | TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), |
| 14795 | TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), |
| 14796 | |
| 14797 | TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), |
| 14798 | TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), |
| 14799 | TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), |
| 14800 | TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), |
| 14801 | |
| 14802 | TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14803 | TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14804 | TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14805 | TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14806 | |
| 14807 | TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14808 | TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14809 | |
| 14810 | TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), |
| 14811 | TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), |
| 14812 | TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), |
| 14813 | TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn), |
| 14814 | |
| 14815 | #undef ARM_VARIANT |
| 14816 | #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */ |
| 14817 | TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld), |
| 14818 | TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd), |
| 14819 | TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd), |
| 14820 | |
| 14821 | TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), |
| 14822 | TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), |
| 14823 | |
| 14824 | #undef ARM_VARIANT |
| 14825 | #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */ |
| 14826 | TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj), |
| 14827 | |
| 14828 | #undef ARM_VARIANT |
| 14829 | #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */ |
| 14830 | #undef THUMB_VARIANT |
| 14831 | #define THUMB_VARIANT &arm_ext_v6 |
| 14832 | TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi), |
| 14833 | TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi), |
| 14834 | tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev), |
| 14835 | tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev), |
| 14836 | tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev), |
| 14837 | tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), |
| 14838 | tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), |
| 14839 | tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), |
| 14840 | tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), |
| 14841 | TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend), |
| 14842 | |
| 14843 | #undef THUMB_VARIANT |
| 14844 | #define THUMB_VARIANT &arm_ext_v6t2 |
| 14845 | TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex), |
| 14846 | TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), |
| 14847 | TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), |
| 14848 | |
| 14849 | TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat), |
| 14850 | TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat), |
| 14851 | |
| 14852 | /* ARM V6 not included in V7M (eg. integer SIMD). */ |
| 14853 | #undef THUMB_VARIANT |
| 14854 | #define THUMB_VARIANT &arm_ext_v6_notm |
| 14855 | TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps), |
| 14856 | TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt), |
| 14857 | TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb), |
| 14858 | TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14859 | TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14860 | TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14861 | TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14862 | TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14863 | TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14864 | TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14865 | TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14866 | TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14867 | TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14868 | TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14869 | TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14870 | TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14871 | TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14872 | TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14873 | TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14874 | TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14875 | TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14876 | TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14877 | TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14878 | TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14879 | TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14880 | TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14881 | TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14882 | TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14883 | TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14884 | TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14885 | TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14886 | TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14887 | TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14888 | TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14889 | TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14890 | TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14891 | TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14892 | TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14893 | TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14894 | TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe), |
| 14895 | UF(rfeib, 9900a00, 1, (RRw), rfe), |
| 14896 | UF(rfeda, 8100a00, 1, (RRw), rfe), |
| 14897 | TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe), |
| 14898 | TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe), |
| 14899 | UF(rfefa, 9900a00, 1, (RRw), rfe), |
| 14900 | UF(rfeea, 8100a00, 1, (RRw), rfe), |
| 14901 | TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe), |
| 14902 | TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), |
| 14903 | TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), |
| 14904 | TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), |
| 14905 | TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), |
| 14906 | TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), |
| 14907 | TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), |
| 14908 | TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), |
| 14909 | TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), |
| 14910 | TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), |
| 14911 | TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14912 | TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14913 | TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), |
| 14914 | TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), |
| 14915 | TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14916 | TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14917 | TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), |
| 14918 | TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), |
| 14919 | TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14920 | TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14921 | TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14922 | TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14923 | TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14924 | TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14925 | TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14926 | TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14927 | TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14928 | TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14929 | TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs), |
| 14930 | UF(srsib, 9cd0500, 1, (I31w), srs), |
| 14931 | UF(srsda, 84d0500, 1, (I31w), srs), |
| 14932 | TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs), |
| 14933 | TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16), |
| 14934 | TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex), |
| 14935 | TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal), |
| 14936 | TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), |
| 14937 | TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), |
| 14938 | TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16), |
| 14939 | |
| 14940 | #undef ARM_VARIANT |
| 14941 | #define ARM_VARIANT &arm_ext_v6k |
| 14942 | #undef THUMB_VARIANT |
| 14943 | #define THUMB_VARIANT &arm_ext_v6k |
| 14944 | tCE(yield, 320f001, yield, 0, (), noargs, t_hint), |
| 14945 | tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint), |
| 14946 | tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint), |
| 14947 | tCE(sev, 320f004, sev, 0, (), noargs, t_hint), |
| 14948 | |
| 14949 | #undef THUMB_VARIANT |
| 14950 | #define THUMB_VARIANT &arm_ext_v6_notm |
| 14951 | TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd), |
| 14952 | TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd), |
| 14953 | |
| 14954 | #undef THUMB_VARIANT |
| 14955 | #define THUMB_VARIANT &arm_ext_v6t2 |
| 14956 | TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), |
| 14957 | TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), |
| 14958 | TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn), |
| 14959 | TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn), |
| 14960 | TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs), |
| 14961 | |
| 14962 | #undef ARM_VARIANT |
| 14963 | #define ARM_VARIANT &arm_ext_v6z |
| 14964 | TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc), |
| 14965 | |
| 14966 | #undef ARM_VARIANT |
| 14967 | #define ARM_VARIANT &arm_ext_v6t2 |
| 14968 | TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc), |
| 14969 | TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi), |
| 14970 | TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx), |
| 14971 | TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx), |
| 14972 | |
| 14973 | TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla), |
| 14974 | TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16), |
| 14975 | TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16), |
| 14976 | TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit), |
| 14977 | |
| 14978 | TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt), |
| 14979 | TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt), |
| 14980 | TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt), |
| 14981 | TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt), |
| 14982 | |
| 14983 | UT(cbnz, b900, 2, (RR, EXP), t_cbz), |
| 14984 | UT(cbz, b100, 2, (RR, EXP), t_cbz), |
| 14985 | /* ARM does not really have an IT instruction, so always allow it. */ |
| 14986 | #undef ARM_VARIANT |
| 14987 | #define ARM_VARIANT &arm_ext_v1 |
| 14988 | TUE(it, 0, bf08, 1, (COND), it, t_it), |
| 14989 | TUE(itt, 0, bf0c, 1, (COND), it, t_it), |
| 14990 | TUE(ite, 0, bf04, 1, (COND), it, t_it), |
| 14991 | TUE(ittt, 0, bf0e, 1, (COND), it, t_it), |
| 14992 | TUE(itet, 0, bf06, 1, (COND), it, t_it), |
| 14993 | TUE(itte, 0, bf0a, 1, (COND), it, t_it), |
| 14994 | TUE(itee, 0, bf02, 1, (COND), it, t_it), |
| 14995 | TUE(itttt, 0, bf0f, 1, (COND), it, t_it), |
| 14996 | TUE(itett, 0, bf07, 1, (COND), it, t_it), |
| 14997 | TUE(ittet, 0, bf0b, 1, (COND), it, t_it), |
| 14998 | TUE(iteet, 0, bf03, 1, (COND), it, t_it), |
| 14999 | TUE(ittte, 0, bf0d, 1, (COND), it, t_it), |
| 15000 | TUE(itete, 0, bf05, 1, (COND), it, t_it), |
| 15001 | TUE(ittee, 0, bf09, 1, (COND), it, t_it), |
| 15002 | TUE(iteee, 0, bf01, 1, (COND), it, t_it), |
| 15003 | |
| 15004 | /* Thumb2 only instructions. */ |
| 15005 | #undef ARM_VARIANT |
| 15006 | #define ARM_VARIANT NULL |
| 15007 | |
| 15008 | TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w), |
| 15009 | TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w), |
| 15010 | TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb), |
| 15011 | TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb), |
| 15012 | |
| 15013 | /* Thumb-2 hardware division instructions (R and M profiles only). */ |
| 15014 | #undef THUMB_VARIANT |
| 15015 | #define THUMB_VARIANT &arm_ext_div |
| 15016 | TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div), |
| 15017 | TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div), |
| 15018 | |
| 15019 | /* ARM V7 instructions. */ |
| 15020 | #undef ARM_VARIANT |
| 15021 | #define ARM_VARIANT &arm_ext_v7 |
| 15022 | #undef THUMB_VARIANT |
| 15023 | #define THUMB_VARIANT &arm_ext_v7 |
| 15024 | TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld), |
| 15025 | TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg), |
| 15026 | TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier), |
| 15027 | TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier), |
| 15028 | TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier), |
| 15029 | |
| 15030 | #undef ARM_VARIANT |
| 15031 | #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */ |
| 15032 | cCE(wfs, e200110, 1, (RR), rd), |
| 15033 | cCE(rfs, e300110, 1, (RR), rd), |
| 15034 | cCE(wfc, e400110, 1, (RR), rd), |
| 15035 | cCE(rfc, e500110, 1, (RR), rd), |
| 15036 | |
| 15037 | cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr), |
| 15038 | cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr), |
| 15039 | cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr), |
| 15040 | cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr), |
| 15041 | |
| 15042 | cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr), |
| 15043 | cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr), |
| 15044 | cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr), |
| 15045 | cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr), |
| 15046 | |
| 15047 | cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm), |
| 15048 | cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm), |
| 15049 | cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm), |
| 15050 | cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm), |
| 15051 | cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm), |
| 15052 | cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm), |
| 15053 | cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm), |
| 15054 | cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm), |
| 15055 | cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm), |
| 15056 | cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm), |
| 15057 | cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm), |
| 15058 | cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm), |
| 15059 | |
| 15060 | cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm), |
| 15061 | cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm), |
| 15062 | cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm), |
| 15063 | cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm), |
| 15064 | cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm), |
| 15065 | cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm), |
| 15066 | cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm), |
| 15067 | cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm), |
| 15068 | cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm), |
| 15069 | cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm), |
| 15070 | cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm), |
| 15071 | cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm), |
| 15072 | |
| 15073 | cCL(abss, e208100, 2, (RF, RF_IF), rd_rm), |
| 15074 | cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm), |
| 15075 | cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm), |
| 15076 | cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm), |
| 15077 | cCL(absd, e208180, 2, (RF, RF_IF), rd_rm), |
| 15078 | cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm), |
| 15079 | cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm), |
| 15080 | cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm), |
| 15081 | cCL(abse, e288100, 2, (RF, RF_IF), rd_rm), |
| 15082 | cCL(absep, e288120, 2, (RF, RF_IF), rd_rm), |
| 15083 | cCL(absem, e288140, 2, (RF, RF_IF), rd_rm), |
| 15084 | cCL(absez, e288160, 2, (RF, RF_IF), rd_rm), |
| 15085 | |
| 15086 | cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm), |
| 15087 | cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm), |
| 15088 | cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm), |
| 15089 | cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm), |
| 15090 | cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm), |
| 15091 | cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm), |
| 15092 | cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm), |
| 15093 | cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm), |
| 15094 | cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm), |
| 15095 | cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm), |
| 15096 | cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm), |
| 15097 | cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm), |
| 15098 | |
| 15099 | cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm), |
| 15100 | cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm), |
| 15101 | cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm), |
| 15102 | cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm), |
| 15103 | cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm), |
| 15104 | cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm), |
| 15105 | cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm), |
| 15106 | cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm), |
| 15107 | cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm), |
| 15108 | cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm), |
| 15109 | cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm), |
| 15110 | cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm), |
| 15111 | |
| 15112 | cCL(logs, e508100, 2, (RF, RF_IF), rd_rm), |
| 15113 | cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm), |
| 15114 | cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm), |
| 15115 | cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm), |
| 15116 | cCL(logd, e508180, 2, (RF, RF_IF), rd_rm), |
| 15117 | cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm), |
| 15118 | cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm), |
| 15119 | cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm), |
| 15120 | cCL(loge, e588100, 2, (RF, RF_IF), rd_rm), |
| 15121 | cCL(logep, e588120, 2, (RF, RF_IF), rd_rm), |
| 15122 | cCL(logem, e588140, 2, (RF, RF_IF), rd_rm), |
| 15123 | cCL(logez, e588160, 2, (RF, RF_IF), rd_rm), |
| 15124 | |
| 15125 | cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm), |
| 15126 | cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm), |
| 15127 | cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm), |
| 15128 | cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm), |
| 15129 | cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm), |
| 15130 | cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm), |
| 15131 | cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm), |
| 15132 | cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm), |
| 15133 | cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm), |
| 15134 | cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm), |
| 15135 | cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm), |
| 15136 | cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm), |
| 15137 | |
| 15138 | cCL(exps, e708100, 2, (RF, RF_IF), rd_rm), |
| 15139 | cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm), |
| 15140 | cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm), |
| 15141 | cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm), |
| 15142 | cCL(expd, e708180, 2, (RF, RF_IF), rd_rm), |
| 15143 | cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm), |
| 15144 | cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm), |
| 15145 | cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm), |
| 15146 | cCL(expe, e788100, 2, (RF, RF_IF), rd_rm), |
| 15147 | cCL(expep, e788120, 2, (RF, RF_IF), rd_rm), |
| 15148 | cCL(expem, e788140, 2, (RF, RF_IF), rd_rm), |
| 15149 | cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm), |
| 15150 | |
| 15151 | cCL(sins, e808100, 2, (RF, RF_IF), rd_rm), |
| 15152 | cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm), |
| 15153 | cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm), |
| 15154 | cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm), |
| 15155 | cCL(sind, e808180, 2, (RF, RF_IF), rd_rm), |
| 15156 | cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm), |
| 15157 | cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm), |
| 15158 | cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm), |
| 15159 | cCL(sine, e888100, 2, (RF, RF_IF), rd_rm), |
| 15160 | cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm), |
| 15161 | cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm), |
| 15162 | cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm), |
| 15163 | |
| 15164 | cCL(coss, e908100, 2, (RF, RF_IF), rd_rm), |
| 15165 | cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm), |
| 15166 | cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm), |
| 15167 | cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm), |
| 15168 | cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm), |
| 15169 | cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm), |
| 15170 | cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm), |
| 15171 | cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm), |
| 15172 | cCL(cose, e988100, 2, (RF, RF_IF), rd_rm), |
| 15173 | cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm), |
| 15174 | cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm), |
| 15175 | cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm), |
| 15176 | |
| 15177 | cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm), |
| 15178 | cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm), |
| 15179 | cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm), |
| 15180 | cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm), |
| 15181 | cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm), |
| 15182 | cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm), |
| 15183 | cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm), |
| 15184 | cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm), |
| 15185 | cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm), |
| 15186 | cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm), |
| 15187 | cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm), |
| 15188 | cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm), |
| 15189 | |
| 15190 | cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm), |
| 15191 | cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm), |
| 15192 | cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm), |
| 15193 | cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm), |
| 15194 | cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm), |
| 15195 | cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm), |
| 15196 | cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm), |
| 15197 | cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm), |
| 15198 | cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm), |
| 15199 | cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm), |
| 15200 | cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm), |
| 15201 | cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm), |
| 15202 | |
| 15203 | cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm), |
| 15204 | cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm), |
| 15205 | cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm), |
| 15206 | cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm), |
| 15207 | cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm), |
| 15208 | cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm), |
| 15209 | cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm), |
| 15210 | cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm), |
| 15211 | cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm), |
| 15212 | cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm), |
| 15213 | cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm), |
| 15214 | cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm), |
| 15215 | |
| 15216 | cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm), |
| 15217 | cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm), |
| 15218 | cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm), |
| 15219 | cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm), |
| 15220 | cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm), |
| 15221 | cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm), |
| 15222 | cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm), |
| 15223 | cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm), |
| 15224 | cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm), |
| 15225 | cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm), |
| 15226 | cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm), |
| 15227 | cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm), |
| 15228 | |
| 15229 | cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm), |
| 15230 | cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm), |
| 15231 | cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm), |
| 15232 | cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm), |
| 15233 | cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm), |
| 15234 | cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm), |
| 15235 | cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm), |
| 15236 | cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm), |
| 15237 | cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm), |
| 15238 | cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm), |
| 15239 | cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm), |
| 15240 | cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm), |
| 15241 | |
| 15242 | cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm), |
| 15243 | cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm), |
| 15244 | cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm), |
| 15245 | cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm), |
| 15246 | cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm), |
| 15247 | cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm), |
| 15248 | cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm), |
| 15249 | cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm), |
| 15250 | cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm), |
| 15251 | cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm), |
| 15252 | cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm), |
| 15253 | cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm), |
| 15254 | |
| 15255 | cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15256 | cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15257 | cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15258 | cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15259 | cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15260 | cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15261 | cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15262 | cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15263 | cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15264 | cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15265 | cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15266 | cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15267 | |
| 15268 | cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15269 | cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15270 | cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15271 | cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15272 | cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15273 | cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15274 | cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15275 | cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15276 | cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15277 | cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15278 | cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15279 | cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15280 | |
| 15281 | cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15282 | cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15283 | cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15284 | cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15285 | cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15286 | cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15287 | cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15288 | cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15289 | cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15290 | cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15291 | cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15292 | cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15293 | |
| 15294 | cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15295 | cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15296 | cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15297 | cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15298 | cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15299 | cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15300 | cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15301 | cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15302 | cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15303 | cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15304 | cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15305 | cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15306 | |
| 15307 | cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15308 | cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15309 | cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15310 | cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15311 | cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15312 | cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15313 | cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15314 | cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15315 | cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15316 | cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15317 | cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15318 | cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15319 | |
| 15320 | cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15321 | cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15322 | cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15323 | cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15324 | cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15325 | cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15326 | cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15327 | cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15328 | cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15329 | cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15330 | cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15331 | cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15332 | |
| 15333 | cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15334 | cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15335 | cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15336 | cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15337 | cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15338 | cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15339 | cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15340 | cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15341 | cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15342 | cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15343 | cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15344 | cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15345 | |
| 15346 | cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15347 | cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15348 | cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15349 | cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15350 | cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15351 | cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15352 | cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15353 | cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15354 | cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15355 | cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15356 | cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15357 | cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15358 | |
| 15359 | cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15360 | cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15361 | cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15362 | cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15363 | cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15364 | cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15365 | cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15366 | cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15367 | cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15368 | cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15369 | cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15370 | cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15371 | |
| 15372 | cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15373 | cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15374 | cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15375 | cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15376 | cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15377 | cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15378 | cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15379 | cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15380 | cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15381 | cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15382 | cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15383 | cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15384 | |
| 15385 | cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15386 | cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15387 | cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15388 | cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15389 | cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15390 | cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15391 | cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15392 | cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15393 | cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15394 | cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15395 | cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15396 | cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15397 | |
| 15398 | cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15399 | cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15400 | cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15401 | cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15402 | cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15403 | cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15404 | cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15405 | cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15406 | cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15407 | cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15408 | cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15409 | cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15410 | |
| 15411 | cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15412 | cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15413 | cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15414 | cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15415 | cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15416 | cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15417 | cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15418 | cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15419 | cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15420 | cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15421 | cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15422 | cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm), |
| 15423 | |
| 15424 | cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp), |
| 15425 | C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp), |
| 15426 | cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp), |
| 15427 | C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp), |
| 15428 | |
| 15429 | cCL(flts, e000110, 2, (RF, RR), rn_rd), |
| 15430 | cCL(fltsp, e000130, 2, (RF, RR), rn_rd), |
| 15431 | cCL(fltsm, e000150, 2, (RF, RR), rn_rd), |
| 15432 | cCL(fltsz, e000170, 2, (RF, RR), rn_rd), |
| 15433 | cCL(fltd, e000190, 2, (RF, RR), rn_rd), |
| 15434 | cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd), |
| 15435 | cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd), |
| 15436 | cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd), |
| 15437 | cCL(flte, e080110, 2, (RF, RR), rn_rd), |
| 15438 | cCL(fltep, e080130, 2, (RF, RR), rn_rd), |
| 15439 | cCL(fltem, e080150, 2, (RF, RR), rn_rd), |
| 15440 | cCL(fltez, e080170, 2, (RF, RR), rn_rd), |
| 15441 | |
| 15442 | /* The implementation of the FIX instruction is broken on some |
| 15443 | assemblers, in that it accepts a precision specifier as well as a |
| 15444 | rounding specifier, despite the fact that this is meaningless. |
| 15445 | To be more compatible, we accept it as well, though of course it |
| 15446 | does not set any bits. */ |
| 15447 | cCE(fix, e100110, 2, (RR, RF), rd_rm), |
| 15448 | cCL(fixp, e100130, 2, (RR, RF), rd_rm), |
| 15449 | cCL(fixm, e100150, 2, (RR, RF), rd_rm), |
| 15450 | cCL(fixz, e100170, 2, (RR, RF), rd_rm), |
| 15451 | cCL(fixsp, e100130, 2, (RR, RF), rd_rm), |
| 15452 | cCL(fixsm, e100150, 2, (RR, RF), rd_rm), |
| 15453 | cCL(fixsz, e100170, 2, (RR, RF), rd_rm), |
| 15454 | cCL(fixdp, e100130, 2, (RR, RF), rd_rm), |
| 15455 | cCL(fixdm, e100150, 2, (RR, RF), rd_rm), |
| 15456 | cCL(fixdz, e100170, 2, (RR, RF), rd_rm), |
| 15457 | cCL(fixep, e100130, 2, (RR, RF), rd_rm), |
| 15458 | cCL(fixem, e100150, 2, (RR, RF), rd_rm), |
| 15459 | cCL(fixez, e100170, 2, (RR, RF), rd_rm), |
| 15460 | |
| 15461 | /* Instructions that were new with the real FPA, call them V2. */ |
| 15462 | #undef ARM_VARIANT |
| 15463 | #define ARM_VARIANT &fpu_fpa_ext_v2 |
| 15464 | cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm), |
| 15465 | cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm), |
| 15466 | cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm), |
| 15467 | cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm), |
| 15468 | cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm), |
| 15469 | cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm), |
| 15470 | |
| 15471 | #undef ARM_VARIANT |
| 15472 | #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */ |
| 15473 | /* Moves and type conversions. */ |
| 15474 | cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic), |
| 15475 | cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp), |
| 15476 | cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg), |
| 15477 | cCE(fmstat, ef1fa10, 0, (), noargs), |
| 15478 | cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic), |
| 15479 | cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic), |
| 15480 | cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic), |
| 15481 | cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic), |
| 15482 | cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic), |
| 15483 | cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic), |
| 15484 | cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn), |
| 15485 | cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd), |
| 15486 | |
| 15487 | /* Memory operations. */ |
| 15488 | cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst), |
| 15489 | cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst), |
| 15490 | cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), |
| 15491 | cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), |
| 15492 | cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), |
| 15493 | cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), |
| 15494 | cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), |
| 15495 | cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), |
| 15496 | cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), |
| 15497 | cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), |
| 15498 | cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), |
| 15499 | cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), |
| 15500 | cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), |
| 15501 | cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), |
| 15502 | cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), |
| 15503 | cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), |
| 15504 | cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), |
| 15505 | cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), |
| 15506 | |
| 15507 | /* Monadic operations. */ |
| 15508 | cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic), |
| 15509 | cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic), |
| 15510 | cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic), |
| 15511 | |
| 15512 | /* Dyadic operations. */ |
| 15513 | cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15514 | cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15515 | cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15516 | cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15517 | cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15518 | cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15519 | cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15520 | cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15521 | cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), |
| 15522 | |
| 15523 | /* Comparisons. */ |
| 15524 | cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic), |
| 15525 | cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z), |
| 15526 | cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic), |
| 15527 | cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z), |
| 15528 | |
| 15529 | #undef ARM_VARIANT |
| 15530 | #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */ |
| 15531 | /* Moves and type conversions. */ |
| 15532 | cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm), |
| 15533 | cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt), |
| 15534 | cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), |
| 15535 | cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd), |
| 15536 | cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd), |
| 15537 | cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn), |
| 15538 | cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn), |
| 15539 | cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt), |
| 15540 | cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt), |
| 15541 | cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt), |
| 15542 | cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), |
| 15543 | cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt), |
| 15544 | cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), |
| 15545 | |
| 15546 | /* Memory operations. */ |
| 15547 | cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst), |
| 15548 | cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst), |
| 15549 | cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), |
| 15550 | cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), |
| 15551 | cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), |
| 15552 | cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), |
| 15553 | cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), |
| 15554 | cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), |
| 15555 | cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), |
| 15556 | cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), |
| 15557 | |
| 15558 | /* Monadic operations. */ |
| 15559 | cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm), |
| 15560 | cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm), |
| 15561 | cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm), |
| 15562 | |
| 15563 | /* Dyadic operations. */ |
| 15564 | cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15565 | cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15566 | cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15567 | cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15568 | cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15569 | cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15570 | cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15571 | cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15572 | cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), |
| 15573 | |
| 15574 | /* Comparisons. */ |
| 15575 | cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm), |
| 15576 | cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd), |
| 15577 | cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm), |
| 15578 | cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd), |
| 15579 | |
| 15580 | #undef ARM_VARIANT |
| 15581 | #define ARM_VARIANT &fpu_vfp_ext_v2 |
| 15582 | cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2), |
| 15583 | cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2), |
| 15584 | cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn), |
| 15585 | cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm), |
| 15586 | |
| 15587 | /* Instructions which may belong to either the Neon or VFP instruction sets. |
| 15588 | Individual encoder functions perform additional architecture checks. */ |
| 15589 | #undef ARM_VARIANT |
| 15590 | #define ARM_VARIANT &fpu_vfp_ext_v1xd |
| 15591 | #undef THUMB_VARIANT |
| 15592 | #define THUMB_VARIANT &fpu_vfp_ext_v1xd |
| 15593 | /* These mnemonics are unique to VFP. */ |
| 15594 | NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt), |
| 15595 | NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div), |
| 15596 | nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), |
| 15597 | nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), |
| 15598 | nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), |
| 15599 | nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), |
| 15600 | nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), |
| 15601 | NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push), |
| 15602 | NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop), |
| 15603 | NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz), |
| 15604 | |
| 15605 | /* Mnemonics shared by Neon and VFP. */ |
| 15606 | nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul), |
| 15607 | nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar), |
| 15608 | nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar), |
| 15609 | |
| 15610 | nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i), |
| 15611 | nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i), |
| 15612 | |
| 15613 | NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg), |
| 15614 | NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg), |
| 15615 | |
| 15616 | NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm), |
| 15617 | NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm), |
| 15618 | NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm), |
| 15619 | NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm), |
| 15620 | NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm), |
| 15621 | NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm), |
| 15622 | NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str), |
| 15623 | NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str), |
| 15624 | |
| 15625 | nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt), |
| 15626 | |
| 15627 | /* NOTE: All VMOV encoding is special-cased! */ |
| 15628 | NCE(vmov, 0, 1, (VMOV), neon_mov), |
| 15629 | NCE(vmovq, 0, 1, (VMOV), neon_mov), |
| 15630 | |
| 15631 | #undef THUMB_VARIANT |
| 15632 | #define THUMB_VARIANT &fpu_neon_ext_v1 |
| 15633 | #undef ARM_VARIANT |
| 15634 | #define ARM_VARIANT &fpu_neon_ext_v1 |
| 15635 | /* Data processing with three registers of the same length. */ |
| 15636 | /* integer ops, valid types S8 S16 S32 U8 U16 U32. */ |
| 15637 | NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su), |
| 15638 | NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su), |
| 15639 | NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), |
| 15640 | NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), |
| 15641 | NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), |
| 15642 | NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), |
| 15643 | NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), |
| 15644 | NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), |
| 15645 | /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */ |
| 15646 | NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), |
| 15647 | NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), |
| 15648 | NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), |
| 15649 | NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), |
| 15650 | NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), |
| 15651 | NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), |
| 15652 | NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), |
| 15653 | NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), |
| 15654 | /* If not immediate, fall back to neon_dyadic_i64_su. |
| 15655 | shl_imm should accept I8 I16 I32 I64, |
| 15656 | qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */ |
| 15657 | nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm), |
| 15658 | nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm), |
| 15659 | nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm), |
| 15660 | nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm), |
| 15661 | /* Logic ops, types optional & ignored. */ |
| 15662 | nUF(vand, vand, 2, (RNDQ, NILO), neon_logic), |
| 15663 | nUF(vandq, vand, 2, (RNQ, NILO), neon_logic), |
| 15664 | nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic), |
| 15665 | nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic), |
| 15666 | nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic), |
| 15667 | nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic), |
| 15668 | nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic), |
| 15669 | nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic), |
| 15670 | nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic), |
| 15671 | nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic), |
| 15672 | /* Bitfield ops, untyped. */ |
| 15673 | NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), |
| 15674 | NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield), |
| 15675 | NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), |
| 15676 | NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield), |
| 15677 | NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), |
| 15678 | NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield), |
| 15679 | /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */ |
| 15680 | nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), |
| 15681 | nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), |
| 15682 | nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), |
| 15683 | nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), |
| 15684 | nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), |
| 15685 | nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), |
| 15686 | /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall |
| 15687 | back to neon_dyadic_if_su. */ |
| 15688 | nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp), |
| 15689 | nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp), |
| 15690 | nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp), |
| 15691 | nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp), |
| 15692 | nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv), |
| 15693 | nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv), |
| 15694 | nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv), |
| 15695 | nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv), |
| 15696 | /* Comparison. Type I8 I16 I32 F32. */ |
| 15697 | nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq), |
| 15698 | nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq), |
| 15699 | /* As above, D registers only. */ |
| 15700 | nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d), |
| 15701 | nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d), |
| 15702 | /* Int and float variants, signedness unimportant. */ |
| 15703 | nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar), |
| 15704 | nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar), |
| 15705 | nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d), |
| 15706 | /* Add/sub take types I8 I16 I32 I64 F32. */ |
| 15707 | nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i), |
| 15708 | nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i), |
| 15709 | /* vtst takes sizes 8, 16, 32. */ |
| 15710 | NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst), |
| 15711 | NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst), |
| 15712 | /* VMUL takes I8 I16 I32 F32 P8. */ |
| 15713 | nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul), |
| 15714 | /* VQD{R}MULH takes S16 S32. */ |
| 15715 | nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh), |
| 15716 | nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh), |
| 15717 | nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh), |
| 15718 | nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh), |
| 15719 | NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute), |
| 15720 | NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute), |
| 15721 | NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute), |
| 15722 | NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute), |
| 15723 | NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv), |
| 15724 | NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv), |
| 15725 | NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv), |
| 15726 | NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv), |
| 15727 | NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), |
| 15728 | NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step), |
| 15729 | NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), |
| 15730 | NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step), |
| 15731 | |
| 15732 | /* Two address, int/float. Types S8 S16 S32 F32. */ |
| 15733 | NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg), |
| 15734 | NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg), |
| 15735 | |
| 15736 | /* Data processing with two registers and a shift amount. */ |
| 15737 | /* Right shifts, and variants with rounding. |
| 15738 | Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */ |
| 15739 | NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm), |
| 15740 | NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm), |
| 15741 | NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm), |
| 15742 | NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm), |
| 15743 | NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm), |
| 15744 | NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm), |
| 15745 | NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm), |
| 15746 | NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm), |
| 15747 | /* Shift and insert. Sizes accepted 8 16 32 64. */ |
| 15748 | NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli), |
| 15749 | NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli), |
| 15750 | NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri), |
| 15751 | NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri), |
| 15752 | /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */ |
| 15753 | NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm), |
| 15754 | NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm), |
| 15755 | /* Right shift immediate, saturating & narrowing, with rounding variants. |
| 15756 | Types accepted S16 S32 S64 U16 U32 U64. */ |
| 15757 | NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow), |
| 15758 | NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow), |
| 15759 | /* As above, unsigned. Types accepted S16 S32 S64. */ |
| 15760 | NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u), |
| 15761 | NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u), |
| 15762 | /* Right shift narrowing. Types accepted I16 I32 I64. */ |
| 15763 | NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow), |
| 15764 | NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow), |
| 15765 | /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */ |
| 15766 | nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll), |
| 15767 | /* CVT with optional immediate for fixed-point variant. */ |
| 15768 | nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt), |
| 15769 | |
| 15770 | nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn), |
| 15771 | nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn), |
| 15772 | |
| 15773 | /* Data processing, three registers of different lengths. */ |
| 15774 | /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */ |
| 15775 | NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal), |
| 15776 | NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long), |
| 15777 | NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long), |
| 15778 | NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long), |
| 15779 | /* If not scalar, fall back to neon_dyadic_long. |
| 15780 | Vector types as above, scalar types S16 S32 U16 U32. */ |
| 15781 | nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long), |
| 15782 | nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long), |
| 15783 | /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */ |
| 15784 | NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide), |
| 15785 | NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide), |
| 15786 | /* Dyadic, narrowing insns. Types I16 I32 I64. */ |
| 15787 | NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), |
| 15788 | NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), |
| 15789 | NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), |
| 15790 | NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), |
| 15791 | /* Saturating doubling multiplies. Types S16 S32. */ |
| 15792 | nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), |
| 15793 | nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), |
| 15794 | nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), |
| 15795 | /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types |
| 15796 | S16 S32 U16 U32. */ |
| 15797 | nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull), |
| 15798 | |
| 15799 | /* Extract. Size 8. */ |
| 15800 | NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext), |
| 15801 | NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext), |
| 15802 | |
| 15803 | /* Two registers, miscellaneous. */ |
| 15804 | /* Reverse. Sizes 8 16 32 (must be < size in opcode). */ |
| 15805 | NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev), |
| 15806 | NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev), |
| 15807 | NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev), |
| 15808 | NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev), |
| 15809 | NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev), |
| 15810 | NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev), |
| 15811 | /* Vector replicate. Sizes 8 16 32. */ |
| 15812 | nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup), |
| 15813 | nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup), |
| 15814 | /* VMOVL. Types S8 S16 S32 U8 U16 U32. */ |
| 15815 | NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl), |
| 15816 | /* VMOVN. Types I16 I32 I64. */ |
| 15817 | nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn), |
| 15818 | /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */ |
| 15819 | nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn), |
| 15820 | /* VQMOVUN. Types S16 S32 S64. */ |
| 15821 | nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun), |
| 15822 | /* VZIP / VUZP. Sizes 8 16 32. */ |
| 15823 | NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp), |
| 15824 | NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp), |
| 15825 | NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp), |
| 15826 | NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp), |
| 15827 | /* VQABS / VQNEG. Types S8 S16 S32. */ |
| 15828 | NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg), |
| 15829 | NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg), |
| 15830 | NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg), |
| 15831 | NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg), |
| 15832 | /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */ |
| 15833 | NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long), |
| 15834 | NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long), |
| 15835 | NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long), |
| 15836 | NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long), |
| 15837 | /* Reciprocal estimates. Types U32 F32. */ |
| 15838 | NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est), |
| 15839 | NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est), |
| 15840 | NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est), |
| 15841 | NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est), |
| 15842 | /* VCLS. Types S8 S16 S32. */ |
| 15843 | NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls), |
| 15844 | NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls), |
| 15845 | /* VCLZ. Types I8 I16 I32. */ |
| 15846 | NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz), |
| 15847 | NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz), |
| 15848 | /* VCNT. Size 8. */ |
| 15849 | NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt), |
| 15850 | NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt), |
| 15851 | /* Two address, untyped. */ |
| 15852 | NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp), |
| 15853 | NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp), |
| 15854 | /* VTRN. Sizes 8 16 32. */ |
| 15855 | nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn), |
| 15856 | nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn), |
| 15857 | |
| 15858 | /* Table lookup. Size 8. */ |
| 15859 | NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx), |
| 15860 | NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx), |
| 15861 | |
| 15862 | #undef THUMB_VARIANT |
| 15863 | #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext |
| 15864 | #undef ARM_VARIANT |
| 15865 | #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext |
| 15866 | /* Neon element/structure load/store. */ |
| 15867 | nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx), |
| 15868 | nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx), |
| 15869 | nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx), |
| 15870 | nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx), |
| 15871 | nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx), |
| 15872 | nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx), |
| 15873 | nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx), |
| 15874 | nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx), |
| 15875 | |
| 15876 | #undef THUMB_VARIANT |
| 15877 | #define THUMB_VARIANT &fpu_vfp_ext_v3 |
| 15878 | #undef ARM_VARIANT |
| 15879 | #define ARM_VARIANT &fpu_vfp_ext_v3 |
| 15880 | cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const), |
| 15881 | cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const), |
| 15882 | cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16), |
| 15883 | cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16), |
| 15884 | cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32), |
| 15885 | cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32), |
| 15886 | cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16), |
| 15887 | cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16), |
| 15888 | cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32), |
| 15889 | cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32), |
| 15890 | cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16), |
| 15891 | cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16), |
| 15892 | cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32), |
| 15893 | cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32), |
| 15894 | cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16), |
| 15895 | cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16), |
| 15896 | cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32), |
| 15897 | cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32), |
| 15898 | |
| 15899 | #undef THUMB_VARIANT |
| 15900 | #undef ARM_VARIANT |
| 15901 | #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */ |
| 15902 | cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia), |
| 15903 | cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia), |
| 15904 | cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), |
| 15905 | cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), |
| 15906 | cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), |
| 15907 | cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), |
| 15908 | cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar), |
| 15909 | cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra), |
| 15910 | |
| 15911 | #undef ARM_VARIANT |
| 15912 | #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */ |
| 15913 | cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc), |
| 15914 | cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc), |
| 15915 | cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc), |
| 15916 | cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd), |
| 15917 | cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd), |
| 15918 | cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd), |
| 15919 | cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc), |
| 15920 | cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc), |
| 15921 | cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc), |
| 15922 | cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm), |
| 15923 | cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm), |
| 15924 | cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm), |
| 15925 | cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm), |
| 15926 | cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm), |
| 15927 | cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm), |
| 15928 | cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr), |
| 15929 | cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr), |
| 15930 | cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr), |
| 15931 | cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd), |
| 15932 | cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn), |
| 15933 | cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia), |
| 15934 | cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia), |
| 15935 | cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia), |
| 15936 | cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia), |
| 15937 | cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia), |
| 15938 | cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia), |
| 15939 | cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn), |
| 15940 | cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn), |
| 15941 | cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn), |
| 15942 | cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn), |
| 15943 | cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm), |
| 15944 | cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc), |
| 15945 | cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc), |
| 15946 | cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc), |
| 15947 | cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn), |
| 15948 | cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn), |
| 15949 | cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn), |
| 15950 | cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15951 | cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15952 | cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15953 | cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15954 | cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15955 | cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15956 | cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15957 | cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15958 | cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15959 | cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni), |
| 15960 | cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15961 | cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15962 | cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15963 | cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15964 | cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15965 | cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15966 | cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15967 | cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15968 | cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15969 | cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15970 | cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15971 | cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15972 | cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15973 | cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15974 | cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15975 | cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15976 | cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15977 | cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15978 | cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15979 | cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh), |
| 15980 | cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh), |
| 15981 | cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw), |
| 15982 | cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd), |
| 15983 | cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15984 | cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15985 | cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15986 | cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15987 | cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15988 | cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15989 | cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15990 | cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15991 | cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15992 | cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15993 | cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15994 | cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15995 | cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15996 | cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15997 | cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15998 | cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 15999 | cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16000 | cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16001 | cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov), |
| 16002 | cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16003 | cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16004 | cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16005 | cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16006 | cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16007 | cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16008 | cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16009 | cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16010 | cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16011 | cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16012 | cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16013 | cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16014 | cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16015 | cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16016 | cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16017 | cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16018 | cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16019 | cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16020 | cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16021 | cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16022 | cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16023 | cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh), |
| 16024 | cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16025 | cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16026 | cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16027 | cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16028 | cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16029 | cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16030 | cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16031 | cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16032 | cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16033 | cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16034 | cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16035 | cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16036 | cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16037 | cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16038 | cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16039 | cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16040 | cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), |
| 16041 | cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), |
| 16042 | cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh), |
| 16043 | cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh), |
| 16044 | cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw), |
| 16045 | cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd), |
| 16046 | cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16047 | cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16048 | cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16049 | cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16050 | cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16051 | cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16052 | cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16053 | cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16054 | cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16055 | cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn), |
| 16056 | cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn), |
| 16057 | cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn), |
| 16058 | cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn), |
| 16059 | cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn), |
| 16060 | cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn), |
| 16061 | cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16062 | cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16063 | cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16064 | cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn), |
| 16065 | cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn), |
| 16066 | cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn), |
| 16067 | cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn), |
| 16068 | cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn), |
| 16069 | cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn), |
| 16070 | cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16071 | cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16072 | cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16073 | cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16074 | cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero), |
| 16075 | |
| 16076 | #undef ARM_VARIANT |
| 16077 | #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */ |
| 16078 | cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc), |
| 16079 | cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc), |
| 16080 | cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc), |
| 16081 | cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn), |
| 16082 | cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn), |
| 16083 | cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn), |
| 16084 | cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16085 | cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16086 | cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16087 | cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16088 | cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16089 | cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16090 | cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16091 | cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16092 | cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16093 | cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16094 | cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16095 | cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16096 | cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16097 | cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16098 | cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge), |
| 16099 | cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16100 | cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16101 | cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16102 | cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16103 | cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16104 | cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16105 | cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16106 | cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16107 | cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16108 | cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16109 | cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16110 | cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16111 | cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16112 | cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16113 | cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16114 | cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16115 | cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16116 | cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16117 | cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16118 | cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16119 | cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16120 | cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16121 | cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16122 | cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16123 | cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16124 | cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16125 | cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16126 | cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16127 | cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16128 | cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16129 | cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16130 | cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16131 | cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16132 | cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16133 | cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16134 | cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), |
| 16135 | |
| 16136 | #undef ARM_VARIANT |
| 16137 | #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */ |
| 16138 | cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr), |
| 16139 | cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr), |
| 16140 | cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr), |
| 16141 | cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr), |
| 16142 | cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr), |
| 16143 | cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr), |
| 16144 | cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr), |
| 16145 | cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr), |
| 16146 | cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd), |
| 16147 | cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn), |
| 16148 | cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd), |
| 16149 | cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn), |
| 16150 | cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd), |
| 16151 | cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn), |
| 16152 | cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd), |
| 16153 | cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn), |
| 16154 | cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd), |
| 16155 | cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn), |
| 16156 | cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn), |
| 16157 | cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn), |
| 16158 | cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn), |
| 16159 | cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn), |
| 16160 | cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn), |
| 16161 | cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn), |
| 16162 | cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn), |
| 16163 | cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn), |
| 16164 | cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn), |
| 16165 | cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn), |
| 16166 | cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc), |
| 16167 | cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd), |
| 16168 | cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn), |
| 16169 | cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn), |
| 16170 | cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn), |
| 16171 | cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn), |
| 16172 | cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn), |
| 16173 | cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn), |
| 16174 | cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn), |
| 16175 | cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn), |
| 16176 | cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn), |
| 16177 | cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn), |
| 16178 | cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn), |
| 16179 | cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn), |
| 16180 | cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple), |
| 16181 | cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple), |
| 16182 | cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift), |
| 16183 | cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift), |
| 16184 | cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm), |
| 16185 | cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm), |
| 16186 | cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm), |
| 16187 | cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm), |
| 16188 | cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn), |
| 16189 | cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn), |
| 16190 | cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn), |
| 16191 | cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn), |
| 16192 | cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm), |
| 16193 | cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm), |
| 16194 | cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm), |
| 16195 | cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm), |
| 16196 | cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm), |
| 16197 | cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm), |
| 16198 | cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn), |
| 16199 | cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn), |
| 16200 | cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn), |
| 16201 | cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn), |
| 16202 | cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm), |
| 16203 | cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm), |
| 16204 | cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm), |
| 16205 | cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm), |
| 16206 | cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm), |
| 16207 | cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm), |
| 16208 | cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm), |
| 16209 | cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm), |
| 16210 | cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad), |
| 16211 | cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad), |
| 16212 | cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), |
| 16213 | cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), |
| 16214 | }; |
| 16215 | #undef ARM_VARIANT |
| 16216 | #undef THUMB_VARIANT |
| 16217 | #undef TCE |
| 16218 | #undef TCM |
| 16219 | #undef TUE |
| 16220 | #undef TUF |
| 16221 | #undef TCC |
| 16222 | #undef cCE |
| 16223 | #undef cCL |
| 16224 | #undef C3E |
| 16225 | #undef CE |
| 16226 | #undef CM |
| 16227 | #undef UE |
| 16228 | #undef UF |
| 16229 | #undef UT |
| 16230 | #undef NUF |
| 16231 | #undef nUF |
| 16232 | #undef NCE |
| 16233 | #undef nCE |
| 16234 | #undef OPS0 |
| 16235 | #undef OPS1 |
| 16236 | #undef OPS2 |
| 16237 | #undef OPS3 |
| 16238 | #undef OPS4 |
| 16239 | #undef OPS5 |
| 16240 | #undef OPS6 |
| 16241 | #undef do_0 |
| 16242 | \f |
| 16243 | /* MD interface: bits in the object file. */ |
| 16244 | |
| 16245 | /* Turn an integer of n bytes (in val) into a stream of bytes appropriate |
| 16246 | for use in the a.out file, and stores them in the array pointed to by buf. |
| 16247 | This knows about the endian-ness of the target machine and does |
| 16248 | THE RIGHT THING, whatever it is. Possible values for n are 1 (byte) |
   2 (short) and 4 (long).  Floating numbers are put out as a series of
| 16250 | LITTLENUMS (shorts, here at least). */ |
| 16251 | |
| 16252 | void |
| 16253 | md_number_to_chars (char * buf, valueT val, int n) |
| 16254 | { |
| 16255 | if (target_big_endian) |
| 16256 | number_to_chars_bigendian (buf, val, n); |
| 16257 | else |
| 16258 | number_to_chars_littleendian (buf, val, n); |
| 16259 | } |
| 16260 | |
| 16261 | static valueT |
| 16262 | md_chars_to_number (char * buf, int n) |
| 16263 | { |
| 16264 | valueT result = 0; |
| 16265 | unsigned char * where = (unsigned char *) buf; |
| 16266 | |
| 16267 | if (target_big_endian) |
| 16268 | { |
| 16269 | while (n--) |
| 16270 | { |
| 16271 | result <<= 8; |
| 16272 | result |= (*where++ & 255); |
| 16273 | } |
| 16274 | } |
| 16275 | else |
| 16276 | { |
| 16277 | while (n--) |
| 16278 | { |
| 16279 | result <<= 8; |
| 16280 | result |= (where[n] & 255); |
| 16281 | } |
| 16282 | } |
| 16283 | |
| 16284 | return result; |
| 16285 | } |
| 16286 | |
| 16287 | /* MD interface: Sections. */ |
| 16288 | |
| 16289 | /* Estimate the size of a frag before relaxing. Assume everything fits in |
| 16290 | 2 bytes. */ |
| 16291 | |
| 16292 | int |
| 16293 | md_estimate_size_before_relax (fragS * fragp, |
| 16294 | segT segtype ATTRIBUTE_UNUSED) |
| 16295 | { |
| 16296 | fragp->fr_var = 2; |
| 16297 | return 2; |
| 16298 | } |
| 16299 | |
/* Convert a machine dependent frag.  Called once relaxation has
   settled: rewrite a relaxed Thumb instruction into its final 16-bit
   (fr_var == 2) or 32-bit (fr_var == 4) encoding and emit the fixup
   that will later fill in the immediate/offset field.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction being relaxed sits at the end of the fixed part
     of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the fixup expression from the frag's symbol and offset.  */
  if (fragp->fr_symbol) {
    exp.X_op = O_symbol;
    exp.X_add_symbol = fragp->fr_symbol;
  } else {
    exp.X_op = O_constant;
  }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Copy the register fields from the narrow encoding into the
	     wide one.  Top-nibble values 4 and 9 are the PC- and
	     SP-relative narrow forms, which hold Rd in bits 8-10;
	     the register forms hold Rd in bits 0-2 and Rn in 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* Select the immediate-offset addressing form.
	     NOTE(review): confirm the 0xc00 bits against the T32
	     load/store encoding tables.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the second PC-relative load form is marked PC-relative
	 for relocation purposes.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy the destination register field into the wide form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* The narrow encoding needs the offset pre-adjusted by the
	     PC bias.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs encode Rd low in the wide form; cmp/cmn put the
	     operand register 8 bits higher.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  /* Switch to the modified-immediate form of the instruction.
	     NOTE(review): verify the bit pattern against the T32
	     data-processing (modified immediate) encoding.  */
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition code field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  /* Copy the destination register field.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd and Rn from the narrow encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* The reloc depends on which immediate encoding the chosen
	     wide opcode uses (distinguished here by bit 20).  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort();
    }
  /* Emit the fixup covering the (now final-sized) instruction and
     account for it in the fixed part of the frag.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
| 16465 | |
| 16466 | /* Return the size of a relaxable immediate operand instruction. |
| 16467 | SHIFT and SIZE specify the form of the allowable immediate. */ |
| 16468 | static int |
| 16469 | relax_immediate (fragS *fragp, int size, int shift) |
| 16470 | { |
| 16471 | offsetT offset; |
| 16472 | offsetT mask; |
| 16473 | offsetT low; |
| 16474 | |
| 16475 | /* ??? Should be able to do better than this. */ |
| 16476 | if (fragp->fr_symbol) |
| 16477 | return 4; |
| 16478 | |
| 16479 | low = (1 << shift) - 1; |
| 16480 | mask = (1 << (shift + size)) - (1 << shift); |
| 16481 | offset = fragp->fr_offset; |
| 16482 | /* Force misaligned offsets to 32-bit variant. */ |
| 16483 | if (offset & low) |
| 16484 | return -4; |
| 16485 | if (offset & ~mask) |
| 16486 | return 4; |
| 16487 | return 2; |
| 16488 | } |
| 16489 | |
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */
static int
relax_adr (fragS *fragp, asection *sec)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section. */
  if (!S_IS_DEFINED(fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
  addr = fragp->fr_address + fragp->fr_fix;
  /* The PC base used by the narrow encoding is the instruction address
     plus 4, rounded down to a word boundary.  */
  addr = (addr + 4) & ~3;
  /* Fix the insn as the 4-byte version if the target address is not
     sufficiently aligned.  This prevents an infinite loop when two
     instructions have contradictory range/alignment requirements.  */
  if (val & 3)
    return -4;
  val -= addr;
  /* The narrow form encodes an unsigned word offset of 0..1020.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
| 16516 | |
| 16517 | /* Return the size of a relaxable add/sub immediate instruction. */ |
| 16518 | static int |
| 16519 | relax_addsub (fragS *fragp, asection *sec) |
| 16520 | { |
| 16521 | char *buf; |
| 16522 | int op; |
| 16523 | |
| 16524 | buf = fragp->fr_literal + fragp->fr_fix; |
| 16525 | op = bfd_get_16(sec->owner, buf); |
| 16526 | if ((op & 0xf) == ((op >> 4) & 0xf)) |
| 16527 | return relax_immediate (fragp, 8, 0); |
| 16528 | else |
| 16529 | return relax_immediate (fragp, 3, 0); |
| 16530 | } |
| 16531 | |
| 16532 | |
| 16533 | /* Return the size of a relaxable branch instruction. BITS is the |
| 16534 | size of the offset field in the narrow instruction. */ |
| 16535 | |
| 16536 | static int |
| 16537 | relax_branch (fragS *fragp, asection *sec, int bits) |
| 16538 | { |
| 16539 | addressT addr; |
| 16540 | offsetT val; |
| 16541 | offsetT limit; |
| 16542 | |
| 16543 | /* Assume worst case for symbols not known to be in the same section. */ |
| 16544 | if (!S_IS_DEFINED(fragp->fr_symbol) |
| 16545 | || sec != S_GET_SEGMENT (fragp->fr_symbol)) |
| 16546 | return 4; |
| 16547 | |
| 16548 | val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset; |
| 16549 | addr = fragp->fr_address + fragp->fr_fix + 4; |
| 16550 | val -= addr; |
| 16551 | |
| 16552 | /* Offset is a signed value *2 */ |
| 16553 | limit = 1 << bits; |
| 16554 | if (val >= limit || val < -limit) |
| 16555 | return 4; |
| 16556 | return 2; |
| 16557 | } |
| 16558 | |
| 16559 | |
| 16560 | /* Relax a machine dependent frag. This returns the amount by which |
| 16561 | the current size of the frag should change. */ |
| 16562 | |
int
arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded in fr_subtype; each helper
     returns the size (2 or 4) the instruction currently needs, or a
     negative size to force immediate conversion to the wide form.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr(fragp, sec);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, scaled by 4.  */
      newsize = relax_immediate(fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, scaled by 4.  */
      newsize = relax_immediate(fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, scaled by 2.  */
      newsize = relax_immediate(fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, unscaled.  */
      newsize = relax_immediate(fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr(fragp, sec);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit immediate, unscaled.  */
      newsize = relax_immediate(fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* 11-bit signed branch offset.  */
      newsize = relax_branch(fragp, sec, 11);
      break;
    case T_MNEM_bcond:
      /* 8-bit signed branch offset.  */
      newsize = relax_branch(fragp, sec, 8);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      /* 8-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      /* 7-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort();
    }
  /* A negative size means "convert to the wide form right now": write
     out the final encoding and retire the frag, which stops the sizes
     of interdependent instructions from oscillating forever.  */
  if (newsize < 0)
    {
      fragp->fr_var = -newsize;
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane(fragp);
      return -(newsize + oldsize);
    }
  fragp->fr_var = newsize;
  return newsize - oldsize;
}
| 16634 | |
| 16635 | /* Round up a section size to the appropriate boundary. */ |
| 16636 | |
| 16637 | valueT |
| 16638 | md_section_align (segT segment ATTRIBUTE_UNUSED, |
| 16639 | valueT size) |
| 16640 | { |
| 16641 | #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT)) |
| 16642 | if (OUTPUT_FLAVOR == bfd_target_aout_flavour) |
| 16643 | { |
| 16644 | /* For a.out, force the section size to be aligned. If we don't do |
| 16645 | this, BFD will align it for us, but it will not write out the |
| 16646 | final bytes of the section. This may be a bug in BFD, but it is |
| 16647 | easier to fix it here since that is how the other a.out targets |
| 16648 | work. */ |
| 16649 | int align; |
| 16650 | |
| 16651 | align = bfd_get_section_alignment (stdoutput, segment); |
| 16652 | size = ((size + (1 << align) - 1) & ((valueT) -1 << align)); |
| 16653 | } |
| 16654 | #endif |
| 16655 | |
| 16656 | return size; |
| 16657 | } |
| 16658 | |
| 16659 | /* This is called from HANDLE_ALIGN in write.c. Fill in the contents |
| 16660 | of an rs_align_code fragment. */ |
| 16661 | |
| 16662 | void |
| 16663 | arm_handle_align (fragS * fragP) |
| 16664 | { |
| 16665 | static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 }; |
| 16666 | static char const thumb_noop[2] = { 0xc0, 0x46 }; |
| 16667 | static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 }; |
| 16668 | static char const thumb_bigend_noop[2] = { 0x46, 0xc0 }; |
| 16669 | |
| 16670 | int bytes, fix, noop_size; |
| 16671 | char * p; |
| 16672 | const char * noop; |
| 16673 | |
| 16674 | if (fragP->fr_type != rs_align_code) |
| 16675 | return; |
| 16676 | |
| 16677 | bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix; |
| 16678 | p = fragP->fr_literal + fragP->fr_fix; |
| 16679 | fix = 0; |
| 16680 | |
| 16681 | if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE) |
| 16682 | bytes &= MAX_MEM_FOR_RS_ALIGN_CODE; |
| 16683 | |
| 16684 | if (fragP->tc_frag_data) |
| 16685 | { |
| 16686 | if (target_big_endian) |
| 16687 | noop = thumb_bigend_noop; |
| 16688 | else |
| 16689 | noop = thumb_noop; |
| 16690 | noop_size = sizeof (thumb_noop); |
| 16691 | } |
| 16692 | else |
| 16693 | { |
| 16694 | if (target_big_endian) |
| 16695 | noop = arm_bigend_noop; |
| 16696 | else |
| 16697 | noop = arm_noop; |
| 16698 | noop_size = sizeof (arm_noop); |
| 16699 | } |
| 16700 | |
| 16701 | if (bytes & (noop_size - 1)) |
| 16702 | { |
| 16703 | fix = bytes & (noop_size - 1); |
| 16704 | memset (p, 0, fix); |
| 16705 | p += fix; |
| 16706 | bytes -= fix; |
| 16707 | } |
| 16708 | |
| 16709 | while (bytes >= noop_size) |
| 16710 | { |
| 16711 | memcpy (p, noop, noop_size); |
| 16712 | p += noop_size; |
| 16713 | bytes -= noop_size; |
| 16714 | fix += noop_size; |
| 16715 | } |
| 16716 | |
| 16717 | fragP->fr_fix += fix; |
| 16718 | fragP->fr_var = noop_size; |
| 16719 | } |
| 16720 | |
| 16721 | /* Called from md_do_align. Used to create an alignment |
| 16722 | frag in a code section. */ |
| 16723 | |
void
arm_frag_align_code (int n, int max)
{
  char * p;

  /* We assume that there will never be a requirement
     to support alignments greater than 32 bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
    as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));

  /* Reserve the worst-case amount of space now; arm_handle_align
     fills it with no-op instructions once final addresses are known.  */
  p = frag_var (rs_align_code,
		MAX_MEM_FOR_RS_ALIGN_CODE,
		1,
		(relax_substateT) max,
		(symbolS *) NULL,
		(offsetT) n,
		(char *) NULL);
  *p = 0;
}
| 16743 | |
| 16744 | /* Perform target specific initialisation of a frag. */ |
| 16745 | |
void
arm_init_frag (fragS * fragP)
{
  /* Record whether this frag is in an ARM or a THUMB area, so that
     arm_handle_align can later pick matching no-op padding.  */
  fragP->tc_frag_data = thumb_mode;
}
| 16752 | |
| 16753 | #ifdef OBJ_ELF |
| 16754 | /* When we change sections we need to issue a new mapping symbol. */ |
| 16755 | |
void
arm_elf_change_section (void)
{
  flagword flags;
  segment_info_type *seginfo;

  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

  /* Only ordinary output sections get mapping-symbol bookkeeping.  */
  if (!SEG_NORMAL (now_seg))
    return;

  flags = bfd_get_section_flags (stdoutput, now_seg);

  /* We can ignore sections that only contain debug info.  */
  if ((flags & SEC_ALLOC) == 0)
    return;

  seginfo = seg_info (now_seg);
  /* Reload the per-section state into the file-scope caches
     (mapstate / marked_pr_dependency are declared elsewhere in this
     file).  */
  mapstate = seginfo->tc_segment_info_data.mapstate;
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
}
| 16780 | |
| 16781 | int |
| 16782 | arm_elf_section_type (const char * str, size_t len) |
| 16783 | { |
| 16784 | if (len == 5 && strncmp (str, "exidx", 5) == 0) |
| 16785 | return SHT_ARM_EXIDX; |
| 16786 | |
| 16787 | return -1; |
| 16788 | } |
| 16789 | \f |
| 16790 | /* Code to deal with unwinding tables. */ |
| 16791 | |
| 16792 | static void add_unwind_adjustsp (offsetT); |
| 16793 | |
/* Generate any deferred unwind frame offset.  */
| 16795 | |
| 16796 | static void |
| 16797 | flush_pending_unwind (void) |
| 16798 | { |
| 16799 | offsetT offset; |
| 16800 | |
| 16801 | offset = unwind.pending_offset; |
| 16802 | unwind.pending_offset = 0; |
| 16803 | if (offset != 0) |
| 16804 | add_unwind_adjustsp (offset); |
| 16805 | } |
| 16806 | |
| 16807 | /* Add an opcode to this list for this function. Two-byte opcodes should |
| 16808 | be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse |
| 16809 | order. */ |
| 16810 | |
| 16811 | static void |
| 16812 | add_unwind_opcode (valueT op, int length) |
| 16813 | { |
| 16814 | /* Add any deferred stack adjustment. */ |
| 16815 | if (unwind.pending_offset) |
| 16816 | flush_pending_unwind (); |
| 16817 | |
| 16818 | unwind.sp_restored = 0; |
| 16819 | |
| 16820 | if (unwind.opcode_count + length > unwind.opcode_alloc) |
| 16821 | { |
| 16822 | unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE; |
| 16823 | if (unwind.opcodes) |
| 16824 | unwind.opcodes = xrealloc (unwind.opcodes, |
| 16825 | unwind.opcode_alloc); |
| 16826 | else |
| 16827 | unwind.opcodes = xmalloc (unwind.opcode_alloc); |
| 16828 | } |
| 16829 | while (length > 0) |
| 16830 | { |
| 16831 | length--; |
| 16832 | unwind.opcodes[unwind.opcode_count] = op & 0xff; |
| 16833 | op >>= 8; |
| 16834 | unwind.opcode_count++; |
| 16835 | } |
| 16836 | } |
| 16837 | |
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Encodings follow the ARM EHABI: short opcodes adjust vsp by up to
   0x100/0x200 bytes; larger increases use the 0xb2 + uleb128 long
   form.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  The encoded value is
	 (offset - 0x204) / 4 per the EHABI.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Set the continuation bit on all but the final byte.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn: uleb128 bytes high-to-low, then the 0xb2 opcode,
	 so they come out in the right order when the list is
	 reversed.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: a maximal 0x3f adjustment followed by the
	 remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: 0x00-0x3f encodes vsp += (op << 2) + 4.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: 0x40-0x7f encodes vsp -= (op << 2) + 4,
	 emitted in chunks of at most 0x100 bytes.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
| 16899 | |
/* Finish the list of unwind opcodes for this function: flush any
   pending stack adjustment and, if a frame pointer is in use, emit
   the opcode that restores sp from it first (the list is built in
   reverse order).  */
static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  0x9N is the EHABI
	 "vsp = rN" opcode.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
| 16919 | |
| 16920 | |
| 16921 | /* Start an exception table entry. If idx is nonzero this is an index table |
| 16922 | entry. */ |
| 16923 | |
| 16924 | static void |
start_unwind_section (const segT text_seg, int idx)
{
  /* Switch to the unwind section that corresponds to TEXT_SEG.  A nonzero
     IDX selects the exception index table (SHT_ARM_EXIDX); zero selects
     the unwind info (extab) section.  */
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind sections for the default ".text" section use the bare
     prefix names; other text sections get the text name appended.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Linkonce text sections take the linkonce variant of the prefix,
	 with the ".gnu.linkonce.t." part of the name stripped.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  /* Build "<prefix><text_name>" as the unwind section name.  */
  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad ("Group section `%s' has no group signature",
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
| 16996 | |
| 16997 | |
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */
| 17001 | |
| 17002 | static valueT |
| 17003 | create_unwind_entry (int have_data) |
| 17004 | { |
| 17005 | int size; |
| 17006 | addressT where; |
| 17007 | char *ptr; |
| 17008 | /* The current word of data. */ |
| 17009 | valueT data; |
| 17010 | /* The number of bytes left in this word. */ |
| 17011 | int n; |
| 17012 | |
| 17013 | finish_unwind_opcodes (); |
| 17014 | |
| 17015 | /* Remember the current text section. */ |
| 17016 | unwind.saved_seg = now_seg; |
| 17017 | unwind.saved_subseg = now_subseg; |
| 17018 | |
| 17019 | start_unwind_section (now_seg, 0); |
| 17020 | |
| 17021 | if (unwind.personality_routine == NULL) |
| 17022 | { |
| 17023 | if (unwind.personality_index == -2) |
| 17024 | { |
| 17025 | if (have_data) |
| 17026 | as_bad (_("handerdata in cantunwind frame")); |
| 17027 | return 1; /* EXIDX_CANTUNWIND. */ |
| 17028 | } |
| 17029 | |
| 17030 | /* Use a default personality routine if none is specified. */ |
| 17031 | if (unwind.personality_index == -1) |
| 17032 | { |
| 17033 | if (unwind.opcode_count > 3) |
| 17034 | unwind.personality_index = 1; |
| 17035 | else |
| 17036 | unwind.personality_index = 0; |
| 17037 | } |
| 17038 | |
| 17039 | /* Space for the personality routine entry. */ |
| 17040 | if (unwind.personality_index == 0) |
| 17041 | { |
| 17042 | if (unwind.opcode_count > 3) |
| 17043 | as_bad (_("too many unwind opcodes for personality routine 0")); |
| 17044 | |
| 17045 | if (!have_data) |
| 17046 | { |
| 17047 | /* All the data is inline in the index table. */ |
| 17048 | data = 0x80; |
| 17049 | n = 3; |
| 17050 | while (unwind.opcode_count > 0) |
| 17051 | { |
| 17052 | unwind.opcode_count--; |
| 17053 | data = (data << 8) | unwind.opcodes[unwind.opcode_count]; |
| 17054 | n--; |
| 17055 | } |
| 17056 | |
| 17057 | /* Pad with "finish" opcodes. */ |
| 17058 | while (n--) |
| 17059 | data = (data << 8) | 0xb0; |
| 17060 | |
| 17061 | return data; |
| 17062 | } |
| 17063 | size = 0; |
| 17064 | } |
| 17065 | else |
| 17066 | /* We get two opcodes "free" in the first word. */ |
| 17067 | size = unwind.opcode_count - 2; |
| 17068 | } |
| 17069 | else |
| 17070 | /* An extra byte is required for the opcode count. */ |
| 17071 | size = unwind.opcode_count + 1; |
| 17072 | |
| 17073 | size = (size + 3) >> 2; |
| 17074 | if (size > 0xff) |
| 17075 | as_bad (_("too many unwind opcodes")); |
| 17076 | |
| 17077 | frag_align (2, 0, 0); |
| 17078 | record_alignment (now_seg, 2); |
| 17079 | unwind.table_entry = expr_build_dot (); |
| 17080 | |
| 17081 | /* Allocate the table entry. */ |
| 17082 | ptr = frag_more ((size << 2) + 4); |
| 17083 | where = frag_now_fix () - ((size << 2) + 4); |
| 17084 | |
| 17085 | switch (unwind.personality_index) |
| 17086 | { |
| 17087 | case -1: |
| 17088 | /* ??? Should this be a PLT generating relocation? */ |
| 17089 | /* Custom personality routine. */ |
| 17090 | fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1, |
| 17091 | BFD_RELOC_ARM_PREL31); |
| 17092 | |
| 17093 | where += 4; |
| 17094 | ptr += 4; |
| 17095 | |
| 17096 | /* Set the first byte to the number of additional words. */ |
| 17097 | data = size - 1; |
| 17098 | n = 3; |
| 17099 | break; |
| 17100 | |
| 17101 | /* ABI defined personality routines. */ |
| 17102 | case 0: |
| 17103 | /* Three opcodes bytes are packed into the first word. */ |
| 17104 | data = 0x80; |
| 17105 | n = 3; |
| 17106 | break; |
| 17107 | |
| 17108 | case 1: |
| 17109 | case 2: |
| 17110 | /* The size and first two opcode bytes go in the first word. */ |
| 17111 | data = ((0x80 + unwind.personality_index) << 8) | size; |
| 17112 | n = 2; |
| 17113 | break; |
| 17114 | |
| 17115 | default: |
| 17116 | /* Should never happen. */ |
| 17117 | abort (); |
| 17118 | } |
| 17119 | |
| 17120 | /* Pack the opcodes into words (MSB first), reversing the list at the same |
| 17121 | time. */ |
| 17122 | while (unwind.opcode_count > 0) |
| 17123 | { |
| 17124 | if (n == 0) |
| 17125 | { |
| 17126 | md_number_to_chars (ptr, data, 4); |
| 17127 | ptr += 4; |
| 17128 | n = 4; |
| 17129 | data = 0; |
| 17130 | } |
| 17131 | unwind.opcode_count--; |
| 17132 | n--; |
| 17133 | data = (data << 8) | unwind.opcodes[unwind.opcode_count]; |
| 17134 | } |
| 17135 | |
| 17136 | /* Finish off the last word. */ |
| 17137 | if (n < 4) |
| 17138 | { |
| 17139 | /* Pad with "finish" opcodes. */ |
| 17140 | while (n--) |
| 17141 | data = (data << 8) | 0xb0; |
| 17142 | |
| 17143 | md_number_to_chars (ptr, data, 4); |
| 17144 | } |
| 17145 | |
| 17146 | if (!have_data) |
| 17147 | { |
| 17148 | /* Add an empty descriptor if there is no user-specified data. */ |
| 17149 | ptr = frag_more (4); |
| 17150 | md_number_to_chars (ptr, 0, 4); |
| 17151 | } |
| 17152 | |
| 17153 | return 0; |
| 17154 | } |
| 17155 | |
| 17156 | |
| 17157 | /* Initialize the DWARF-2 unwind information for this procedure. */ |
| 17158 | |
void
tc_arm_frame_initial_instructions (void)
{
  /* On entry to a function the CFA is the stack pointer with no offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
| 17164 | #endif /* OBJ_ELF */ |
| 17165 | |
| 17166 | /* Convert REGNAME to a DWARF-2 register number. */ |
| 17167 | |
| 17168 | int |
| 17169 | tc_arm_regname_to_dw2regnum (char *regname) |
| 17170 | { |
| 17171 | int reg = arm_reg_parse (®name, REG_TYPE_RN); |
| 17172 | |
| 17173 | if (reg == FAIL) |
| 17174 | return -1; |
| 17175 | |
| 17176 | return reg; |
| 17177 | } |
| 17178 | |
| 17179 | #ifdef TE_PE |
| 17180 | void |
| 17181 | tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size) |
| 17182 | { |
| 17183 | expressionS expr; |
| 17184 | |
| 17185 | expr.X_op = O_secrel; |
| 17186 | expr.X_add_symbol = symbol; |
| 17187 | expr.X_add_number = 0; |
| 17188 | emit_expr (&expr, size); |
| 17189 | } |
| 17190 | #endif |
| 17191 | |
| 17192 | /* MD interface: Symbol and relocation handling. */ |
| 17193 | |
| 17194 | /* Return the address within the segment that a PC-relative fixup is |
| 17195 | relative to. For ARM, PC-relative fixups applied to instructions |
| 17196 | are generally relative to the location of the fixup plus 8 bytes. |
| 17197 | Thumb branches are offset by 4, and Thumb loads relative to PC |
| 17198 | require special handling. */ |
| 17199 | |
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Raw address of the fixup within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;

  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Thumb loads: pipeline offset of +4, then word-align.  */
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
      return base + 4;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
| 17285 | |
| 17286 | /* Under ELF we need to default _GLOBAL_OFFSET_TABLE. |
| 17287 | Otherwise we have no need to default values of symbols. */ |
| 17288 | |
| 17289 | symbolS * |
| 17290 | md_undefined_symbol (char * name ATTRIBUTE_UNUSED) |
| 17291 | { |
| 17292 | #ifdef OBJ_ELF |
| 17293 | if (name[0] == '_' && name[1] == 'G' |
| 17294 | && streq (name, GLOBAL_OFFSET_TABLE_NAME)) |
| 17295 | { |
| 17296 | if (!GOT_symbol) |
| 17297 | { |
| 17298 | if (symbol_find (name)) |
| 17299 | as_bad ("GOT already in the symbol table"); |
| 17300 | |
| 17301 | GOT_symbol = symbol_new (name, undefined_section, |
| 17302 | (valueT) 0, & zero_address_frag); |
| 17303 | } |
| 17304 | |
| 17305 | return GOT_symbol; |
| 17306 | } |
| 17307 | #endif |
| 17308 | |
| 17309 | return 0; |
| 17310 | } |
| 17311 | |
| 17312 | /* Subroutine of md_apply_fix. Check to see if an immediate can be |
| 17313 | computed as two separate immediate values, added together. We |
| 17314 | already know that this value cannot be computed by just one ARM |
| 17315 | instruction. */ |
| 17316 | |
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even rotation; the lowest byte of the rotated value
     becomes the low-part immediate.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* The remainder must fit entirely in the adjacent byte,
	       encoded with a correspondingly adjusted rotation.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only the top byte can be left at this point.  */
	    assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Low part: 8-bit immediate plus rotation field (i is even,
	   so i << 7 places i/2 in bits 8..11).  */
	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
| 17350 | |
| 17351 | static int |
| 17352 | validate_offset_imm (unsigned int val, int hwse) |
| 17353 | { |
| 17354 | if ((hwse && val > 255) || val > 4095) |
| 17355 | return FAIL; |
| 17356 | return val; |
| 17357 | } |
| 17358 | |
| 17359 | /* Subroutine of md_apply_fix. Do those data_ops which can take a |
| 17360 | negative immediate constant by altering the instruction. A bit of |
| 17361 | a hack really. |
| 17362 | MOV <-> MVN |
| 17363 | AND <-> BIC |
| 17364 | ADC <-> SBC |
| 17365 | by inverting the second operand, and |
| 17366 | ADD <-> SUB |
| 17367 | CMP <-> CMN |
| 17368 | by negating the second operand. */ |
| 17369 | |
static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Pre-compute both candidate encodings; encode_arm_immediate yields
     FAIL when the constant cannot be expressed as an immediate.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:	     /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:	     /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:	     /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:	     /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:	     /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The chosen alternative encoding may itself be unrepresentable.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
| 17447 | |
| 17448 | /* Like negate_data_op, but for Thumb-2. */ |
| 17449 | |
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute both candidate encodings; encode_thumb32_immediate
     yields FAIL when the constant cannot be expressed.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  /* Destination register field, bits 8..11.  */
  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 here distinguishes TST from AND.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The chosen alternative encoding may itself be unrepresentable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Splice the replacement opcode into the instruction.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
| 17523 | |
| 17524 | /* Read a 32-bit thumb instruction from buf. */ |
| 17525 | static unsigned long |
| 17526 | get_thumb32_insn (char * buf) |
| 17527 | { |
| 17528 | unsigned long insn; |
| 17529 | insn = md_chars_to_number (buf, THUMB_SIZE) << 16; |
| 17530 | insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); |
| 17531 | |
| 17532 | return insn; |
| 17533 | } |
| 17534 | |
| 17535 | |
| 17536 | /* We usually want to set the low bit on the address of thumb function |
| 17537 | symbols. In particular .word foo - . should have the low bit set. |
| 17538 | Generic code tries to fold the difference of two symbols to |
   a constant. Prevent this and force a relocation when the first symbol
| 17540 | is a thumb function. */ |
| 17541 | int |
| 17542 | arm_optimize_expr (expressionS *l, operatorT op, expressionS *r) |
| 17543 | { |
| 17544 | if (op == O_subtract |
| 17545 | && l->X_op == O_symbol |
| 17546 | && r->X_op == O_symbol |
| 17547 | && THUMB_IS_FUNC (l->X_add_symbol)) |
| 17548 | { |
| 17549 | l->X_op = O_subtract; |
| 17550 | l->X_op_symbol = r->X_add_symbol; |
| 17551 | l->X_add_number -= r->X_add_number; |
| 17552 | return 1; |
| 17553 | } |
| 17554 | /* Process as normal. */ |
| 17555 | return 0; |
| 17556 | } |
| 17557 | |
| 17558 | void |
| 17559 | md_apply_fix (fixS * fixP, |
| 17560 | valueT * valP, |
| 17561 | segT seg) |
| 17562 | { |
| 17563 | offsetT value = * valP; |
| 17564 | offsetT newval; |
| 17565 | unsigned int newimm; |
| 17566 | unsigned long temp; |
| 17567 | int sign; |
| 17568 | char * buf = fixP->fx_where + fixP->fx_frag->fr_literal; |
| 17569 | |
| 17570 | assert (fixP->fx_r_type <= BFD_RELOC_UNUSED); |
| 17571 | |
| 17572 | /* Note whether this will delete the relocation. */ |
| 17573 | |
| 17574 | if (fixP->fx_addsy == 0 && !fixP->fx_pcrel) |
| 17575 | fixP->fx_done = 1; |
| 17576 | |
| 17577 | /* On a 64-bit host, silently truncate 'value' to 32 bits for |
| 17578 | consistency with the behavior on 32-bit hosts. Remember value |
| 17579 | for emit_reloc. */ |
| 17580 | value &= 0xffffffff; |
| 17581 | value ^= 0x80000000; |
| 17582 | value -= 0x80000000; |
| 17583 | |
| 17584 | *valP = value; |
| 17585 | fixP->fx_addnumber = value; |
| 17586 | |
| 17587 | /* Same treatment for fixP->fx_offset. */ |
| 17588 | fixP->fx_offset &= 0xffffffff; |
| 17589 | fixP->fx_offset ^= 0x80000000; |
| 17590 | fixP->fx_offset -= 0x80000000; |
| 17591 | |
| 17592 | switch (fixP->fx_r_type) |
| 17593 | { |
| 17594 | case BFD_RELOC_NONE: |
| 17595 | /* This will need to go in the object file. */ |
| 17596 | fixP->fx_done = 0; |
| 17597 | break; |
| 17598 | |
| 17599 | case BFD_RELOC_ARM_IMMEDIATE: |
| 17600 | /* We claim that this fixup has been processed here, |
| 17601 | even if in fact we generate an error because we do |
| 17602 | not have a reloc for it, so tc_gen_reloc will reject it. */ |
| 17603 | fixP->fx_done = 1; |
| 17604 | |
| 17605 | if (fixP->fx_addsy |
| 17606 | && ! S_IS_DEFINED (fixP->fx_addsy)) |
| 17607 | { |
| 17608 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17609 | _("undefined symbol %s used as an immediate value"), |
| 17610 | S_GET_NAME (fixP->fx_addsy)); |
| 17611 | break; |
| 17612 | } |
| 17613 | |
| 17614 | newimm = encode_arm_immediate (value); |
| 17615 | temp = md_chars_to_number (buf, INSN_SIZE); |
| 17616 | |
| 17617 | /* If the instruction will fail, see if we can fix things up by |
| 17618 | changing the opcode. */ |
| 17619 | if (newimm == (unsigned int) FAIL |
| 17620 | && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL) |
| 17621 | { |
| 17622 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17623 | _("invalid constant (%lx) after fixup"), |
| 17624 | (unsigned long) value); |
| 17625 | break; |
| 17626 | } |
| 17627 | |
| 17628 | newimm |= (temp & 0xfffff000); |
| 17629 | md_number_to_chars (buf, (valueT) newimm, INSN_SIZE); |
| 17630 | break; |
| 17631 | |
| 17632 | case BFD_RELOC_ARM_ADRL_IMMEDIATE: |
| 17633 | { |
| 17634 | unsigned int highpart = 0; |
| 17635 | unsigned int newinsn = 0xe1a00000; /* nop. */ |
| 17636 | |
| 17637 | newimm = encode_arm_immediate (value); |
| 17638 | temp = md_chars_to_number (buf, INSN_SIZE); |
| 17639 | |
| 17640 | /* If the instruction will fail, see if we can fix things up by |
| 17641 | changing the opcode. */ |
| 17642 | if (newimm == (unsigned int) FAIL |
| 17643 | && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL) |
| 17644 | { |
| 17645 | /* No ? OK - try using two ADD instructions to generate |
| 17646 | the value. */ |
| 17647 | newimm = validate_immediate_twopart (value, & highpart); |
| 17648 | |
| 17649 | /* Yes - then make sure that the second instruction is |
| 17650 | also an add. */ |
| 17651 | if (newimm != (unsigned int) FAIL) |
| 17652 | newinsn = temp; |
| 17653 | /* Still No ? Try using a negated value. */ |
| 17654 | else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL) |
| 17655 | temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT; |
| 17656 | /* Otherwise - give up. */ |
| 17657 | else |
| 17658 | { |
| 17659 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17660 | _("unable to compute ADRL instructions for PC offset of 0x%lx"), |
| 17661 | (long) value); |
| 17662 | break; |
| 17663 | } |
| 17664 | |
| 17665 | /* Replace the first operand in the 2nd instruction (which |
| 17666 | is the PC) with the destination register. We have |
| 17667 | already added in the PC in the first instruction and we |
| 17668 | do not want to do it again. */ |
| 17669 | newinsn &= ~ 0xf0000; |
| 17670 | newinsn |= ((newinsn & 0x0f000) << 4); |
| 17671 | } |
| 17672 | |
| 17673 | newimm |= (temp & 0xfffff000); |
| 17674 | md_number_to_chars (buf, (valueT) newimm, INSN_SIZE); |
| 17675 | |
| 17676 | highpart |= (newinsn & 0xfffff000); |
| 17677 | md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE); |
| 17678 | } |
| 17679 | break; |
| 17680 | |
| 17681 | case BFD_RELOC_ARM_OFFSET_IMM: |
| 17682 | if (!fixP->fx_done && seg->use_rela_p) |
| 17683 | value = 0; |
| 17684 | |
| 17685 | case BFD_RELOC_ARM_LITERAL: |
| 17686 | sign = value >= 0; |
| 17687 | |
| 17688 | if (value < 0) |
| 17689 | value = - value; |
| 17690 | |
| 17691 | if (validate_offset_imm (value, 0) == FAIL) |
| 17692 | { |
| 17693 | if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL) |
| 17694 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17695 | _("invalid literal constant: pool needs to be closer")); |
| 17696 | else |
| 17697 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17698 | _("bad immediate value for offset (%ld)"), |
| 17699 | (long) value); |
| 17700 | break; |
| 17701 | } |
| 17702 | |
| 17703 | newval = md_chars_to_number (buf, INSN_SIZE); |
| 17704 | newval &= 0xff7ff000; |
| 17705 | newval |= value | (sign ? INDEX_UP : 0); |
| 17706 | md_number_to_chars (buf, newval, INSN_SIZE); |
| 17707 | break; |
| 17708 | |
| 17709 | case BFD_RELOC_ARM_OFFSET_IMM8: |
| 17710 | case BFD_RELOC_ARM_HWLITERAL: |
| 17711 | sign = value >= 0; |
| 17712 | |
| 17713 | if (value < 0) |
| 17714 | value = - value; |
| 17715 | |
| 17716 | if (validate_offset_imm (value, 1) == FAIL) |
| 17717 | { |
| 17718 | if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL) |
| 17719 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17720 | _("invalid literal constant: pool needs to be closer")); |
| 17721 | else |
| 17722 | as_bad (_("bad immediate value for half-word offset (%ld)"), |
| 17723 | (long) value); |
| 17724 | break; |
| 17725 | } |
| 17726 | |
| 17727 | newval = md_chars_to_number (buf, INSN_SIZE); |
| 17728 | newval &= 0xff7ff0f0; |
| 17729 | newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0); |
| 17730 | md_number_to_chars (buf, newval, INSN_SIZE); |
| 17731 | break; |
| 17732 | |
| 17733 | case BFD_RELOC_ARM_T32_OFFSET_U8: |
| 17734 | if (value < 0 || value > 1020 || value % 4 != 0) |
| 17735 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17736 | _("bad immediate value for offset (%ld)"), (long) value); |
| 17737 | value /= 4; |
| 17738 | |
| 17739 | newval = md_chars_to_number (buf+2, THUMB_SIZE); |
| 17740 | newval |= value; |
| 17741 | md_number_to_chars (buf+2, newval, THUMB_SIZE); |
| 17742 | break; |
| 17743 | |
| 17744 | case BFD_RELOC_ARM_T32_OFFSET_IMM: |
| 17745 | /* This is a complicated relocation used for all varieties of Thumb32 |
| 17746 | load/store instruction with immediate offset: |
| 17747 | |
| 17748 | 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit, |
| 17749 | *4, optional writeback(W) |
| 17750 | (doubleword load/store) |
| 17751 | |
| 17752 | 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel |
| 17753 | 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit |
| 17754 | 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction) |
| 17755 | 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit |
| 17756 | 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit |
| 17757 | |
| 17758 | Uppercase letters indicate bits that are already encoded at |
| 17759 | this point. Lowercase letters are our problem. For the |
| 17760 | second block of instructions, the secondary opcode nybble |
| 17761 | (bits 8..11) is present, and bit 23 is zero, even if this is |
| 17762 | a PC-relative operation. */ |
| 17763 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 17764 | newval <<= 16; |
| 17765 | newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE); |
| 17766 | |
| 17767 | if ((newval & 0xf0000000) == 0xe0000000) |
| 17768 | { |
| 17769 | /* Doubleword load/store: 8-bit offset, scaled by 4. */ |
| 17770 | if (value >= 0) |
| 17771 | newval |= (1 << 23); |
| 17772 | else |
| 17773 | value = -value; |
| 17774 | if (value % 4 != 0) |
| 17775 | { |
| 17776 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17777 | _("offset not a multiple of 4")); |
| 17778 | break; |
| 17779 | } |
| 17780 | value /= 4; |
| 17781 | if (value > 0xff) |
| 17782 | { |
| 17783 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17784 | _("offset out of range")); |
| 17785 | break; |
| 17786 | } |
| 17787 | newval &= ~0xff; |
| 17788 | } |
| 17789 | else if ((newval & 0x000f0000) == 0x000f0000) |
| 17790 | { |
| 17791 | /* PC-relative, 12-bit offset. */ |
| 17792 | if (value >= 0) |
| 17793 | newval |= (1 << 23); |
| 17794 | else |
| 17795 | value = -value; |
| 17796 | if (value > 0xfff) |
| 17797 | { |
| 17798 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17799 | _("offset out of range")); |
| 17800 | break; |
| 17801 | } |
| 17802 | newval &= ~0xfff; |
| 17803 | } |
| 17804 | else if ((newval & 0x00000100) == 0x00000100) |
| 17805 | { |
| 17806 | /* Writeback: 8-bit, +/- offset. */ |
| 17807 | if (value >= 0) |
| 17808 | newval |= (1 << 9); |
| 17809 | else |
| 17810 | value = -value; |
| 17811 | if (value > 0xff) |
| 17812 | { |
| 17813 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17814 | _("offset out of range")); |
| 17815 | break; |
| 17816 | } |
| 17817 | newval &= ~0xff; |
| 17818 | } |
| 17819 | else if ((newval & 0x00000f00) == 0x00000e00) |
| 17820 | { |
| 17821 | /* T-instruction: positive 8-bit offset. */ |
| 17822 | if (value < 0 || value > 0xff) |
| 17823 | { |
| 17824 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17825 | _("offset out of range")); |
| 17826 | break; |
| 17827 | } |
| 17828 | newval &= ~0xff; |
| 17829 | newval |= value; |
| 17830 | } |
| 17831 | else |
| 17832 | { |
| 17833 | /* Positive 12-bit or negative 8-bit offset. */ |
| 17834 | int limit; |
| 17835 | if (value >= 0) |
| 17836 | { |
| 17837 | newval |= (1 << 23); |
| 17838 | limit = 0xfff; |
| 17839 | } |
| 17840 | else |
| 17841 | { |
| 17842 | value = -value; |
| 17843 | limit = 0xff; |
| 17844 | } |
| 17845 | if (value > limit) |
| 17846 | { |
| 17847 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17848 | _("offset out of range")); |
| 17849 | break; |
| 17850 | } |
| 17851 | newval &= ~limit; |
| 17852 | } |
| 17853 | |
| 17854 | newval |= value; |
| 17855 | md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE); |
| 17856 | md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE); |
| 17857 | break; |
| 17858 | |
| 17859 | case BFD_RELOC_ARM_SHIFT_IMM: |
| 17860 | newval = md_chars_to_number (buf, INSN_SIZE); |
| 17861 | if (((unsigned long) value) > 32 |
| 17862 | || (value == 32 |
| 17863 | && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60))) |
| 17864 | { |
| 17865 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17866 | _("shift expression is too large")); |
| 17867 | break; |
| 17868 | } |
| 17869 | |
| 17870 | if (value == 0) |
| 17871 | /* Shifts of zero must be done as lsl. */ |
| 17872 | newval &= ~0x60; |
| 17873 | else if (value == 32) |
| 17874 | value = 0; |
| 17875 | newval &= 0xfffff07f; |
| 17876 | newval |= (value & 0x1f) << 7; |
| 17877 | md_number_to_chars (buf, newval, INSN_SIZE); |
| 17878 | break; |
| 17879 | |
| 17880 | case BFD_RELOC_ARM_T32_IMMEDIATE: |
| 17881 | case BFD_RELOC_ARM_T32_ADD_IMM: |
| 17882 | case BFD_RELOC_ARM_T32_IMM12: |
| 17883 | case BFD_RELOC_ARM_T32_ADD_PC12: |
| 17884 | /* We claim that this fixup has been processed here, |
| 17885 | even if in fact we generate an error because we do |
| 17886 | not have a reloc for it, so tc_gen_reloc will reject it. */ |
| 17887 | fixP->fx_done = 1; |
| 17888 | |
| 17889 | if (fixP->fx_addsy |
| 17890 | && ! S_IS_DEFINED (fixP->fx_addsy)) |
| 17891 | { |
| 17892 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17893 | _("undefined symbol %s used as an immediate value"), |
| 17894 | S_GET_NAME (fixP->fx_addsy)); |
| 17895 | break; |
| 17896 | } |
| 17897 | |
| 17898 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 17899 | newval <<= 16; |
| 17900 | newval |= md_chars_to_number (buf+2, THUMB_SIZE); |
| 17901 | |
| 17902 | newimm = FAIL; |
| 17903 | if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE |
| 17904 | || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM) |
| 17905 | { |
| 17906 | newimm = encode_thumb32_immediate (value); |
| 17907 | if (newimm == (unsigned int) FAIL) |
| 17908 | newimm = thumb32_negate_data_op (&newval, value); |
| 17909 | } |
| 17910 | if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE |
| 17911 | && newimm == (unsigned int) FAIL) |
| 17912 | { |
	  /* Turn add/sub into addw/subw. */
| 17914 | if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM) |
| 17915 | newval = (newval & 0xfeffffff) | 0x02000000; |
| 17916 | |
| 17917 | /* 12 bit immediate for addw/subw. */ |
| 17918 | if (value < 0) |
| 17919 | { |
| 17920 | value = -value; |
| 17921 | newval ^= 0x00a00000; |
| 17922 | } |
| 17923 | if (value > 0xfff) |
| 17924 | newimm = (unsigned int) FAIL; |
| 17925 | else |
| 17926 | newimm = value; |
| 17927 | } |
| 17928 | |
| 17929 | if (newimm == (unsigned int)FAIL) |
| 17930 | { |
| 17931 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17932 | _("invalid constant (%lx) after fixup"), |
| 17933 | (unsigned long) value); |
| 17934 | break; |
| 17935 | } |
| 17936 | |
| 17937 | newval |= (newimm & 0x800) << 15; |
| 17938 | newval |= (newimm & 0x700) << 4; |
| 17939 | newval |= (newimm & 0x0ff); |
| 17940 | |
| 17941 | md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE); |
| 17942 | md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE); |
| 17943 | break; |
| 17944 | |
| 17945 | case BFD_RELOC_ARM_SMC: |
| 17946 | if (((unsigned long) value) > 0xffff) |
| 17947 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17948 | _("invalid smc expression")); |
| 17949 | newval = md_chars_to_number (buf, INSN_SIZE); |
| 17950 | newval |= (value & 0xf) | ((value & 0xfff0) << 4); |
| 17951 | md_number_to_chars (buf, newval, INSN_SIZE); |
| 17952 | break; |
| 17953 | |
| 17954 | case BFD_RELOC_ARM_SWI: |
| 17955 | if (fixP->tc_fix_data != 0) |
| 17956 | { |
| 17957 | if (((unsigned long) value) > 0xff) |
| 17958 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17959 | _("invalid swi expression")); |
| 17960 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 17961 | newval |= value; |
| 17962 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 17963 | } |
| 17964 | else |
| 17965 | { |
| 17966 | if (((unsigned long) value) > 0x00ffffff) |
| 17967 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17968 | _("invalid swi expression")); |
| 17969 | newval = md_chars_to_number (buf, INSN_SIZE); |
| 17970 | newval |= value; |
| 17971 | md_number_to_chars (buf, newval, INSN_SIZE); |
| 17972 | } |
| 17973 | break; |
| 17974 | |
| 17975 | case BFD_RELOC_ARM_MULTI: |
| 17976 | if (((unsigned long) value) > 0xffff) |
| 17977 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 17978 | _("invalid expression in load/store multiple")); |
| 17979 | newval = value | md_chars_to_number (buf, INSN_SIZE); |
| 17980 | md_number_to_chars (buf, newval, INSN_SIZE); |
| 17981 | break; |
| 17982 | |
| 17983 | #ifdef OBJ_ELF |
| 17984 | case BFD_RELOC_ARM_PCREL_CALL: |
| 17985 | newval = md_chars_to_number (buf, INSN_SIZE); |
| 17986 | if ((newval & 0xf0000000) == 0xf0000000) |
| 17987 | temp = 1; |
| 17988 | else |
| 17989 | temp = 3; |
| 17990 | goto arm_branch_common; |
| 17991 | |
| 17992 | case BFD_RELOC_ARM_PCREL_JUMP: |
| 17993 | case BFD_RELOC_ARM_PLT32: |
| 17994 | #endif |
| 17995 | case BFD_RELOC_ARM_PCREL_BRANCH: |
| 17996 | temp = 3; |
| 17997 | goto arm_branch_common; |
| 17998 | |
| 17999 | case BFD_RELOC_ARM_PCREL_BLX: |
| 18000 | temp = 1; |
| 18001 | arm_branch_common: |
      /* We are going to store value (shifted right by two) in the
	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
	 also be clear.  */
| 18006 | if (value & temp) |
| 18007 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18008 | _("misaligned branch destination")); |
| 18009 | if ((value & (offsetT)0xfe000000) != (offsetT)0 |
| 18010 | && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000) |
| 18011 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18012 | _("branch out of range")); |
| 18013 | |
| 18014 | if (fixP->fx_done || !seg->use_rela_p) |
| 18015 | { |
| 18016 | newval = md_chars_to_number (buf, INSN_SIZE); |
| 18017 | newval |= (value >> 2) & 0x00ffffff; |
| 18018 | /* Set the H bit on BLX instructions. */ |
| 18019 | if (temp == 1) |
| 18020 | { |
| 18021 | if (value & 2) |
| 18022 | newval |= 0x01000000; |
| 18023 | else |
| 18024 | newval &= ~0x01000000; |
| 18025 | } |
| 18026 | md_number_to_chars (buf, newval, INSN_SIZE); |
| 18027 | } |
| 18028 | break; |
| 18029 | |
| 18030 | case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */ |
| 18031 | /* CBZ can only branch forward. */ |
| 18032 | if (value & ~0x7e) |
| 18033 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18034 | _("branch out of range")); |
| 18035 | |
| 18036 | if (fixP->fx_done || !seg->use_rela_p) |
| 18037 | { |
| 18038 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18039 | newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3); |
| 18040 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18041 | } |
| 18042 | break; |
| 18043 | |
| 18044 | case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */ |
| 18045 | if ((value & ~0xff) && ((value & ~0xff) != ~0xff)) |
| 18046 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18047 | _("branch out of range")); |
| 18048 | |
| 18049 | if (fixP->fx_done || !seg->use_rela_p) |
| 18050 | { |
| 18051 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18052 | newval |= (value & 0x1ff) >> 1; |
| 18053 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18054 | } |
| 18055 | break; |
| 18056 | |
| 18057 | case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */ |
| 18058 | if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff)) |
| 18059 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18060 | _("branch out of range")); |
| 18061 | |
| 18062 | if (fixP->fx_done || !seg->use_rela_p) |
| 18063 | { |
| 18064 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18065 | newval |= (value & 0xfff) >> 1; |
| 18066 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18067 | } |
| 18068 | break; |
| 18069 | |
| 18070 | case BFD_RELOC_THUMB_PCREL_BRANCH20: |
| 18071 | if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff)) |
| 18072 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18073 | _("conditional branch out of range")); |
| 18074 | |
| 18075 | if (fixP->fx_done || !seg->use_rela_p) |
| 18076 | { |
| 18077 | offsetT newval2; |
| 18078 | addressT S, J1, J2, lo, hi; |
| 18079 | |
| 18080 | S = (value & 0x00100000) >> 20; |
| 18081 | J2 = (value & 0x00080000) >> 19; |
| 18082 | J1 = (value & 0x00040000) >> 18; |
| 18083 | hi = (value & 0x0003f000) >> 12; |
| 18084 | lo = (value & 0x00000ffe) >> 1; |
| 18085 | |
| 18086 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18087 | newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); |
| 18088 | newval |= (S << 10) | hi; |
| 18089 | newval2 |= (J1 << 13) | (J2 << 11) | lo; |
| 18090 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18091 | md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); |
| 18092 | } |
| 18093 | break; |
| 18094 | |
| 18095 | case BFD_RELOC_THUMB_PCREL_BLX: |
| 18096 | case BFD_RELOC_THUMB_PCREL_BRANCH23: |
| 18097 | if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff)) |
| 18098 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18099 | _("branch out of range")); |
| 18100 | |
| 18101 | if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX) |
| 18102 | /* For a BLX instruction, make sure that the relocation is rounded up |
| 18103 | to a word boundary. This follows the semantics of the instruction |
| 18104 | which specifies that bit 1 of the target address will come from bit |
| 18105 | 1 of the base address. */ |
| 18106 | value = (value + 1) & ~ 1; |
| 18107 | |
| 18108 | if (fixP->fx_done || !seg->use_rela_p) |
| 18109 | { |
| 18110 | offsetT newval2; |
| 18111 | |
| 18112 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18113 | newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); |
| 18114 | newval |= (value & 0x7fffff) >> 12; |
| 18115 | newval2 |= (value & 0xfff) >> 1; |
| 18116 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18117 | md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); |
| 18118 | } |
| 18119 | break; |
| 18120 | |
| 18121 | case BFD_RELOC_THUMB_PCREL_BRANCH25: |
| 18122 | if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff)) |
| 18123 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18124 | _("branch out of range")); |
| 18125 | |
| 18126 | if (fixP->fx_done || !seg->use_rela_p) |
| 18127 | { |
| 18128 | offsetT newval2; |
| 18129 | addressT S, I1, I2, lo, hi; |
| 18130 | |
| 18131 | S = (value & 0x01000000) >> 24; |
| 18132 | I1 = (value & 0x00800000) >> 23; |
| 18133 | I2 = (value & 0x00400000) >> 22; |
| 18134 | hi = (value & 0x003ff000) >> 12; |
| 18135 | lo = (value & 0x00000ffe) >> 1; |
| 18136 | |
| 18137 | I1 = !(I1 ^ S); |
| 18138 | I2 = !(I2 ^ S); |
| 18139 | |
| 18140 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18141 | newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); |
| 18142 | newval |= (S << 10) | hi; |
| 18143 | newval2 |= (I1 << 13) | (I2 << 11) | lo; |
| 18144 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18145 | md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); |
| 18146 | } |
| 18147 | break; |
| 18148 | |
| 18149 | case BFD_RELOC_8: |
| 18150 | if (fixP->fx_done || !seg->use_rela_p) |
| 18151 | md_number_to_chars (buf, value, 1); |
| 18152 | break; |
| 18153 | |
| 18154 | case BFD_RELOC_16: |
| 18155 | if (fixP->fx_done || !seg->use_rela_p) |
| 18156 | md_number_to_chars (buf, value, 2); |
| 18157 | break; |
| 18158 | |
| 18159 | #ifdef OBJ_ELF |
| 18160 | case BFD_RELOC_ARM_TLS_GD32: |
| 18161 | case BFD_RELOC_ARM_TLS_LE32: |
| 18162 | case BFD_RELOC_ARM_TLS_IE32: |
| 18163 | case BFD_RELOC_ARM_TLS_LDM32: |
| 18164 | case BFD_RELOC_ARM_TLS_LDO32: |
| 18165 | S_SET_THREAD_LOCAL (fixP->fx_addsy); |
| 18166 | /* fall through */ |
| 18167 | |
| 18168 | case BFD_RELOC_ARM_GOT32: |
| 18169 | case BFD_RELOC_ARM_GOTOFF: |
| 18170 | case BFD_RELOC_ARM_TARGET2: |
| 18171 | if (fixP->fx_done || !seg->use_rela_p) |
| 18172 | md_number_to_chars (buf, 0, 4); |
| 18173 | break; |
| 18174 | #endif |
| 18175 | |
| 18176 | case BFD_RELOC_RVA: |
| 18177 | case BFD_RELOC_32: |
| 18178 | case BFD_RELOC_ARM_TARGET1: |
| 18179 | case BFD_RELOC_ARM_ROSEGREL32: |
| 18180 | case BFD_RELOC_ARM_SBREL32: |
| 18181 | case BFD_RELOC_32_PCREL: |
| 18182 | #ifdef TE_PE |
| 18183 | case BFD_RELOC_32_SECREL: |
| 18184 | #endif |
| 18185 | if (fixP->fx_done || !seg->use_rela_p) |
| 18186 | #ifdef TE_WINCE |
| 18187 | /* For WinCE we only do this for pcrel fixups. */ |
| 18188 | if (fixP->fx_done || fixP->fx_pcrel) |
| 18189 | #endif |
| 18190 | md_number_to_chars (buf, value, 4); |
| 18191 | break; |
| 18192 | |
| 18193 | #ifdef OBJ_ELF |
| 18194 | case BFD_RELOC_ARM_PREL31: |
| 18195 | if (fixP->fx_done || !seg->use_rela_p) |
| 18196 | { |
| 18197 | newval = md_chars_to_number (buf, 4) & 0x80000000; |
| 18198 | if ((value ^ (value >> 1)) & 0x40000000) |
| 18199 | { |
| 18200 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18201 | _("rel31 relocation overflow")); |
| 18202 | } |
| 18203 | newval |= value & 0x7fffffff; |
| 18204 | md_number_to_chars (buf, newval, 4); |
| 18205 | } |
| 18206 | break; |
| 18207 | #endif |
| 18208 | |
| 18209 | case BFD_RELOC_ARM_CP_OFF_IMM: |
| 18210 | case BFD_RELOC_ARM_T32_CP_OFF_IMM: |
| 18211 | if (value < -1023 || value > 1023 || (value & 3)) |
| 18212 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18213 | _("co-processor offset out of range")); |
| 18214 | cp_off_common: |
| 18215 | sign = value >= 0; |
| 18216 | if (value < 0) |
| 18217 | value = -value; |
| 18218 | if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM |
| 18219 | || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2) |
| 18220 | newval = md_chars_to_number (buf, INSN_SIZE); |
| 18221 | else |
| 18222 | newval = get_thumb32_insn (buf); |
| 18223 | newval &= 0xff7fff00; |
| 18224 | newval |= (value >> 2) | (sign ? INDEX_UP : 0); |
| 18225 | if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM |
| 18226 | || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2) |
| 18227 | md_number_to_chars (buf, newval, INSN_SIZE); |
| 18228 | else |
| 18229 | put_thumb32_insn (buf, newval); |
| 18230 | break; |
| 18231 | |
| 18232 | case BFD_RELOC_ARM_CP_OFF_IMM_S2: |
| 18233 | case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2: |
| 18234 | if (value < -255 || value > 255) |
| 18235 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18236 | _("co-processor offset out of range")); |
| 18237 | value *= 4; |
| 18238 | goto cp_off_common; |
| 18239 | |
| 18240 | case BFD_RELOC_ARM_THUMB_OFFSET: |
| 18241 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18242 | /* Exactly what ranges, and where the offset is inserted depends |
| 18243 | on the type of instruction, we can establish this from the |
| 18244 | top 4 bits. */ |
| 18245 | switch (newval >> 12) |
| 18246 | { |
| 18247 | case 4: /* PC load. */ |
| 18248 | /* Thumb PC loads are somewhat odd, bit 1 of the PC is |
| 18249 | forced to zero for these loads; md_pcrel_from has already |
| 18250 | compensated for this. */ |
| 18251 | if (value & 3) |
| 18252 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18253 | _("invalid offset, target not word aligned (0x%08lX)"), |
| 18254 | (((unsigned long) fixP->fx_frag->fr_address |
| 18255 | + (unsigned long) fixP->fx_where) & ~3) |
| 18256 | + (unsigned long) value); |
| 18257 | |
| 18258 | if (value & ~0x3fc) |
| 18259 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18260 | _("invalid offset, value too big (0x%08lX)"), |
| 18261 | (long) value); |
| 18262 | |
| 18263 | newval |= value >> 2; |
| 18264 | break; |
| 18265 | |
| 18266 | case 9: /* SP load/store. */ |
| 18267 | if (value & ~0x3fc) |
| 18268 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18269 | _("invalid offset, value too big (0x%08lX)"), |
| 18270 | (long) value); |
| 18271 | newval |= value >> 2; |
| 18272 | break; |
| 18273 | |
| 18274 | case 6: /* Word load/store. */ |
| 18275 | if (value & ~0x7c) |
| 18276 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18277 | _("invalid offset, value too big (0x%08lX)"), |
| 18278 | (long) value); |
| 18279 | newval |= value << 4; /* 6 - 2. */ |
| 18280 | break; |
| 18281 | |
| 18282 | case 7: /* Byte load/store. */ |
| 18283 | if (value & ~0x1f) |
| 18284 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18285 | _("invalid offset, value too big (0x%08lX)"), |
| 18286 | (long) value); |
| 18287 | newval |= value << 6; |
| 18288 | break; |
| 18289 | |
| 18290 | case 8: /* Halfword load/store. */ |
| 18291 | if (value & ~0x3e) |
| 18292 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18293 | _("invalid offset, value too big (0x%08lX)"), |
| 18294 | (long) value); |
| 18295 | newval |= value << 5; /* 6 - 1. */ |
| 18296 | break; |
| 18297 | |
| 18298 | default: |
| 18299 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18300 | "Unable to process relocation for thumb opcode: %lx", |
| 18301 | (unsigned long) newval); |
| 18302 | break; |
| 18303 | } |
| 18304 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18305 | break; |
| 18306 | |
| 18307 | case BFD_RELOC_ARM_THUMB_ADD: |
| 18308 | /* This is a complicated relocation, since we use it for all of |
| 18309 | the following immediate relocations: |
| 18310 | |
| 18311 | 3bit ADD/SUB |
| 18312 | 8bit ADD/SUB |
| 18313 | 9bit ADD/SUB SP word-aligned |
| 18314 | 10bit ADD PC/SP word-aligned |
| 18315 | |
| 18316 | The type of instruction being processed is encoded in the |
| 18317 | instruction field: |
| 18318 | |
| 18319 | 0x8000 SUB |
| 18320 | 0x00F0 Rd |
| 18321 | 0x000F Rs |
| 18322 | */ |
| 18323 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18324 | { |
| 18325 | int rd = (newval >> 4) & 0xf; |
| 18326 | int rs = newval & 0xf; |
| 18327 | int subtract = !!(newval & 0x8000); |
| 18328 | |
| 18329 | /* Check for HI regs, only very restricted cases allowed: |
| 18330 | Adjusting SP, and using PC or SP to get an address. */ |
| 18331 | if ((rd > 7 && (rd != REG_SP || rs != REG_SP)) |
| 18332 | || (rs > 7 && rs != REG_SP && rs != REG_PC)) |
| 18333 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18334 | _("invalid Hi register with immediate")); |
| 18335 | |
| 18336 | /* If value is negative, choose the opposite instruction. */ |
| 18337 | if (value < 0) |
| 18338 | { |
| 18339 | value = -value; |
| 18340 | subtract = !subtract; |
| 18341 | if (value < 0) |
| 18342 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18343 | _("immediate value out of range")); |
| 18344 | } |
| 18345 | |
| 18346 | if (rd == REG_SP) |
| 18347 | { |
| 18348 | if (value & ~0x1fc) |
| 18349 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18350 | _("invalid immediate for stack address calculation")); |
| 18351 | newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST; |
| 18352 | newval |= value >> 2; |
| 18353 | } |
| 18354 | else if (rs == REG_PC || rs == REG_SP) |
| 18355 | { |
| 18356 | if (subtract || value & ~0x3fc) |
| 18357 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18358 | _("invalid immediate for address calculation (value = 0x%08lX)"), |
| 18359 | (unsigned long) value); |
| 18360 | newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP); |
| 18361 | newval |= rd << 8; |
| 18362 | newval |= value >> 2; |
| 18363 | } |
| 18364 | else if (rs == rd) |
| 18365 | { |
| 18366 | if (value & ~0xff) |
| 18367 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18368 | _("immediate value out of range")); |
| 18369 | newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8; |
| 18370 | newval |= (rd << 8) | value; |
| 18371 | } |
| 18372 | else |
| 18373 | { |
| 18374 | if (value & ~0x7) |
| 18375 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18376 | _("immediate value out of range")); |
| 18377 | newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3; |
| 18378 | newval |= rd | (rs << 3) | (value << 6); |
| 18379 | } |
| 18380 | } |
| 18381 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18382 | break; |
| 18383 | |
| 18384 | case BFD_RELOC_ARM_THUMB_IMM: |
| 18385 | newval = md_chars_to_number (buf, THUMB_SIZE); |
| 18386 | if (value < 0 || value > 255) |
| 18387 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18388 | _("invalid immediate: %ld is too large"), |
| 18389 | (long) value); |
| 18390 | newval |= value; |
| 18391 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18392 | break; |
| 18393 | |
| 18394 | case BFD_RELOC_ARM_THUMB_SHIFT: |
| 18395 | /* 5bit shift value (0..32). LSL cannot take 32. */ |
| 18396 | newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f; |
| 18397 | temp = newval & 0xf800; |
| 18398 | if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I)) |
| 18399 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18400 | _("invalid shift value: %ld"), (long) value); |
| 18401 | /* Shifts of zero must be encoded as LSL. */ |
| 18402 | if (value == 0) |
| 18403 | newval = (newval & 0x003f) | T_OPCODE_LSL_I; |
| 18404 | /* Shifts of 32 are encoded as zero. */ |
| 18405 | else if (value == 32) |
| 18406 | value = 0; |
| 18407 | newval |= value << 6; |
| 18408 | md_number_to_chars (buf, newval, THUMB_SIZE); |
| 18409 | break; |
| 18410 | |
| 18411 | case BFD_RELOC_VTABLE_INHERIT: |
| 18412 | case BFD_RELOC_VTABLE_ENTRY: |
| 18413 | fixP->fx_done = 0; |
| 18414 | return; |
| 18415 | |
| 18416 | case BFD_RELOC_ARM_MOVW: |
| 18417 | case BFD_RELOC_ARM_MOVT: |
| 18418 | case BFD_RELOC_ARM_THUMB_MOVW: |
| 18419 | case BFD_RELOC_ARM_THUMB_MOVT: |
| 18420 | if (fixP->fx_done || !seg->use_rela_p) |
| 18421 | { |
| 18422 | /* REL format relocations are limited to a 16-bit addend. */ |
| 18423 | if (!fixP->fx_done) |
| 18424 | { |
| 18425 | if (value < -0x1000 || value > 0xffff) |
| 18426 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18427 | _("offset too big")); |
| 18428 | } |
| 18429 | else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT |
| 18430 | || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) |
| 18431 | { |
| 18432 | value >>= 16; |
| 18433 | } |
| 18434 | |
| 18435 | if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW |
| 18436 | || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) |
| 18437 | { |
| 18438 | newval = get_thumb32_insn (buf); |
| 18439 | newval &= 0xfbf08f00; |
| 18440 | newval |= (value & 0xf000) << 4; |
| 18441 | newval |= (value & 0x0800) << 15; |
| 18442 | newval |= (value & 0x0700) << 4; |
| 18443 | newval |= (value & 0x00ff); |
| 18444 | put_thumb32_insn (buf, newval); |
| 18445 | } |
| 18446 | else |
| 18447 | { |
| 18448 | newval = md_chars_to_number (buf, 4); |
| 18449 | newval &= 0xfff0f000; |
| 18450 | newval |= value & 0x0fff; |
| 18451 | newval |= (value & 0xf000) << 4; |
| 18452 | md_number_to_chars (buf, newval, 4); |
| 18453 | } |
| 18454 | } |
| 18455 | return; |
| 18456 | |
| 18457 | case BFD_RELOC_ARM_ALU_PC_G0_NC: |
| 18458 | case BFD_RELOC_ARM_ALU_PC_G0: |
| 18459 | case BFD_RELOC_ARM_ALU_PC_G1_NC: |
| 18460 | case BFD_RELOC_ARM_ALU_PC_G1: |
| 18461 | case BFD_RELOC_ARM_ALU_PC_G2: |
| 18462 | case BFD_RELOC_ARM_ALU_SB_G0_NC: |
| 18463 | case BFD_RELOC_ARM_ALU_SB_G0: |
| 18464 | case BFD_RELOC_ARM_ALU_SB_G1_NC: |
| 18465 | case BFD_RELOC_ARM_ALU_SB_G1: |
| 18466 | case BFD_RELOC_ARM_ALU_SB_G2: |
| 18467 | assert (!fixP->fx_done); |
| 18468 | if (!seg->use_rela_p) |
| 18469 | { |
| 18470 | bfd_vma insn; |
| 18471 | bfd_vma encoded_addend; |
| 18472 | bfd_vma addend_abs = abs (value); |
| 18473 | |
| 18474 | /* Check that the absolute value of the addend can be |
| 18475 | expressed as an 8-bit constant plus a rotation. */ |
| 18476 | encoded_addend = encode_arm_immediate (addend_abs); |
| 18477 | if (encoded_addend == (unsigned int) FAIL) |
| 18478 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18479 | _("the offset 0x%08lX is not representable"), |
| 18480 | addend_abs); |
| 18481 | |
| 18482 | /* Extract the instruction. */ |
| 18483 | insn = md_chars_to_number (buf, INSN_SIZE); |
| 18484 | |
| 18485 | /* If the addend is positive, use an ADD instruction. |
| 18486 | Otherwise use a SUB. Take care not to destroy the S bit. */ |
| 18487 | insn &= 0xff1fffff; |
| 18488 | if (value < 0) |
| 18489 | insn |= 1 << 22; |
| 18490 | else |
| 18491 | insn |= 1 << 23; |
| 18492 | |
| 18493 | /* Place the encoded addend into the first 12 bits of the |
| 18494 | instruction. */ |
| 18495 | insn &= 0xfffff000; |
| 18496 | insn |= encoded_addend; |
| 18497 | |
| 18498 | /* Update the instruction. */ |
| 18499 | md_number_to_chars (buf, insn, INSN_SIZE); |
| 18500 | } |
| 18501 | break; |
| 18502 | |
| 18503 | case BFD_RELOC_ARM_LDR_PC_G0: |
| 18504 | case BFD_RELOC_ARM_LDR_PC_G1: |
| 18505 | case BFD_RELOC_ARM_LDR_PC_G2: |
| 18506 | case BFD_RELOC_ARM_LDR_SB_G0: |
| 18507 | case BFD_RELOC_ARM_LDR_SB_G1: |
| 18508 | case BFD_RELOC_ARM_LDR_SB_G2: |
| 18509 | assert (!fixP->fx_done); |
| 18510 | if (!seg->use_rela_p) |
| 18511 | { |
| 18512 | bfd_vma insn; |
| 18513 | bfd_vma addend_abs = abs (value); |
| 18514 | |
| 18515 | /* Check that the absolute value of the addend can be |
| 18516 | encoded in 12 bits. */ |
| 18517 | if (addend_abs >= 0x1000) |
| 18518 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18519 | _("bad offset 0x%08lX (only 12 bits available for the magnitude)"), |
| 18520 | addend_abs); |
| 18521 | |
| 18522 | /* Extract the instruction. */ |
| 18523 | insn = md_chars_to_number (buf, INSN_SIZE); |
| 18524 | |
| 18525 | /* If the addend is negative, clear bit 23 of the instruction. |
| 18526 | Otherwise set it. */ |
| 18527 | if (value < 0) |
| 18528 | insn &= ~(1 << 23); |
| 18529 | else |
| 18530 | insn |= 1 << 23; |
| 18531 | |
| 18532 | /* Place the absolute value of the addend into the first 12 bits |
| 18533 | of the instruction. */ |
| 18534 | insn &= 0xfffff000; |
| 18535 | insn |= addend_abs; |
| 18536 | |
| 18537 | /* Update the instruction. */ |
| 18538 | md_number_to_chars (buf, insn, INSN_SIZE); |
| 18539 | } |
| 18540 | break; |
| 18541 | |
| 18542 | case BFD_RELOC_ARM_LDRS_PC_G0: |
| 18543 | case BFD_RELOC_ARM_LDRS_PC_G1: |
| 18544 | case BFD_RELOC_ARM_LDRS_PC_G2: |
| 18545 | case BFD_RELOC_ARM_LDRS_SB_G0: |
| 18546 | case BFD_RELOC_ARM_LDRS_SB_G1: |
| 18547 | case BFD_RELOC_ARM_LDRS_SB_G2: |
| 18548 | assert (!fixP->fx_done); |
| 18549 | if (!seg->use_rela_p) |
| 18550 | { |
| 18551 | bfd_vma insn; |
| 18552 | bfd_vma addend_abs = abs (value); |
| 18553 | |
| 18554 | /* Check that the absolute value of the addend can be |
| 18555 | encoded in 8 bits. */ |
| 18556 | if (addend_abs >= 0x100) |
| 18557 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18558 | _("bad offset 0x%08lX (only 8 bits available for the magnitude)"), |
| 18559 | addend_abs); |
| 18560 | |
| 18561 | /* Extract the instruction. */ |
| 18562 | insn = md_chars_to_number (buf, INSN_SIZE); |
| 18563 | |
| 18564 | /* If the addend is negative, clear bit 23 of the instruction. |
| 18565 | Otherwise set it. */ |
| 18566 | if (value < 0) |
| 18567 | insn &= ~(1 << 23); |
| 18568 | else |
| 18569 | insn |= 1 << 23; |
| 18570 | |
| 18571 | /* Place the first four bits of the absolute value of the addend |
| 18572 | into the first 4 bits of the instruction, and the remaining |
| 18573 | four into bits 8 .. 11. */ |
| 18574 | insn &= 0xfffff0f0; |
| 18575 | insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4); |
| 18576 | |
| 18577 | /* Update the instruction. */ |
| 18578 | md_number_to_chars (buf, insn, INSN_SIZE); |
| 18579 | } |
| 18580 | break; |
| 18581 | |
| 18582 | case BFD_RELOC_ARM_LDC_PC_G0: |
| 18583 | case BFD_RELOC_ARM_LDC_PC_G1: |
| 18584 | case BFD_RELOC_ARM_LDC_PC_G2: |
| 18585 | case BFD_RELOC_ARM_LDC_SB_G0: |
| 18586 | case BFD_RELOC_ARM_LDC_SB_G1: |
| 18587 | case BFD_RELOC_ARM_LDC_SB_G2: |
| 18588 | assert (!fixP->fx_done); |
| 18589 | if (!seg->use_rela_p) |
| 18590 | { |
| 18591 | bfd_vma insn; |
| 18592 | bfd_vma addend_abs = abs (value); |
| 18593 | |
| 18594 | /* Check that the absolute value of the addend is a multiple of |
| 18595 | four and, when divided by four, fits in 8 bits. */ |
| 18596 | if (addend_abs & 0x3) |
| 18597 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18598 | _("bad offset 0x%08lX (must be word-aligned)"), |
| 18599 | addend_abs); |
| 18600 | |
| 18601 | if ((addend_abs >> 2) > 0xff) |
| 18602 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18603 | _("bad offset 0x%08lX (must be an 8-bit number of words)"), |
| 18604 | addend_abs); |
| 18605 | |
| 18606 | /* Extract the instruction. */ |
| 18607 | insn = md_chars_to_number (buf, INSN_SIZE); |
| 18608 | |
| 18609 | /* If the addend is negative, clear bit 23 of the instruction. |
| 18610 | Otherwise set it. */ |
| 18611 | if (value < 0) |
| 18612 | insn &= ~(1 << 23); |
| 18613 | else |
| 18614 | insn |= 1 << 23; |
| 18615 | |
| 18616 | /* Place the addend (divided by four) into the first eight |
| 18617 | bits of the instruction. */ |
| 18618 | insn &= 0xfffffff0; |
| 18619 | insn |= addend_abs >> 2; |
| 18620 | |
| 18621 | /* Update the instruction. */ |
| 18622 | md_number_to_chars (buf, insn, INSN_SIZE); |
| 18623 | } |
| 18624 | break; |
| 18625 | |
| 18626 | case BFD_RELOC_UNUSED: |
| 18627 | default: |
| 18628 | as_bad_where (fixP->fx_file, fixP->fx_line, |
| 18629 | _("bad relocation fixup type (%d)"), fixP->fx_r_type); |
| 18630 | } |
| 18631 | } |
| 18632 | |
| 18633 | /* Translate internal representation of relocation info to BFD target |
| 18634 | format. */ |
| 18635 | |
| 18636 | arelent * |
| 18637 | tc_gen_reloc (asection *section, fixS *fixp) |
| 18638 | { |
| 18639 | arelent * reloc; |
| 18640 | bfd_reloc_code_real_type code; |
| 18641 | |
| 18642 | reloc = xmalloc (sizeof (arelent)); |
| 18643 | |
| 18644 | reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *)); |
| 18645 | *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); |
| 18646 | reloc->address = fixp->fx_frag->fr_address + fixp->fx_where; |
| 18647 | |
| 18648 | if (fixp->fx_pcrel) |
| 18649 | { |
| 18650 | if (section->use_rela_p) |
| 18651 | fixp->fx_offset -= md_pcrel_from_section (fixp, section); |
| 18652 | else |
| 18653 | fixp->fx_offset = reloc->address; |
| 18654 | } |
| 18655 | reloc->addend = fixp->fx_offset; |
| 18656 | |
| 18657 | switch (fixp->fx_r_type) |
| 18658 | { |
| 18659 | case BFD_RELOC_8: |
| 18660 | if (fixp->fx_pcrel) |
| 18661 | { |
| 18662 | code = BFD_RELOC_8_PCREL; |
| 18663 | break; |
| 18664 | } |
| 18665 | |
| 18666 | case BFD_RELOC_16: |
| 18667 | if (fixp->fx_pcrel) |
| 18668 | { |
| 18669 | code = BFD_RELOC_16_PCREL; |
| 18670 | break; |
| 18671 | } |
| 18672 | |
| 18673 | case BFD_RELOC_32: |
| 18674 | if (fixp->fx_pcrel) |
| 18675 | { |
| 18676 | code = BFD_RELOC_32_PCREL; |
| 18677 | break; |
| 18678 | } |
| 18679 | |
| 18680 | case BFD_RELOC_ARM_MOVW: |
| 18681 | if (fixp->fx_pcrel) |
| 18682 | { |
| 18683 | code = BFD_RELOC_ARM_MOVW_PCREL; |
| 18684 | break; |
| 18685 | } |
| 18686 | |
| 18687 | case BFD_RELOC_ARM_MOVT: |
| 18688 | if (fixp->fx_pcrel) |
| 18689 | { |
| 18690 | code = BFD_RELOC_ARM_MOVT_PCREL; |
| 18691 | break; |
| 18692 | } |
| 18693 | |
| 18694 | case BFD_RELOC_ARM_THUMB_MOVW: |
| 18695 | if (fixp->fx_pcrel) |
| 18696 | { |
| 18697 | code = BFD_RELOC_ARM_THUMB_MOVW_PCREL; |
| 18698 | break; |
| 18699 | } |
| 18700 | |
| 18701 | case BFD_RELOC_ARM_THUMB_MOVT: |
| 18702 | if (fixp->fx_pcrel) |
| 18703 | { |
| 18704 | code = BFD_RELOC_ARM_THUMB_MOVT_PCREL; |
| 18705 | break; |
| 18706 | } |
| 18707 | |
| 18708 | case BFD_RELOC_NONE: |
| 18709 | case BFD_RELOC_ARM_PCREL_BRANCH: |
| 18710 | case BFD_RELOC_ARM_PCREL_BLX: |
| 18711 | case BFD_RELOC_RVA: |
| 18712 | case BFD_RELOC_THUMB_PCREL_BRANCH7: |
| 18713 | case BFD_RELOC_THUMB_PCREL_BRANCH9: |
| 18714 | case BFD_RELOC_THUMB_PCREL_BRANCH12: |
| 18715 | case BFD_RELOC_THUMB_PCREL_BRANCH20: |
| 18716 | case BFD_RELOC_THUMB_PCREL_BRANCH23: |
| 18717 | case BFD_RELOC_THUMB_PCREL_BRANCH25: |
| 18718 | case BFD_RELOC_THUMB_PCREL_BLX: |
| 18719 | case BFD_RELOC_VTABLE_ENTRY: |
| 18720 | case BFD_RELOC_VTABLE_INHERIT: |
| 18721 | #ifdef TE_PE |
| 18722 | case BFD_RELOC_32_SECREL: |
| 18723 | #endif |
| 18724 | code = fixp->fx_r_type; |
| 18725 | break; |
| 18726 | |
| 18727 | case BFD_RELOC_ARM_LITERAL: |
| 18728 | case BFD_RELOC_ARM_HWLITERAL: |
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
| 18731 | as_bad_where (fixp->fx_file, fixp->fx_line, |
| 18732 | _("literal referenced across section boundary")); |
| 18733 | return NULL; |
| 18734 | |
| 18735 | #ifdef OBJ_ELF |
| 18736 | case BFD_RELOC_ARM_GOT32: |
| 18737 | case BFD_RELOC_ARM_GOTOFF: |
| 18738 | case BFD_RELOC_ARM_PLT32: |
| 18739 | case BFD_RELOC_ARM_TARGET1: |
| 18740 | case BFD_RELOC_ARM_ROSEGREL32: |
| 18741 | case BFD_RELOC_ARM_SBREL32: |
| 18742 | case BFD_RELOC_ARM_PREL31: |
| 18743 | case BFD_RELOC_ARM_TARGET2: |
| 18744 | case BFD_RELOC_ARM_TLS_LE32: |
| 18745 | case BFD_RELOC_ARM_TLS_LDO32: |
| 18746 | case BFD_RELOC_ARM_PCREL_CALL: |
| 18747 | case BFD_RELOC_ARM_PCREL_JUMP: |
| 18748 | case BFD_RELOC_ARM_ALU_PC_G0_NC: |
| 18749 | case BFD_RELOC_ARM_ALU_PC_G0: |
| 18750 | case BFD_RELOC_ARM_ALU_PC_G1_NC: |
| 18751 | case BFD_RELOC_ARM_ALU_PC_G1: |
| 18752 | case BFD_RELOC_ARM_ALU_PC_G2: |
| 18753 | case BFD_RELOC_ARM_LDR_PC_G0: |
| 18754 | case BFD_RELOC_ARM_LDR_PC_G1: |
| 18755 | case BFD_RELOC_ARM_LDR_PC_G2: |
| 18756 | case BFD_RELOC_ARM_LDRS_PC_G0: |
| 18757 | case BFD_RELOC_ARM_LDRS_PC_G1: |
| 18758 | case BFD_RELOC_ARM_LDRS_PC_G2: |
| 18759 | case BFD_RELOC_ARM_LDC_PC_G0: |
| 18760 | case BFD_RELOC_ARM_LDC_PC_G1: |
| 18761 | case BFD_RELOC_ARM_LDC_PC_G2: |
| 18762 | case BFD_RELOC_ARM_ALU_SB_G0_NC: |
| 18763 | case BFD_RELOC_ARM_ALU_SB_G0: |
| 18764 | case BFD_RELOC_ARM_ALU_SB_G1_NC: |
| 18765 | case BFD_RELOC_ARM_ALU_SB_G1: |
| 18766 | case BFD_RELOC_ARM_ALU_SB_G2: |
| 18767 | case BFD_RELOC_ARM_LDR_SB_G0: |
| 18768 | case BFD_RELOC_ARM_LDR_SB_G1: |
| 18769 | case BFD_RELOC_ARM_LDR_SB_G2: |
| 18770 | case BFD_RELOC_ARM_LDRS_SB_G0: |
| 18771 | case BFD_RELOC_ARM_LDRS_SB_G1: |
| 18772 | case BFD_RELOC_ARM_LDRS_SB_G2: |
| 18773 | case BFD_RELOC_ARM_LDC_SB_G0: |
| 18774 | case BFD_RELOC_ARM_LDC_SB_G1: |
| 18775 | case BFD_RELOC_ARM_LDC_SB_G2: |
| 18776 | code = fixp->fx_r_type; |
| 18777 | break; |
| 18778 | |
| 18779 | case BFD_RELOC_ARM_TLS_GD32: |
| 18780 | case BFD_RELOC_ARM_TLS_IE32: |
| 18781 | case BFD_RELOC_ARM_TLS_LDM32: |
| 18782 | /* BFD will include the symbol's address in the addend. |
| 18783 | But we don't want that, so subtract it out again here. */ |
| 18784 | if (!S_IS_COMMON (fixp->fx_addsy)) |
| 18785 | reloc->addend -= (*reloc->sym_ptr_ptr)->value; |
| 18786 | code = fixp->fx_r_type; |
| 18787 | break; |
| 18788 | #endif |
| 18789 | |
| 18790 | case BFD_RELOC_ARM_IMMEDIATE: |
| 18791 | as_bad_where (fixp->fx_file, fixp->fx_line, |
| 18792 | _("internal relocation (type: IMMEDIATE) not fixed up")); |
| 18793 | return NULL; |
| 18794 | |
| 18795 | case BFD_RELOC_ARM_ADRL_IMMEDIATE: |
| 18796 | as_bad_where (fixp->fx_file, fixp->fx_line, |
| 18797 | _("ADRL used for a symbol not defined in the same file")); |
| 18798 | return NULL; |
| 18799 | |
| 18800 | case BFD_RELOC_ARM_OFFSET_IMM: |
| 18801 | if (section->use_rela_p) |
| 18802 | { |
| 18803 | code = fixp->fx_r_type; |
| 18804 | break; |
| 18805 | } |
| 18806 | |
| 18807 | if (fixp->fx_addsy != NULL |
| 18808 | && !S_IS_DEFINED (fixp->fx_addsy) |
| 18809 | && S_IS_LOCAL (fixp->fx_addsy)) |
| 18810 | { |
| 18811 | as_bad_where (fixp->fx_file, fixp->fx_line, |
| 18812 | _("undefined local label `%s'"), |
| 18813 | S_GET_NAME (fixp->fx_addsy)); |
| 18814 | return NULL; |
| 18815 | } |
| 18816 | |
| 18817 | as_bad_where (fixp->fx_file, fixp->fx_line, |
| 18818 | _("internal_relocation (type: OFFSET_IMM) not fixed up")); |
| 18819 | return NULL; |
| 18820 | |
| 18821 | default: |
| 18822 | { |
| 18823 | char * type; |
| 18824 | |
| 18825 | switch (fixp->fx_r_type) |
| 18826 | { |
| 18827 | case BFD_RELOC_NONE: type = "NONE"; break; |
| 18828 | case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break; |
| 18829 | case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break; |
| 18830 | case BFD_RELOC_ARM_SMC: type = "SMC"; break; |
| 18831 | case BFD_RELOC_ARM_SWI: type = "SWI"; break; |
| 18832 | case BFD_RELOC_ARM_MULTI: type = "MULTI"; break; |
| 18833 | case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break; |
| 18834 | case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break; |
| 18835 | case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break; |
| 18836 | case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break; |
| 18837 | case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break; |
| 18838 | case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break; |
| 18839 | default: type = _("<unknown>"); break; |
| 18840 | } |
| 18841 | as_bad_where (fixp->fx_file, fixp->fx_line, |
| 18842 | _("cannot represent %s relocation in this object file format"), |
| 18843 | type); |
| 18844 | return NULL; |
| 18845 | } |
| 18846 | } |
| 18847 | |
| 18848 | #ifdef OBJ_ELF |
| 18849 | if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32) |
| 18850 | && GOT_symbol |
| 18851 | && fixp->fx_addsy == GOT_symbol) |
| 18852 | { |
| 18853 | code = BFD_RELOC_ARM_GOTPC; |
| 18854 | reloc->addend = fixp->fx_offset = reloc->address; |
| 18855 | } |
| 18856 | #endif |
| 18857 | |
| 18858 | reloc->howto = bfd_reloc_type_lookup (stdoutput, code); |
| 18859 | |
| 18860 | if (reloc->howto == NULL) |
| 18861 | { |
| 18862 | as_bad_where (fixp->fx_file, fixp->fx_line, |
| 18863 | _("cannot represent %s relocation in this object file format"), |
| 18864 | bfd_get_reloc_code_name (code)); |
| 18865 | return NULL; |
| 18866 | } |
| 18867 | |
| 18868 | /* HACK: Since arm ELF uses Rel instead of Rela, encode the |
| 18869 | vtable entry to be used in the relocation's section offset. */ |
| 18870 | if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY) |
| 18871 | reloc->address = fixp->fx_offset; |
| 18872 | |
| 18873 | return reloc; |
| 18874 | } |
| 18875 | |
| 18876 | /* This fix_new is called by cons via TC_CONS_FIX_NEW. */ |
| 18877 | |
| 18878 | void |
| 18879 | cons_fix_new_arm (fragS * frag, |
| 18880 | int where, |
| 18881 | int size, |
| 18882 | expressionS * exp) |
| 18883 | { |
| 18884 | bfd_reloc_code_real_type type; |
| 18885 | int pcrel = 0; |
| 18886 | |
| 18887 | /* Pick a reloc. |
| 18888 | FIXME: @@ Should look at CPU word size. */ |
| 18889 | switch (size) |
| 18890 | { |
| 18891 | case 1: |
| 18892 | type = BFD_RELOC_8; |
| 18893 | break; |
| 18894 | case 2: |
| 18895 | type = BFD_RELOC_16; |
| 18896 | break; |
| 18897 | case 4: |
| 18898 | default: |
| 18899 | type = BFD_RELOC_32; |
| 18900 | break; |
| 18901 | case 8: |
| 18902 | type = BFD_RELOC_64; |
| 18903 | break; |
| 18904 | } |
| 18905 | |
| 18906 | #ifdef TE_PE |
| 18907 | if (exp->X_op == O_secrel) |
| 18908 | { |
| 18909 | exp->X_op = O_symbol; |
| 18910 | type = BFD_RELOC_32_SECREL; |
| 18911 | } |
| 18912 | #endif |
| 18913 | |
| 18914 | fix_new_exp (frag, where, (int) size, exp, pcrel, type); |
| 18915 | } |
| 18916 | |
#if defined OBJ_COFF || defined OBJ_ELF
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23)
    return;

  if (fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
| 18934 | |
| 18935 | int |
| 18936 | arm_force_relocation (struct fix * fixp) |
| 18937 | { |
| 18938 | #if defined (OBJ_COFF) && defined (TE_PE) |
| 18939 | if (fixp->fx_r_type == BFD_RELOC_RVA) |
| 18940 | return 1; |
| 18941 | #endif |
| 18942 | |
| 18943 | /* Resolve these relocations even if the symbol is extern or weak. */ |
| 18944 | if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE |
| 18945 | || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM |
| 18946 | || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE |
| 18947 | || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM |
| 18948 | || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE |
| 18949 | || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12 |
| 18950 | || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12) |
| 18951 | return 0; |
| 18952 | |
| 18953 | /* Always leave these relocations for the linker. */ |
| 18954 | if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC |
| 18955 | && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2) |
| 18956 | || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0) |
| 18957 | return 1; |
| 18958 | |
| 18959 | return generic_force_reloc (fixp); |
| 18960 | } |
| 18961 | |
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Decide whether the relocation in FIXP may be redirected against a
   section symbol (returns 1) or must stay against the named symbol
   (returns 0).  Relocations against function names must be left
   unadjusted, so that the linker can use this information to generate
   interworking stubs.  The MIPS version of this function also prevents
   relocations that are mips-16 specific, but I do not know why it does
   this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return 0;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return 0;

  switch (fixP->fx_r_type)
    {
    /* We need the symbol name for the VTABLE entries.  */
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
    /* Don't allow symbols to be discarded on GOT related relocs.  */
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_TARGET2:
      return 0;

    default:
      break;
    }

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 0;

  return 1;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
| 19016 | |
| 19017 | #ifdef OBJ_ELF |
| 19018 | |
| 19019 | const char * |
| 19020 | elf32_arm_target_format (void) |
| 19021 | { |
| 19022 | #ifdef TE_SYMBIAN |
| 19023 | return (target_big_endian |
| 19024 | ? "elf32-bigarm-symbian" |
| 19025 | : "elf32-littlearm-symbian"); |
| 19026 | #elif defined (TE_VXWORKS) |
| 19027 | return (target_big_endian |
| 19028 | ? "elf32-bigarm-vxworks" |
| 19029 | : "elf32-littlearm-vxworks"); |
| 19030 | #else |
| 19031 | if (target_big_endian) |
| 19032 | return "elf32-bigarm"; |
| 19033 | else |
| 19034 | return "elf32-littlearm"; |
| 19035 | #endif |
| 19036 | } |
| 19037 | |
/* Per-symbol output hook: pass SYMP and PUNTP straight through to the
   generic ELF symbol frobbing code.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
#endif
| 19045 | |
/* MD interface: Finalization.	*/

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

void
arm_cleanup (void)
{
  literal_pool * pool;

  /* Flush every still-pending literal pool at end of assembly.  */
  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
      /* Emit the pool's contents via the .ltorg handler.  */
      s_ltorg (0);
    }
}
| 19067 | |
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  /* COFF: encode Thumb-ness in the symbol's storage class, and flag
     interworking symbols via n_flags.  */
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  /* Non-function Thumb symbols: map each storage class to its
	     Thumb counterpart.  */
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  /* ELF: encode Thumb-ness in the symbol's st_info type field, leaving
     the ARM special symbols (e.g. mapping symbols) untouched.  */
  symbolS * sym;
  char bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
| 19144 | |
| 19145 | /* MD interface: Initialization. */ |
| 19146 | |
| 19147 | static void |
| 19148 | set_constant_flonums (void) |
| 19149 | { |
| 19150 | int i; |
| 19151 | |
| 19152 | for (i = 0; i < NUM_FLOAT_VALS; i++) |
| 19153 | if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL) |
| 19154 | abort (); |
| 19155 | } |
| 19156 | |
| 19157 | /* Auto-select Thumb mode if it's the only available instruction set for the |
| 19158 | given architecture. */ |
| 19159 | |
| 19160 | static void |
| 19161 | autoselect_thumb_from_cpu_variant (void) |
| 19162 | { |
| 19163 | if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) |
| 19164 | opcode_select (16); |
| 19165 | } |
| 19166 | |
| 19167 | void |
| 19168 | md_begin (void) |
| 19169 | { |
| 19170 | unsigned mach; |
| 19171 | unsigned int i; |
| 19172 | |
| 19173 | if ( (arm_ops_hsh = hash_new ()) == NULL |
| 19174 | || (arm_cond_hsh = hash_new ()) == NULL |
| 19175 | || (arm_shift_hsh = hash_new ()) == NULL |
| 19176 | || (arm_psr_hsh = hash_new ()) == NULL |
| 19177 | || (arm_v7m_psr_hsh = hash_new ()) == NULL |
| 19178 | || (arm_reg_hsh = hash_new ()) == NULL |
| 19179 | || (arm_reloc_hsh = hash_new ()) == NULL |
| 19180 | || (arm_barrier_opt_hsh = hash_new ()) == NULL) |
| 19181 | as_fatal (_("virtual memory exhausted")); |
| 19182 | |
| 19183 | for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++) |
| 19184 | hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i)); |
| 19185 | for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++) |
| 19186 | hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i)); |
| 19187 | for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++) |
| 19188 | hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i)); |
| 19189 | for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++) |
| 19190 | hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i)); |
| 19191 | for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++) |
| 19192 | hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i)); |
| 19193 | for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++) |
| 19194 | hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i)); |
| 19195 | for (i = 0; |
| 19196 | i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt); |
| 19197 | i++) |
| 19198 | hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template, |
| 19199 | (PTR) (barrier_opt_names + i)); |
| 19200 | #ifdef OBJ_ELF |
| 19201 | for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++) |
| 19202 | hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i)); |
| 19203 | #endif |
| 19204 | |
| 19205 | set_constant_flonums (); |
| 19206 | |
| 19207 | /* Set the cpu variant based on the command-line options. We prefer |
| 19208 | -mcpu= over -march= if both are set (as for GCC); and we prefer |
| 19209 | -mfpu= over any other way of setting the floating point unit. |
| 19210 | Use of legacy options with new options are faulted. */ |
| 19211 | if (legacy_cpu) |
| 19212 | { |
| 19213 | if (mcpu_cpu_opt || march_cpu_opt) |
| 19214 | as_bad (_("use of old and new-style options to set CPU type")); |
| 19215 | |
| 19216 | mcpu_cpu_opt = legacy_cpu; |
| 19217 | } |
| 19218 | else if (!mcpu_cpu_opt) |
| 19219 | mcpu_cpu_opt = march_cpu_opt; |
| 19220 | |
| 19221 | if (legacy_fpu) |
| 19222 | { |
| 19223 | if (mfpu_opt) |
| 19224 | as_bad (_("use of old and new-style options to set FPU type")); |
| 19225 | |
| 19226 | mfpu_opt = legacy_fpu; |
| 19227 | } |
| 19228 | else if (!mfpu_opt) |
| 19229 | { |
| 19230 | #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS)) |
| 19231 | /* Some environments specify a default FPU. If they don't, infer it |
| 19232 | from the processor. */ |
| 19233 | if (mcpu_fpu_opt) |
| 19234 | mfpu_opt = mcpu_fpu_opt; |
| 19235 | else |
| 19236 | mfpu_opt = march_fpu_opt; |
| 19237 | #else |
| 19238 | mfpu_opt = &fpu_default; |
| 19239 | #endif |
| 19240 | } |
| 19241 | |
| 19242 | if (!mfpu_opt) |
| 19243 | { |
| 19244 | if (!mcpu_cpu_opt) |
| 19245 | mfpu_opt = &fpu_default; |
| 19246 | else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5)) |
| 19247 | mfpu_opt = &fpu_arch_vfp_v2; |
| 19248 | else |
| 19249 | mfpu_opt = &fpu_arch_fpa; |
| 19250 | } |
| 19251 | |
| 19252 | #ifdef CPU_DEFAULT |
| 19253 | if (!mcpu_cpu_opt) |
| 19254 | { |
| 19255 | mcpu_cpu_opt = &cpu_default; |
| 19256 | selected_cpu = cpu_default; |
| 19257 | } |
| 19258 | #else |
| 19259 | if (mcpu_cpu_opt) |
| 19260 | selected_cpu = *mcpu_cpu_opt; |
| 19261 | else |
| 19262 | mcpu_cpu_opt = &arm_arch_any; |
| 19263 | #endif |
| 19264 | |
| 19265 | ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); |
| 19266 | |
| 19267 | autoselect_thumb_from_cpu_variant (); |
| 19268 | |
| 19269 | arm_arch_used = thumb_arch_used = arm_arch_none; |
| 19270 | |
| 19271 | #if defined OBJ_COFF || defined OBJ_ELF |
| 19272 | { |
| 19273 | unsigned int flags = 0; |
| 19274 | |
| 19275 | #if defined OBJ_ELF |
| 19276 | flags = meabi_flags; |
| 19277 | |
| 19278 | switch (meabi_flags) |
| 19279 | { |
| 19280 | case EF_ARM_EABI_UNKNOWN: |
| 19281 | #endif |
| 19282 | /* Set the flags in the private structure. */ |
| 19283 | if (uses_apcs_26) flags |= F_APCS26; |
| 19284 | if (support_interwork) flags |= F_INTERWORK; |
| 19285 | if (uses_apcs_float) flags |= F_APCS_FLOAT; |
| 19286 | if (pic_code) flags |= F_PIC; |
| 19287 | if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard)) |
| 19288 | flags |= F_SOFT_FLOAT; |
| 19289 | |
| 19290 | switch (mfloat_abi_opt) |
| 19291 | { |
| 19292 | case ARM_FLOAT_ABI_SOFT: |
| 19293 | case ARM_FLOAT_ABI_SOFTFP: |
| 19294 | flags |= F_SOFT_FLOAT; |
| 19295 | break; |
| 19296 | |
| 19297 | case ARM_FLOAT_ABI_HARD: |
| 19298 | if (flags & F_SOFT_FLOAT) |
| 19299 | as_bad (_("hard-float conflicts with specified fpu")); |
| 19300 | break; |
| 19301 | } |
| 19302 | |
| 19303 | /* Using pure-endian doubles (even if soft-float). */ |
| 19304 | if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure)) |
| 19305 | flags |= F_VFP_FLOAT; |
| 19306 | |
| 19307 | #if defined OBJ_ELF |
| 19308 | if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick)) |
| 19309 | flags |= EF_ARM_MAVERICK_FLOAT; |
| 19310 | break; |
| 19311 | |
| 19312 | case EF_ARM_EABI_VER4: |
| 19313 | case EF_ARM_EABI_VER5: |
| 19314 | /* No additional flags to set. */ |
| 19315 | break; |
| 19316 | |
| 19317 | default: |
| 19318 | abort (); |
| 19319 | } |
| 19320 | #endif |
| 19321 | bfd_set_private_flags (stdoutput, flags); |
| 19322 | |
| 19323 | /* We have run out flags in the COFF header to encode the |
| 19324 | status of ATPCS support, so instead we create a dummy, |
| 19325 | empty, debug section called .arm.atpcs. */ |
| 19326 | if (atpcs) |
| 19327 | { |
| 19328 | asection * sec; |
| 19329 | |
| 19330 | sec = bfd_make_section (stdoutput, ".arm.atpcs"); |
| 19331 | |
| 19332 | if (sec != NULL) |
| 19333 | { |
| 19334 | bfd_set_section_flags |
| 19335 | (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */); |
| 19336 | bfd_set_section_size (stdoutput, sec, 0); |
| 19337 | bfd_set_section_contents (stdoutput, sec, NULL, 0, 0); |
| 19338 | } |
| 19339 | } |
| 19340 | } |
| 19341 | #endif |
| 19342 | |
| 19343 | /* Record the CPU type as well. */ |
| 19344 | if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)) |
| 19345 | mach = bfd_mach_arm_iWMMXt2; |
| 19346 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt)) |
| 19347 | mach = bfd_mach_arm_iWMMXt; |
| 19348 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale)) |
| 19349 | mach = bfd_mach_arm_XScale; |
| 19350 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick)) |
| 19351 | mach = bfd_mach_arm_ep9312; |
| 19352 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e)) |
| 19353 | mach = bfd_mach_arm_5TE; |
| 19354 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5)) |
| 19355 | { |
| 19356 | if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t)) |
| 19357 | mach = bfd_mach_arm_5T; |
| 19358 | else |
| 19359 | mach = bfd_mach_arm_5; |
| 19360 | } |
| 19361 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4)) |
| 19362 | { |
| 19363 | if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t)) |
| 19364 | mach = bfd_mach_arm_4T; |
| 19365 | else |
| 19366 | mach = bfd_mach_arm_4; |
| 19367 | } |
| 19368 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m)) |
| 19369 | mach = bfd_mach_arm_3M; |
| 19370 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3)) |
| 19371 | mach = bfd_mach_arm_3; |
| 19372 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s)) |
| 19373 | mach = bfd_mach_arm_2a; |
| 19374 | else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2)) |
| 19375 | mach = bfd_mach_arm_2; |
| 19376 | else |
| 19377 | mach = bfd_mach_arm_unknown; |
| 19378 | |
| 19379 | bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach); |
| 19380 | } |
| 19381 | |
| 19382 | /* Command line processing. */ |
| 19383 | |
| 19384 | /* md_parse_option |
| 19385 | Invocation line includes a switch not recognized by the base assembler. |
| 19386 | See if it's a processor-specific option. |
| 19387 | |
| 19388 | This routine is somewhat complicated by the need for backwards |
| 19389 | compatibility (since older releases of gcc can't be changed). |
| 19390 | The new options try to make the interface as compatible as |
| 19391 | possible with GCC. |
| 19392 | |
| 19393 | New options (supported) are: |
| 19394 | |
| 19395 | -mcpu=<cpu name> Assemble for selected processor |
| 19396 | -march=<architecture name> Assemble for selected architecture |
| 19397 | -mfpu=<fpu architecture> Assemble for selected FPU. |
| 19398 | -EB/-mbig-endian Big-endian |
| 19399 | -EL/-mlittle-endian Little-endian |
| 19400 | -k Generate PIC code |
| 19401 | -mthumb Start in Thumb mode |
| 19402 | -mthumb-interwork Code supports ARM/Thumb interworking |
| 19403 | |
| 19404 | For now we will also provide support for: |
| 19405 | |
| 19406 | -mapcs-32 32-bit Program counter |
| 19407 | -mapcs-26 26-bit Program counter |
   -mapcs-float		   Floats passed in FP registers
| 19409 | -mapcs-reentrant Reentrant code |
| 19410 | -matpcs |
| 19411 | (sometime these will probably be replaced with -mapcs=<list of options> |
| 19412 | and -matpcs=<list of options>) |
| 19413 | |
   The remaining options are only supported for backwards compatibility.
| 19415 | Cpu variants, the arm part is optional: |
| 19416 | -m[arm]1 Currently not supported. |
| 19417 | -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor |
| 19418 | -m[arm]3 Arm 3 processor |
| 19419 | -m[arm]6[xx], Arm 6 processors |
| 19420 | -m[arm]7[xx][t][[d]m] Arm 7 processors |
| 19421 | -m[arm]8[10] Arm 8 processors |
| 19422 | -m[arm]9[20][tdmi] Arm 9 processors |
| 19423 | -mstrongarm[110[0]] StrongARM processors |
| 19424 | -mxscale XScale processors |
| 19425 | -m[arm]v[2345[t[e]]] Arm architectures |
| 19426 | -mall All (except the ARM1) |
| 19427 | FP variants: |
| 19428 | -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions |
| 19429 | -mfpe-old (No float load/store multiples) |
| 19430 | -mvfpxd VFP Single precision |
| 19431 | -mvfp All VFP |
| 19432 | -mno-fpu Disable all floating point instructions |
| 19433 | |
| 19434 | The following CPU names are recognized: |
| 19435 | arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620, |
| 19436 | arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700, |
| 19437 | arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c, |
| 19438 | arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9, |
| 19439 | arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e, |
| 19440 | arm10t arm10e, arm1020t, arm1020e, arm10200e, |
| 19441 | strongarm, strongarm110, strongarm1100, strongarm1110, xscale. |
| 19442 | |
| 19443 | */ |
| 19444 | |
/* Short options recognized by md_parse_option: -m takes an argument,
   -k is a flag.  */
const char * md_shortopts = "m:k";

/* Define -EB/-EL only for the endiannesses this target can produce.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options recognized by md_parse_option.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
| 19470 | |
/* Table entry for a simple flag-setting command-line option.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
| 19479 | |
/* Flag-setting options accepted by md_parse_option.  */
struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code,	1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs,	1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.	 */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
| 19501 | |
/* Table entry for a deprecated CPU/FPU-selecting option: sets *VAR to
   a feature set rather than an int.  */
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set	**var;		/* Variable to change.	*/
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};
| 19509 | |
| 19510 | const struct arm_legacy_option_table arm_legacy_opts[] = |
| 19511 | { |
| 19512 | /* DON'T add any new processors to this list -- we want the whole list |
| 19513 | to go away... Add them to the processors table instead. */ |
| 19514 | {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")}, |
| 19515 | {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")}, |
| 19516 | {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")}, |
| 19517 | {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")}, |
| 19518 | {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")}, |
| 19519 | {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")}, |
| 19520 | {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")}, |
| 19521 | {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")}, |
| 19522 | {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")}, |
| 19523 | {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")}, |
| 19524 | {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")}, |
| 19525 | {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")}, |
| 19526 | {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")}, |
| 19527 | {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")}, |
| 19528 | {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")}, |
| 19529 | {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")}, |
| 19530 | {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")}, |
| 19531 | {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")}, |
| 19532 | {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")}, |
| 19533 | {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")}, |
| 19534 | {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")}, |
| 19535 | {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")}, |
| 19536 | {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")}, |
| 19537 | {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")}, |
| 19538 | {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")}, |
| 19539 | {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")}, |
| 19540 | {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")}, |
| 19541 | {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")}, |
| 19542 | {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")}, |
| 19543 | {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")}, |
| 19544 | {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")}, |
| 19545 | {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")}, |
| 19546 | {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")}, |
| 19547 | {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")}, |
| 19548 | {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")}, |
| 19549 | {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")}, |
| 19550 | {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")}, |
| 19551 | {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")}, |
| 19552 | {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")}, |
| 19553 | {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")}, |
| 19554 | {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")}, |
| 19555 | {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")}, |
| 19556 | {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")}, |
| 19557 | {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")}, |
| 19558 | {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")}, |
| 19559 | {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")}, |
| 19560 | {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, |
| 19561 | {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, |
| 19562 | {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, |
| 19563 | {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")}, |
| 19564 | {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")}, |
| 19565 | {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")}, |
| 19566 | {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")}, |
| 19567 | {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")}, |
| 19568 | {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")}, |
| 19569 | {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")}, |
| 19570 | {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")}, |
| 19571 | {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")}, |
| 19572 | {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")}, |
| 19573 | {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")}, |
| 19574 | {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")}, |
| 19575 | {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")}, |
| 19576 | {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")}, |
| 19577 | {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")}, |
| 19578 | {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")}, |
| 19579 | {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")}, |
| 19580 | {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")}, |
| 19581 | {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")}, |
| 19582 | {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")}, |
| 19583 | {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4, |
| 19584 | N_("use -mcpu=strongarm110")}, |
| 19585 | {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4, |
| 19586 | N_("use -mcpu=strongarm1100")}, |
| 19587 | {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4, |
| 19588 | N_("use -mcpu=strongarm1110")}, |
| 19589 | {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")}, |
| 19590 | {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")}, |
| 19591 | {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")}, |
| 19592 | |
| 19593 | /* Architecture variants -- don't add any more to this list either. */ |
| 19594 | {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")}, |
| 19595 | {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")}, |
| 19596 | {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")}, |
| 19597 | {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")}, |
| 19598 | {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")}, |
| 19599 | {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")}, |
| 19600 | {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")}, |
| 19601 | {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")}, |
| 19602 | {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")}, |
| 19603 | {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")}, |
| 19604 | {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")}, |
| 19605 | {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")}, |
| 19606 | {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")}, |
| 19607 | {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")}, |
| 19608 | {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")}, |
| 19609 | {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")}, |
| 19610 | {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")}, |
| 19611 | {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")}, |
| 19612 | |
| 19613 | /* Floating point variants -- don't add any more to this list either. */ |
| 19614 | {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")}, |
| 19615 | {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")}, |
| 19616 | {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")}, |
| 19617 | {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE, |
| 19618 | N_("use either -mfpu=softfpa or -mfpu=softvfp")}, |
| 19619 | |
| 19620 | {NULL, NULL, ARM_ARCH_NONE, NULL} |
| 19621 | }; |
| 19622 | |
/* One entry in the -mcpu= command-line table: maps a CPU name onto the
   architectural feature set it implements.  */
struct arm_cpu_option_table
{
  char *name;			/* CPU name as written on the command line.  */
  const arm_feature_set value;	/* Features implemented by this CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=... */
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case. */
  const char *canonical_name;
};
| 19634 | |
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC. */
static const struct arm_cpu_option_table arm_cpus[] =
{
  /* Columns: name, feature set, default FPU, canonical name (NULL means
     upper-case NAME).  Kept in rough architecture order; arm_parse_cpu
     and s_arm_cpu scan it linearly.  */
  {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
  {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
  {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
  {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
  {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly. */
  {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
  {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
  {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
  {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
  {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
  {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
  {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
  /* Cortex-A8 implies VFPv3 plus the Neon SIMD extensions.  */
  {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
                                                        | FPU_NEON_EXT_V1),
                                                                        NULL},
  {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
  {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
  /* ??? XSCALE is really an architecture. */
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor. */
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* Maverick */
  {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
};
| 19731 | |
/* One entry in the -march= command-line table: maps an architecture name
   onto its feature set and the FPU assumed when -mfpu= is not given.  */
struct arm_arch_option_table
{
  char *name;			/* Architecture name, e.g. "armv5te".  */
  const arm_feature_set value;	/* Features of this architecture.  */
  const arm_feature_set default_fpu;	/* FPU assumed unless -mfpu= given.  */
};
| 19738 | |
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC. */
static const struct arm_arch_option_table arm_archs[] =
{
  /* Entry 0 ("all") is deliberately skipped by the .arch and
     .object_arch directive handlers.  */
  {"all", ARM_ANY, FPU_ARCH_FPA},
  {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
  {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
  {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
  {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
  {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
  {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
  {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
  {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
  /* From ARMv5 onwards the default FPU is VFP rather than FPA.  */
  {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
  {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
  {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
  {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
  {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
  {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
  {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
  {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
  {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
  {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
  {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
  {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
  {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
  {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
  {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
};
| 19778 | |
/* ISA extensions in the co-processor space. */
/* Simple name -> feature-set pair; shared by the extension and FPU
   tables below.  */
struct arm_option_cpu_value_table
{
  char *name;			/* Extension or FPU name.  */
  const arm_feature_set value;	/* Features the name enables.  */
};
| 19785 | |
/* Architectural extensions accepted after '+' in -mcpu=/-march=
   (see arm_parse_extension).  All live in the coprocessor space.  */
static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL, ARM_ARCH_NONE}
};
| 19794 | |
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC. */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  /* Matched exactly (streq) by arm_parse_fpu.  Several entries are CPU
     names accepted for compatibility with their built-in FPUs.  */
  {"softfpa", FPU_NONE},
  {"fpe", FPU_ARCH_FPE},
  {"fpe2", FPU_ARCH_FPE},
  {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
  {"fpa", FPU_ARCH_FPA},
  {"fpa10", FPU_ARCH_FPA},
  {"fpa11", FPU_ARCH_FPA},
  {"arm7500fe", FPU_ARCH_FPA},
  {"softvfp", FPU_ARCH_VFP},
  {"softvfp+vfp", FPU_ARCH_VFP_V2},
  {"vfp", FPU_ARCH_VFP_V2},
  {"vfp9", FPU_ARCH_VFP_V2},
  {"vfp3", FPU_ARCH_VFP_V3},
  {"vfp10", FPU_ARCH_VFP_V2},
  {"vfp10-r0", FPU_ARCH_VFP_V1},
  {"vfpxd", FPU_ARCH_VFP_V1xD},
  {"arm1020t", FPU_ARCH_VFP_V1},
  {"arm1020e", FPU_ARCH_VFP_V2},
  {"arm1136jfs", FPU_ARCH_VFP_V2},
  {"arm1136jf-s", FPU_ARCH_VFP_V2},
  {"maverick", FPU_ARCH_MAVERICK},
  {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {NULL, ARM_ARCH_NONE}
};
| 19823 | |
/* Simple name -> integer-value pair, used for the float-ABI and EABI
   version tables.  */
struct arm_option_value_table
{
  char *name;			/* Option value as written by the user.  */
  long value;			/* Corresponding internal constant.  */
};
| 19829 | |
/* Values accepted by -mfloat-abi= (see arm_parse_float_abi).  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard", ARM_FLOAT_ABI_HARD},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"soft", ARM_FLOAT_ABI_SOFT},
  {NULL, 0}
};
| 19837 | |
| 19838 | #ifdef OBJ_ELF |
/* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* Values accepted by -meabi=; the value becomes the EF_ARM_EABI_*
   flag stored in the ELF header.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu", EF_ARM_EABI_UNKNOWN},
  {"4", EF_ARM_EABI_VER4},
  {"5", EF_ARM_EABI_VER5},
  {NULL, 0}
};
| 19847 | #endif |
| 19848 | |
/* Command-line options that take a sub-argument, e.g. "-mcpu=arm9".
   Matched by prefix in md_parse_option; FUNC parses the remainder.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
| 19856 | |
| 19857 | static int |
| 19858 | arm_parse_extension (char * str, const arm_feature_set **opt_p) |
| 19859 | { |
| 19860 | arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set)); |
| 19861 | |
| 19862 | /* Copy the feature set, so that we can modify it. */ |
| 19863 | *ext_set = **opt_p; |
| 19864 | *opt_p = ext_set; |
| 19865 | |
| 19866 | while (str != NULL && *str != 0) |
| 19867 | { |
| 19868 | const struct arm_option_cpu_value_table * opt; |
| 19869 | char * ext; |
| 19870 | int optlen; |
| 19871 | |
| 19872 | if (*str != '+') |
| 19873 | { |
| 19874 | as_bad (_("invalid architectural extension")); |
| 19875 | return 0; |
| 19876 | } |
| 19877 | |
| 19878 | str++; |
| 19879 | ext = strchr (str, '+'); |
| 19880 | |
| 19881 | if (ext != NULL) |
| 19882 | optlen = ext - str; |
| 19883 | else |
| 19884 | optlen = strlen (str); |
| 19885 | |
| 19886 | if (optlen == 0) |
| 19887 | { |
| 19888 | as_bad (_("missing architectural extension")); |
| 19889 | return 0; |
| 19890 | } |
| 19891 | |
| 19892 | for (opt = arm_extensions; opt->name != NULL; opt++) |
| 19893 | if (strncmp (opt->name, str, optlen) == 0) |
| 19894 | { |
| 19895 | ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value); |
| 19896 | break; |
| 19897 | } |
| 19898 | |
| 19899 | if (opt->name == NULL) |
| 19900 | { |
| 19901 | as_bad (_("unknown architectural extnsion `%s'"), str); |
| 19902 | return 0; |
| 19903 | } |
| 19904 | |
| 19905 | str = ext; |
| 19906 | }; |
| 19907 | |
| 19908 | return 1; |
| 19909 | } |
| 19910 | |
| 19911 | static int |
| 19912 | arm_parse_cpu (char * str) |
| 19913 | { |
| 19914 | const struct arm_cpu_option_table * opt; |
| 19915 | char * ext = strchr (str, '+'); |
| 19916 | int optlen; |
| 19917 | |
| 19918 | if (ext != NULL) |
| 19919 | optlen = ext - str; |
| 19920 | else |
| 19921 | optlen = strlen (str); |
| 19922 | |
| 19923 | if (optlen == 0) |
| 19924 | { |
| 19925 | as_bad (_("missing cpu name `%s'"), str); |
| 19926 | return 0; |
| 19927 | } |
| 19928 | |
| 19929 | for (opt = arm_cpus; opt->name != NULL; opt++) |
| 19930 | if (strncmp (opt->name, str, optlen) == 0) |
| 19931 | { |
| 19932 | mcpu_cpu_opt = &opt->value; |
| 19933 | mcpu_fpu_opt = &opt->default_fpu; |
| 19934 | if (opt->canonical_name) |
| 19935 | strcpy(selected_cpu_name, opt->canonical_name); |
| 19936 | else |
| 19937 | { |
| 19938 | int i; |
| 19939 | for (i = 0; i < optlen; i++) |
| 19940 | selected_cpu_name[i] = TOUPPER (opt->name[i]); |
| 19941 | selected_cpu_name[i] = 0; |
| 19942 | } |
| 19943 | |
| 19944 | if (ext != NULL) |
| 19945 | return arm_parse_extension (ext, &mcpu_cpu_opt); |
| 19946 | |
| 19947 | return 1; |
| 19948 | } |
| 19949 | |
| 19950 | as_bad (_("unknown cpu `%s'"), str); |
| 19951 | return 0; |
| 19952 | } |
| 19953 | |
| 19954 | static int |
| 19955 | arm_parse_arch (char * str) |
| 19956 | { |
| 19957 | const struct arm_arch_option_table *opt; |
| 19958 | char *ext = strchr (str, '+'); |
| 19959 | int optlen; |
| 19960 | |
| 19961 | if (ext != NULL) |
| 19962 | optlen = ext - str; |
| 19963 | else |
| 19964 | optlen = strlen (str); |
| 19965 | |
| 19966 | if (optlen == 0) |
| 19967 | { |
| 19968 | as_bad (_("missing architecture name `%s'"), str); |
| 19969 | return 0; |
| 19970 | } |
| 19971 | |
| 19972 | for (opt = arm_archs; opt->name != NULL; opt++) |
| 19973 | if (streq (opt->name, str)) |
| 19974 | { |
| 19975 | march_cpu_opt = &opt->value; |
| 19976 | march_fpu_opt = &opt->default_fpu; |
| 19977 | strcpy(selected_cpu_name, opt->name); |
| 19978 | |
| 19979 | if (ext != NULL) |
| 19980 | return arm_parse_extension (ext, &march_cpu_opt); |
| 19981 | |
| 19982 | return 1; |
| 19983 | } |
| 19984 | |
| 19985 | as_bad (_("unknown architecture `%s'\n"), str); |
| 19986 | return 0; |
| 19987 | } |
| 19988 | |
| 19989 | static int |
| 19990 | arm_parse_fpu (char * str) |
| 19991 | { |
| 19992 | const struct arm_option_cpu_value_table * opt; |
| 19993 | |
| 19994 | for (opt = arm_fpus; opt->name != NULL; opt++) |
| 19995 | if (streq (opt->name, str)) |
| 19996 | { |
| 19997 | mfpu_opt = &opt->value; |
| 19998 | return 1; |
| 19999 | } |
| 20000 | |
| 20001 | as_bad (_("unknown floating point format `%s'\n"), str); |
| 20002 | return 0; |
| 20003 | } |
| 20004 | |
| 20005 | static int |
| 20006 | arm_parse_float_abi (char * str) |
| 20007 | { |
| 20008 | const struct arm_option_value_table * opt; |
| 20009 | |
| 20010 | for (opt = arm_float_abis; opt->name != NULL; opt++) |
| 20011 | if (streq (opt->name, str)) |
| 20012 | { |
| 20013 | mfloat_abi_opt = opt->value; |
| 20014 | return 1; |
| 20015 | } |
| 20016 | |
| 20017 | as_bad (_("unknown floating point abi `%s'\n"), str); |
| 20018 | return 0; |
| 20019 | } |
| 20020 | |
| 20021 | #ifdef OBJ_ELF |
| 20022 | static int |
| 20023 | arm_parse_eabi (char * str) |
| 20024 | { |
| 20025 | const struct arm_option_value_table *opt; |
| 20026 | |
| 20027 | for (opt = arm_eabis; opt->name != NULL; opt++) |
| 20028 | if (streq (opt->name, str)) |
| 20029 | { |
| 20030 | meabi_flags = opt->value; |
| 20031 | return 1; |
| 20032 | } |
| 20033 | as_bad (_("unknown EABI `%s'\n"), str); |
| 20034 | return 0; |
| 20035 | } |
| 20036 | #endif |
| 20037 | |
/* Long options with sub-arguments; matched by prefix in
   md_parse_option, which then calls the parse function on the
   remainder of the argument.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {NULL, NULL, 0, NULL}
};
| 20054 | |
/* GAS hook: handle a target-specific command-line option.  C is the
   option letter, ARG its argument (or NULL).  Tries, in order: the
   fixed cases below, the simple option table (arm_opts), the legacy
   option table (arm_legacy_opts), and finally the prefix-matched long
   options (arm_long_opts).  Returns non-zero if the option was
   consumed, 0 if it is unknown.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Pass 1: simple options that set a variable.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));
#endif

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Pass 2: legacy options; these store a POINTER to their
	 feature-set value, hence the extra '&'.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));
#endif

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Pass 3: long options such as -mcpu=, matched by prefix.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));
#endif

	      /* Call the sub-option parser on the text after the
		 matched prefix ("- 1" because ARG lacks the leading
		 option letter that lopt->option includes).  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
| 20147 | |
/* GAS hook: print the ARM-specific option summary for --help to FP.
   Walks the same tables md_parse_option consults.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
| 20174 | |
| 20175 | |
| 20176 | #ifdef OBJ_ELF |
/* Pairs an EABI Tag_CPU_arch attribute value with the feature set that
   implies it (see cpu_arch_ver below).  */
typedef struct
{
  int val;			/* EABI Tag_CPU_arch value.  */
  arm_feature_set flags;	/* Features implying at least this arch.  */
} cpu_arch_ver_table;
| 20182 | |
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {1, ARM_ARCH_V4},
  {2, ARM_ARCH_V4T},
  {3, ARM_ARCH_V5},
  {4, ARM_ARCH_V5TE},
  {5, ARM_ARCH_V5TEJ},
  {6, ARM_ARCH_V6},
  {7, ARM_ARCH_V6Z},
  {8, ARM_ARCH_V6K},
  {9, ARM_ARCH_V6T2},
  /* The three v7 profiles share the same Tag_CPU_arch value (10).  */
  {10, ARM_ARCH_V7A},
  {10, ARM_ARCH_V7R},
  {10, ARM_ARCH_V7M},
  {0, ARM_ARCH_NONE}	/* val == 0 terminates the scan.  */
};
| 20201 | |
/* Set the public EABI object attributes.  Computes the union of the
   features actually used (ARM and Thumb), the selected FPU and CPU,
   then emits the corresponding .ARM.attributes tags (Tag_CPU_name,
   Tag_CPU_arch, ISA/FPU/WMMX/NEON usage).  Called from arm_md_end.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* Walk cpu_arch_ver (sorted least features first), keeping the last
     entry that still matched; features are cleared as they match so
     later entries are judged on what remains.  NOTE(review): exact
     matching semantics depend on ARM_CPU_HAS_FEATURE — confirm.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;	/* NOTE(review): shadows the table cursor above.  */

      p = selected_cpu_name;
      /* For architecture names ("armv..."), report only the part after
	 "armv", upper-cased.  */
      if (strncmp(p, "armv", 4) == 0)
	{
	  int i;

	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}
      elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
    }
  /* Tag_CPU_arch.  */
  elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
    elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
  /* Tag_THUMB_ISA_use: 2 = Thumb-2 was used, 1 = Thumb-1 only.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
    elf32_arm_add_eabi_attr_int (stdoutput, 9,
	ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
  /* Tag_VFP_arch: 3 = VFPv3, 2 = VFPv2, 1 = VFPv1/v1xD.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
           || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
           || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
           || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
    elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
  /* Tag_NEON_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
    elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
}
| 20287 | |
/* Add the .ARM.attributes section.  Called at the end of assembly;
   does nothing for pre-v4 (GNU-flavour) EABI objects.  */
void
arm_md_end (void)
{
  segT s;
  char *p;
  addressT addr;
  offsetT size;

  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  /* Compute the attributes first so the required size is known before
     the section's frag is grown.  */
  aeabi_set_public_attributes ();
  size = elf32_arm_eabi_attr_size (stdoutput);
  s = subseg_new (".ARM.attributes", 0);
  bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
  addr = frag_now_fix ();	/* NOTE(review): ADDR appears unused.  */
  p = frag_more (size);
  elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
}
| 20308 | #endif /* OBJ_ELF */ |
| 20309 | |
| 20310 | |
| 20311 | /* Parse a .cpu directive. */ |
| 20312 | |
| 20313 | static void |
| 20314 | s_arm_cpu (int ignored ATTRIBUTE_UNUSED) |
| 20315 | { |
| 20316 | const struct arm_cpu_option_table *opt; |
| 20317 | char *name; |
| 20318 | char saved_char; |
| 20319 | |
| 20320 | name = input_line_pointer; |
| 20321 | while (*input_line_pointer && !ISSPACE(*input_line_pointer)) |
| 20322 | input_line_pointer++; |
| 20323 | saved_char = *input_line_pointer; |
| 20324 | *input_line_pointer = 0; |
| 20325 | |
| 20326 | /* Skip the first "all" entry. */ |
| 20327 | for (opt = arm_cpus + 1; opt->name != NULL; opt++) |
| 20328 | if (streq (opt->name, name)) |
| 20329 | { |
| 20330 | mcpu_cpu_opt = &opt->value; |
| 20331 | selected_cpu = opt->value; |
| 20332 | if (opt->canonical_name) |
| 20333 | strcpy(selected_cpu_name, opt->canonical_name); |
| 20334 | else |
| 20335 | { |
| 20336 | int i; |
| 20337 | for (i = 0; opt->name[i]; i++) |
| 20338 | selected_cpu_name[i] = TOUPPER (opt->name[i]); |
| 20339 | selected_cpu_name[i] = 0; |
| 20340 | } |
| 20341 | ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); |
| 20342 | *input_line_pointer = saved_char; |
| 20343 | demand_empty_rest_of_line (); |
| 20344 | return; |
| 20345 | } |
| 20346 | as_bad (_("unknown cpu `%s'"), name); |
| 20347 | *input_line_pointer = saved_char; |
| 20348 | ignore_rest_of_line (); |
| 20349 | } |
| 20350 | |
| 20351 | |
| 20352 | /* Parse a .arch directive. */ |
| 20353 | |
| 20354 | static void |
| 20355 | s_arm_arch (int ignored ATTRIBUTE_UNUSED) |
| 20356 | { |
| 20357 | const struct arm_arch_option_table *opt; |
| 20358 | char saved_char; |
| 20359 | char *name; |
| 20360 | |
| 20361 | name = input_line_pointer; |
| 20362 | while (*input_line_pointer && !ISSPACE(*input_line_pointer)) |
| 20363 | input_line_pointer++; |
| 20364 | saved_char = *input_line_pointer; |
| 20365 | *input_line_pointer = 0; |
| 20366 | |
| 20367 | /* Skip the first "all" entry. */ |
| 20368 | for (opt = arm_archs + 1; opt->name != NULL; opt++) |
| 20369 | if (streq (opt->name, name)) |
| 20370 | { |
| 20371 | mcpu_cpu_opt = &opt->value; |
| 20372 | selected_cpu = opt->value; |
| 20373 | strcpy(selected_cpu_name, opt->name); |
| 20374 | ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); |
| 20375 | *input_line_pointer = saved_char; |
| 20376 | demand_empty_rest_of_line (); |
| 20377 | return; |
| 20378 | } |
| 20379 | |
| 20380 | as_bad (_("unknown architecture `%s'\n"), name); |
| 20381 | *input_line_pointer = saved_char; |
| 20382 | ignore_rest_of_line (); |
| 20383 | } |
| 20384 | |
| 20385 | |
| 20386 | /* Parse a .object_arch directive. */ |
| 20387 | |
| 20388 | static void |
| 20389 | s_arm_object_arch (int ignored ATTRIBUTE_UNUSED) |
| 20390 | { |
| 20391 | const struct arm_arch_option_table *opt; |
| 20392 | char saved_char; |
| 20393 | char *name; |
| 20394 | |
| 20395 | name = input_line_pointer; |
| 20396 | while (*input_line_pointer && !ISSPACE(*input_line_pointer)) |
| 20397 | input_line_pointer++; |
| 20398 | saved_char = *input_line_pointer; |
| 20399 | *input_line_pointer = 0; |
| 20400 | |
| 20401 | /* Skip the first "all" entry. */ |
| 20402 | for (opt = arm_archs + 1; opt->name != NULL; opt++) |
| 20403 | if (streq (opt->name, name)) |
| 20404 | { |
| 20405 | object_arch = &opt->value; |
| 20406 | *input_line_pointer = saved_char; |
| 20407 | demand_empty_rest_of_line (); |
| 20408 | return; |
| 20409 | } |
| 20410 | |
| 20411 | as_bad (_("unknown architecture `%s'\n"), name); |
| 20412 | *input_line_pointer = saved_char; |
| 20413 | ignore_rest_of_line (); |
| 20414 | } |
| 20415 | |
| 20416 | |
| 20417 | /* Parse a .fpu directive. */ |
| 20418 | |
| 20419 | static void |
| 20420 | s_arm_fpu (int ignored ATTRIBUTE_UNUSED) |
| 20421 | { |
| 20422 | const struct arm_option_cpu_value_table *opt; |
| 20423 | char saved_char; |
| 20424 | char *name; |
| 20425 | |
| 20426 | name = input_line_pointer; |
| 20427 | while (*input_line_pointer && !ISSPACE(*input_line_pointer)) |
| 20428 | input_line_pointer++; |
| 20429 | saved_char = *input_line_pointer; |
| 20430 | *input_line_pointer = 0; |
| 20431 | |
| 20432 | for (opt = arm_fpus; opt->name != NULL; opt++) |
| 20433 | if (streq (opt->name, name)) |
| 20434 | { |
| 20435 | mfpu_opt = &opt->value; |
| 20436 | ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt); |
| 20437 | *input_line_pointer = saved_char; |
| 20438 | demand_empty_rest_of_line (); |
| 20439 | return; |
| 20440 | } |
| 20441 | |
| 20442 | as_bad (_("unknown floating point format `%s'\n"), name); |
| 20443 | *input_line_pointer = saved_char; |
| 20444 | ignore_rest_of_line (); |
| 20445 | } |
| 20446 | |