+ if (!found_return)
+ return 0;
+
+ /* Since any instruction in the epilogue sequence, with the possible
+ exception of return itself, updates the stack pointer, we need to
+ scan backwards for at most one instruction. Try either a 16-bit or
+ a 32-bit instruction. This is just a heuristic, so we do not worry
+ too much about false positives. */
+
+ if (pc - 4 < func_start)
+ return 0;
+ if (target_read_memory (pc - 4, buf, 4))
+ return 0;
+
+ insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
+ insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
+
+ if (thumb_instruction_restores_sp (insn2))
+ found_stack_adjust = 1;
+ else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
+ found_stack_adjust = 1;
+ else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
+ && (insn2 & 0x0fff) == 0x0b04)
+ found_stack_adjust = 1;
+ else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
+ && (insn2 & 0x0e00) == 0x0a00)
+ found_stack_adjust = 1;
+
+ return found_stack_adjust;
+}
+
+/* Return true if we are in the function's epilogue, i.e. after the
+ instruction that destroyed the function's stack frame. */
+
+static int
+arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
+{
+ enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+ unsigned int insn;
+ int found_return, found_stack_adjust;
+ CORE_ADDR func_start, func_end;
+
+ if (arm_pc_is_thumb (gdbarch, pc))
+ return thumb_in_function_epilogue_p (gdbarch, pc);
+
+ if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
+ return 0;
+
+ /* We are in the epilogue if the previous instruction was a stack
+ adjustment and the next instruction is a possible return (bx, mov
+ pc, or pop). We may have to scan backwards to find the stack
+ adjustment, or forwards to find the return, but this is a decent
+ approximation. First scan forwards. */
+
+ found_return = 0;
+ insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
+ if (bits (insn, 28, 31) != INST_NV)
+ {
+ if ((insn & 0x0ffffff0) == 0x012fff10)
+ /* BX. */
+ found_return = 1;
+ else if ((insn & 0x0ffffff0) == 0x01a0f000)
+ /* MOV PC. */
+ found_return = 1;
+ else if ((insn & 0x0fff0000) == 0x08bd0000
+ && (insn & 0x0000c000) != 0)
+ /* POP (LDMIA), including PC or LR. */
+ found_return = 1;
+ }
+
+ if (!found_return)
+ return 0;
+
+ /* Scan backwards. This is just a heuristic, so do not worry about
+ false positives from mode changes. */
+
+ if (pc < func_start + 4)
+ return 0;
+
+ found_stack_adjust = 0;
+ insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
+ if (bits (insn, 28, 31) != INST_NV)
+ {
+ if ((insn & 0x0df0f000) == 0x0080d000)
+ /* ADD SP (register or immediate). */
+ found_stack_adjust = 1;
+ else if ((insn & 0x0df0f000) == 0x0040d000)
+ /* SUB SP (register or immediate). */
+ found_stack_adjust = 1;
+ else if ((insn & 0x0ffffff0) == 0x01a0d000)
+ /* MOV SP. */
+ found_stack_adjust = 1;
+ else if ((insn & 0x0fff0000) == 0x08bd0000)
+ /* POP (LDMIA). */
+ found_stack_adjust = 1;
+ else if ((insn & 0x0fff0000) == 0x049d0000)
+ /* POP of a single register. */
+ found_stack_adjust = 1;
+ }
+
+ if (found_stack_adjust)
+ return 1;
+
+ return 0;
+}
+
+
+/* When arguments must be pushed onto the stack, they go on in reverse
+ order. The code below implements a FILO (stack) to do this. */
+
+struct stack_item
+{
+ int len;
+ struct stack_item *prev;
+ void *data;
+};
+
+static struct stack_item *
+push_stack_item (struct stack_item *prev, const void *contents, int len)
+{
+ struct stack_item *si;
+ si = xmalloc (sizeof (struct stack_item));
+ si->data = xmalloc (len);
+ si->len = len;
+ si->prev = prev;
+ memcpy (si->data, contents, len);
+ return si;
+}
+
+static struct stack_item *
+pop_stack_item (struct stack_item *si)
+{
+ struct stack_item *dead = si;
+ si = si->prev;
+ xfree (dead->data);
+ xfree (dead);
+ return si;
+}
+
+
+/* Return the alignment (in bytes) of the given type. */
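+/* For an aggregate this is the greatest alignment of any of its
+ fields; e.g. "struct { char c; double d; }" yields 8. */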
+
+static int
+arm_type_align (struct type *t)
+{
+ int n;
+ int align;
+ int falign;
+
+ t = check_typedef (t);
+ switch (TYPE_CODE (t))
+ {
+ default:
+ /* Should never happen. */
+ internal_error (__FILE__, __LINE__, _("unknown type alignment"));
+ return 4;
+
+ case TYPE_CODE_PTR:
+ case TYPE_CODE_ENUM:
+ case TYPE_CODE_INT:
+ case TYPE_CODE_FLT:
+ case TYPE_CODE_SET:
+ case TYPE_CODE_RANGE:
+ case TYPE_CODE_REF:
+ case TYPE_CODE_CHAR:
+ case TYPE_CODE_BOOL:
+ return TYPE_LENGTH (t);
+
+ case TYPE_CODE_ARRAY:
+ case TYPE_CODE_COMPLEX:
+ /* TODO: What about vector types? */
+ return arm_type_align (TYPE_TARGET_TYPE (t));
+
+ case TYPE_CODE_STRUCT:
+ case TYPE_CODE_UNION:
+ align = 1;
+ for (n = 0; n < TYPE_NFIELDS (t); n++)
+ {
+ falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
+ if (falign > align)
+ align = falign;
+ }
+ return align;
+ }
+}
+
+/* Possible base types for a candidate for passing and returning in
+ VFP registers. */
+
+enum arm_vfp_cprc_base_type
+{
+ VFP_CPRC_UNKNOWN,
+ VFP_CPRC_SINGLE,
+ VFP_CPRC_DOUBLE,
+ VFP_CPRC_VEC64,
+ VFP_CPRC_VEC128
+};
+
+/* The length of one element of base type B. */
+
+static unsigned
+arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
+{
+ switch (b)
+ {
+ case VFP_CPRC_SINGLE:
+ return 4;
+ case VFP_CPRC_DOUBLE:
+ return 8;
+ case VFP_CPRC_VEC64:
+ return 8;
+ case VFP_CPRC_VEC128:
+ return 16;
+ default:
+ internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
+ (int) b);
+ }
+}
+
+/* The character ('s', 'd' or 'q') for the type of VFP register used
+ for passing base type B. */
+
+static int
+arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
+{
+ switch (b)
+ {
+ case VFP_CPRC_SINGLE:
+ return 's';
+ case VFP_CPRC_DOUBLE:
+ return 'd';
+ case VFP_CPRC_VEC64:
+ return 'd';
+ case VFP_CPRC_VEC128:
+ return 'q';
+ default:
+ internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
+ (int) b);
+ }
+}
+
+/* Determine whether T may be part of a candidate for passing and
+ returning in VFP registers, ignoring the limit on the total number
+ of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
+ classification of the first valid component found; if it is not
+ VFP_CPRC_UNKNOWN, all components must have the same classification
+ as *BASE_TYPE. If it is found that T contains a type not permitted
+ for passing and returning in VFP registers, a type differently
+ classified from *BASE_TYPE, or two types differently classified
+ from each other, return -1, otherwise return the total number of
+ base-type elements found (possibly 0 in an empty structure or
+ array). Vector types are not currently supported, matching the
+ generic AAPCS support. */
+
+static int
+arm_vfp_cprc_sub_candidate (struct type *t,
+ enum arm_vfp_cprc_base_type *base_type)
+{
+ t = check_typedef (t);
+ switch (TYPE_CODE (t))
+ {
+ case TYPE_CODE_FLT:
+ switch (TYPE_LENGTH (t))
+ {
+ case 4:
+ if (*base_type == VFP_CPRC_UNKNOWN)
+ *base_type = VFP_CPRC_SINGLE;
+ else if (*base_type != VFP_CPRC_SINGLE)
+ return -1;
+ return 1;
+
+ case 8:
+ if (*base_type == VFP_CPRC_UNKNOWN)
+ *base_type = VFP_CPRC_DOUBLE;
+ else if (*base_type != VFP_CPRC_DOUBLE)
+ return -1;
+ return 1;
+
+ default:
+ return -1;
+ }
+ break;
+
+ case TYPE_CODE_COMPLEX:
+ /* Arguments of complex T where T is one of the types float or
+ double get treated as if they are implemented as:
+
+ struct complexT
+ {
+ T real;
+ T imag;
+ };
+
+ */
+ switch (TYPE_LENGTH (t))
+ {
+ case 8:
+ if (*base_type == VFP_CPRC_UNKNOWN)
+ *base_type = VFP_CPRC_SINGLE;
+ else if (*base_type != VFP_CPRC_SINGLE)
+ return -1;
+ return 2;
+
+ case 16:
+ if (*base_type == VFP_CPRC_UNKNOWN)
+ *base_type = VFP_CPRC_DOUBLE;
+ else if (*base_type != VFP_CPRC_DOUBLE)
+ return -1;
+ return 2;
+
+ default:
+ return -1;
+ }
+ break;
+
+ case TYPE_CODE_ARRAY:
+ {
+ int count;
+ unsigned unitlen;
+ count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
+ if (count == -1)
+ return -1;
+ if (TYPE_LENGTH (t) == 0)
+ {
+ gdb_assert (count == 0);
+ return 0;
+ }
+ else if (count == 0)
+ return -1;
+ unitlen = arm_vfp_cprc_unit_length (*base_type);
+ gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
+ return TYPE_LENGTH (t) / unitlen;
+ }
+ break;
+
+ case TYPE_CODE_STRUCT:
+ {
+ int count = 0;
+ unsigned unitlen;
+ int i;
+ for (i = 0; i < TYPE_NFIELDS (t); i++)
+ {
+ int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
+ base_type);
+ if (sub_count == -1)
+ return -1;
+ count += sub_count;
+ }
+ if (TYPE_LENGTH (t) == 0)
+ {
+ gdb_assert (count == 0);
+ return 0;
+ }
+ else if (count == 0)
+ return -1;
+ unitlen = arm_vfp_cprc_unit_length (*base_type);
+ if (TYPE_LENGTH (t) != unitlen * count)
+ return -1;
+ return count;
+ }
+
+ case TYPE_CODE_UNION:
+ {
+ int count = 0;
+ unsigned unitlen;
+ int i;
+ for (i = 0; i < TYPE_NFIELDS (t); i++)
+ {
+ int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
+ base_type);
+ if (sub_count == -1)
+ return -1;
+ count = (count > sub_count ? count : sub_count);
+ }
+ if (TYPE_LENGTH (t) == 0)
+ {
+ gdb_assert (count == 0);
+ return 0;
+ }
+ else if (count == 0)
+ return -1;
+ unitlen = arm_vfp_cprc_unit_length (*base_type);
+ if (TYPE_LENGTH (t) != unitlen * count)
+ return -1;
+ return count;
+ }
+
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+/* Determine whether T is a VFP co-processor register candidate (CPRC)
+ if passed to or returned from a non-variadic function with the VFP
+ ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
+ *BASE_TYPE to the base type for T and *COUNT to the number of
+ elements of that base type before returning. */
+
+static int
+arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
+ int *count)
+{
+ enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
+ int c = arm_vfp_cprc_sub_candidate (t, &b);
+ if (c <= 0 || c > 4)
+ return 0;
+ *base_type = b;
+ *count = c;
+ return 1;
+}
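+
+/* For example, under the VFP variant of the AAPCS a homogeneous
+ aggregate such as "struct { double re, im; }" is a CPRC with base
+ type VFP_CPRC_DOUBLE and count 2, while a structure mixing float
+ and double members, or one with more than four elements, is not. */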
+
+/* Return 1 if the VFP ABI should be used for passing arguments to and
+ returning values from a function of type FUNC_TYPE, 0
+ otherwise. */
+
+static int
+arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ /* Variadic functions always use the base ABI. Assume that functions
+ without debug info are not variadic. */
+ if (func_type && TYPE_VARARGS (check_typedef (func_type)))
+ return 0;
+ /* The VFP ABI is only supported as a variant of AAPCS. */
+ if (tdep->arm_abi != ARM_ABI_AAPCS)
+ return 0;
+ return tdep->fp_model == ARM_FLOAT_VFP;
+}
+
+/* We currently only support passing parameters in integer registers, which
+ conforms with GCC's default model, and VFP argument passing following
+ the VFP variant of AAPCS. Several other variants exist and
+ we should probably support some of them based on the selected ABI. */
+
+static CORE_ADDR
+arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
+ struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
+ struct value **args, CORE_ADDR sp, int struct_return,
+ CORE_ADDR struct_addr)
+{
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ int argnum;
+ int argreg;
+ int nstack;
+ struct stack_item *si = NULL;
+ int use_vfp_abi;
+ struct type *ftype;
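+ /* One bit per single-precision register s0..s15 (the registers the
+ VFP ABI uses for argument passing); a set bit means the register
+ is still free. */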
+ unsigned vfp_regs_free = (1 << 16) - 1;
+
+ /* Determine the type of this function and whether the VFP ABI
+ applies. */
+ ftype = check_typedef (value_type (function));
+ if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
+ ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
+ use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
+
+ /* Set the return address. For the ARM, the return breakpoint is
+ always at BP_ADDR. */
+ if (arm_pc_is_thumb (gdbarch, bp_addr))
+ bp_addr |= 1;
+ regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
+
+ /* Walk through the list of args and determine how large a temporary
+ stack is required. Need to take care here as structs may be
+ passed on the stack, and we have to push them. */
+ nstack = 0;
+
+ argreg = ARM_A1_REGNUM;
+
+ /* The struct_return pointer occupies the first parameter
+ passing register. */
+ if (struct_return)
+ {
+ if (arm_debug)
+ fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
+ gdbarch_register_name (gdbarch, argreg),
+ paddress (gdbarch, struct_addr));
+ regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
+ argreg++;
+ }
+
+ for (argnum = 0; argnum < nargs; argnum++)
+ {
+ int len;
+ struct type *arg_type;
+ struct type *target_type;
+ enum type_code typecode;
+ const bfd_byte *val;
+ int align;
+ enum arm_vfp_cprc_base_type vfp_base_type;
+ int vfp_base_count;
+ int may_use_core_reg = 1;
+
+ arg_type = check_typedef (value_type (args[argnum]));
+ len = TYPE_LENGTH (arg_type);
+ target_type = TYPE_TARGET_TYPE (arg_type);
+ typecode = TYPE_CODE (arg_type);
+ val = value_contents (args[argnum]);
+
+ align = arm_type_align (arg_type);
+ /* Round alignment up to a whole number of words. */
+ align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
+ /* Different ABIs have different maximum alignments. */
+ if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
+ {
+ /* The APCS ABI only requires word alignment. */
+ align = INT_REGISTER_SIZE;
+ }
+ else
+ {
+ /* The AAPCS requires at most doubleword alignment. */
+ if (align > INT_REGISTER_SIZE * 2)
+ align = INT_REGISTER_SIZE * 2;
+ }
+
+ if (use_vfp_abi
+ && arm_vfp_call_candidate (arg_type, &vfp_base_type,
+ &vfp_base_count))
+ {
+ int regno;
+ int unit_length;
+ int shift;
+ unsigned mask;
+
+ /* Because this is a CPRC it cannot go in a core register or
+ cause a core register to be skipped for alignment.
+ Either it goes in VFP registers and the rest of this loop
+ iteration is skipped for this argument, or it goes on the
+ stack (and the stack alignment code is correct for this
+ case). */
+ may_use_core_reg = 0;
+
+ unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
+ shift = unit_length / 4;
+ mask = (1 << (shift * vfp_base_count)) - 1;
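+ /* For example, a CPRC of two doubles has unit_length 8, so SHIFT
+ is 2 (each double spans two single-precision registers) and MASK
+ covers four consecutive s-registers; the loop below then searches
+ for such a block free at a d-register boundary. */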
+ for (regno = 0; regno < 16; regno += shift)
+ if (((vfp_regs_free >> regno) & mask) == mask)
+ break;
+
+ if (regno < 16)
+ {
+ int reg_char;
+ int reg_scaled;
+ int i;
+
+ vfp_regs_free &= ~(mask << regno);
+ reg_scaled = regno / shift;
+ reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
+ for (i = 0; i < vfp_base_count; i++)
+ {
+ char name_buf[4];
+ int regnum;
+ if (reg_char == 'q')
+ arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
+ val + i * unit_length);
+ else
+ {
+ xsnprintf (name_buf, sizeof (name_buf), "%c%d",
+ reg_char, reg_scaled + i);
+ regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
+ strlen (name_buf));
+ regcache_cooked_write (regcache, regnum,
+ val + i * unit_length);
+ }
+ }
+ continue;
+ }
+ else
+ {
+ /* This CPRC could not go in VFP registers, so all VFP
+ registers are now marked as used. */
+ vfp_regs_free = 0;
+ }
+ }
+
+ /* Push stack padding for doubleword alignment. */
+ if (nstack & (align - 1))
+ {
+ si = push_stack_item (si, val, INT_REGISTER_SIZE);
+ nstack += INT_REGISTER_SIZE;
+ }
+
+ /* Doubleword aligned quantities must go in even register pairs. */
+ if (may_use_core_reg
+ && argreg <= ARM_LAST_ARG_REGNUM
+ && align > INT_REGISTER_SIZE
+ && argreg & 1)
+ argreg++;
+
+ /* If the argument is a pointer to a function, and it is a
+ Thumb function, create a LOCAL copy of the value and set
+ the THUMB bit in it. */
+ if (TYPE_CODE_PTR == typecode
+ && target_type != NULL
+ && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
+ {
+ CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
+ if (arm_pc_is_thumb (gdbarch, regval))
+ {
+ bfd_byte *copy = alloca (len);
+ store_unsigned_integer (copy, len, byte_order,
+ MAKE_THUMB_ADDR (regval));
+ val = copy;
+ }
+ }
+
+ /* Copy the argument to general registers or the stack in
+ register-sized pieces. Large arguments are split between
+ registers and stack. */
+ while (len > 0)
+ {
+ int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
+
+ if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
+ {
+ /* The argument is being passed in a general purpose
+ register. */
+ CORE_ADDR regval
+ = extract_unsigned_integer (val, partial_len, byte_order);
+ if (byte_order == BFD_ENDIAN_BIG)
+ regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
+ if (arm_debug)
+ fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
+ argnum,
+ gdbarch_register_name
+ (gdbarch, argreg),
+ phex (regval, INT_REGISTER_SIZE));
+ regcache_cooked_write_unsigned (regcache, argreg, regval);
+ argreg++;
+ }
+ else
+ {
+ /* Push the arguments onto the stack. */
+ if (arm_debug)
+ fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
+ argnum, nstack);
+ si = push_stack_item (si, val, INT_REGISTER_SIZE);
+ nstack += INT_REGISTER_SIZE;
+ }
+
+ len -= partial_len;
+ val += partial_len;
+ }
+ }
+ /* If we have an odd number of words to push, then decrement the stack
+ by one word now, so that the first stack argument will be doubleword
+ aligned. */
+ if (nstack & 4)
+ sp -= 4;
+
+ while (si)
+ {
+ sp -= si->len;
+ write_memory (sp, si->data, si->len);
+ si = pop_stack_item (si);
+ }
+
+ /* Finally, update the SP register. */
+ regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
+
+ return sp;
+}
+
+
+/* Always align the frame to an 8-byte boundary. This is required on
+ some platforms and harmless on the rest. */
+
+static CORE_ADDR
+arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
+{
+ /* Align the stack to eight bytes. */
+ return sp & ~ (CORE_ADDR) 7;
+}
+
+static void
+print_fpu_flags (struct ui_file *file, int flags)
+{
+ if (flags & (1 << 0))
+ fputs_filtered ("IVO ", file);
+ if (flags & (1 << 1))
+ fputs_filtered ("DVZ ", file);
+ if (flags & (1 << 2))
+ fputs_filtered ("OFL ", file);
+ if (flags & (1 << 3))
+ fputs_filtered ("UFL ", file);
+ if (flags & (1 << 4))
+ fputs_filtered ("INX ", file);
+ fputc_filtered ('\n', file);
+}
+
+/* Print interesting information about the floating point processor
+ (if present) or emulator. */
+static void
+arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
+ struct frame_info *frame, const char *args)
+{
+ unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
+ int type;
+
+ type = (status >> 24) & 127;
+ if (status & (1 << 31))
+ fprintf_filtered (file, _("Hardware FPU type %d\n"), type);
+ else
+ fprintf_filtered (file, _("Software FPU type %d\n"), type);
+ /* i18n: [floating point unit] mask */
+ fputs_filtered (_("mask: "), file);
+ print_fpu_flags (file, status >> 16);
+ /* i18n: [floating point unit] flags */
+ fputs_filtered (_("flags: "), file);
+ print_fpu_flags (file, status);
+}
+
+/* Construct the ARM extended floating point type. */
+static struct type *
+arm_ext_type (struct gdbarch *gdbarch)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+ if (!tdep->arm_ext_type)
+ tdep->arm_ext_type
+ = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
+ floatformats_arm_ext);
+
+ return tdep->arm_ext_type;
+}
+
+static struct type *
+arm_neon_double_type (struct gdbarch *gdbarch)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+ if (tdep->neon_double_type == NULL)
+ {
+ struct type *t, *elem;
+
+ t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
+ TYPE_CODE_UNION);
+ elem = builtin_type (gdbarch)->builtin_uint8;
+ append_composite_type_field (t, "u8", init_vector_type (elem, 8));
+ elem = builtin_type (gdbarch)->builtin_uint16;
+ append_composite_type_field (t, "u16", init_vector_type (elem, 4));
+ elem = builtin_type (gdbarch)->builtin_uint32;
+ append_composite_type_field (t, "u32", init_vector_type (elem, 2));
+ elem = builtin_type (gdbarch)->builtin_uint64;
+ append_composite_type_field (t, "u64", elem);
+ elem = builtin_type (gdbarch)->builtin_float;
+ append_composite_type_field (t, "f32", init_vector_type (elem, 2));
+ elem = builtin_type (gdbarch)->builtin_double;
+ append_composite_type_field (t, "f64", elem);
+
+ TYPE_VECTOR (t) = 1;
+ TYPE_NAME (t) = "neon_d";
+ tdep->neon_double_type = t;
+ }
+
+ return tdep->neon_double_type;
+}
+
+/* FIXME: The vector types are not correctly ordered on big-endian
+ targets. Just as s0 is the low bits of d0, d0[0] is also the low
+ bits of d0 - regardless of what unit size is being held in d0. So
+ the offset of the first uint8 in d0 is 7, but the offset of the
+ first float is 4. This code works as-is for little-endian
+ targets. */
+
+static struct type *
+arm_neon_quad_type (struct gdbarch *gdbarch)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+ if (tdep->neon_quad_type == NULL)
+ {
+ struct type *t, *elem;
+
+ t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
+ TYPE_CODE_UNION);
+ elem = builtin_type (gdbarch)->builtin_uint8;
+ append_composite_type_field (t, "u8", init_vector_type (elem, 16));
+ elem = builtin_type (gdbarch)->builtin_uint16;
+ append_composite_type_field (t, "u16", init_vector_type (elem, 8));
+ elem = builtin_type (gdbarch)->builtin_uint32;
+ append_composite_type_field (t, "u32", init_vector_type (elem, 4));
+ elem = builtin_type (gdbarch)->builtin_uint64;
+ append_composite_type_field (t, "u64", init_vector_type (elem, 2));
+ elem = builtin_type (gdbarch)->builtin_float;
+ append_composite_type_field (t, "f32", init_vector_type (elem, 4));
+ elem = builtin_type (gdbarch)->builtin_double;
+ append_composite_type_field (t, "f64", init_vector_type (elem, 2));
+
+ TYPE_VECTOR (t) = 1;
+ TYPE_NAME (t) = "neon_q";
+ tdep->neon_quad_type = t;
+ }
+
+ return tdep->neon_quad_type;
+}
+
+/* Return the GDB type object for the "standard" data type of data in
+ register N. */
+
+static struct type *
+arm_register_type (struct gdbarch *gdbarch, int regnum)
+{
+ int num_regs = gdbarch_num_regs (gdbarch);
+
+ if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
+ && regnum >= num_regs && regnum < num_regs + 32)
+ return builtin_type (gdbarch)->builtin_float;
+
+ if (gdbarch_tdep (gdbarch)->have_neon_pseudos
+ && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
+ return arm_neon_quad_type (gdbarch);
+
+ /* If the target description has register information, we are only
+ in this function so that we can override the types of
+ double-precision registers for NEON. */
+ if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
+ {
+ struct type *t = tdesc_register_type (gdbarch, regnum);
+
+ if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
+ && TYPE_CODE (t) == TYPE_CODE_FLT
+ && gdbarch_tdep (gdbarch)->have_neon)
+ return arm_neon_double_type (gdbarch);
+ else
+ return t;
+ }
+
+ if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
+ {
+ if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
+ return builtin_type (gdbarch)->builtin_void;
+
+ return arm_ext_type (gdbarch);
+ }
+ else if (regnum == ARM_SP_REGNUM)
+ return builtin_type (gdbarch)->builtin_data_ptr;
+ else if (regnum == ARM_PC_REGNUM)
+ return builtin_type (gdbarch)->builtin_func_ptr;
+ else if (regnum >= ARRAY_SIZE (arm_register_names))
+ /* These registers are only supported on targets which supply
+ an XML description. */
+ return builtin_type (gdbarch)->builtin_int0;
+ else
+ return builtin_type (gdbarch)->builtin_uint32;
+}
+
+/* Map a DWARF register REGNUM onto the appropriate GDB register
+ number. */
+
+static int
+arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
+{
+ /* Core integer regs. */
+ if (reg >= 0 && reg <= 15)
+ return reg;
+
+ /* Legacy FPA encoding. These were once used in a way which
+ overlapped with VFP register numbering, so their use is
+ discouraged, but GDB doesn't support the ARM toolchain
+ which used them for VFP. */
+ if (reg >= 16 && reg <= 23)
+ return ARM_F0_REGNUM + reg - 16;
+
+ /* New assignments for the FPA registers. */
+ if (reg >= 96 && reg <= 103)
+ return ARM_F0_REGNUM + reg - 96;
+
+ /* WMMX register assignments. */
+ if (reg >= 104 && reg <= 111)
+ return ARM_WCGR0_REGNUM + reg - 104;
+
+ if (reg >= 112 && reg <= 127)
+ return ARM_WR0_REGNUM + reg - 112;
+
+ if (reg >= 192 && reg <= 199)
+ return ARM_WC0_REGNUM + reg - 192;
+
+ /* VFP v2 registers. A double precision value is actually
+ in d1 rather than s2, but the ABI only defines numbering
+ for the single precision registers. This will "just work"
+ in GDB for little endian targets (we'll read eight bytes,
+ starting in s0 and then progressing to s1), but will be
+ reversed on big endian targets with VFP. This won't
+ be a problem for the new Neon quad registers; you're supposed
+ to use DW_OP_piece for those. */
+ if (reg >= 64 && reg <= 95)
+ {
+ char name_buf[4];
+
+ xsnprintf (name_buf, sizeof (name_buf), "s%d", reg - 64);
+ return user_reg_map_name_to_regnum (gdbarch, name_buf,
+ strlen (name_buf));
+ }
+
+ /* VFP v3 / Neon registers. This range is also used for VFP v2
+ registers, except that it now describes d0 instead of s0. */
+ if (reg >= 256 && reg <= 287)
+ {
+ char name_buf[4];
+
+ xsnprintf (name_buf, sizeof (name_buf), "d%d", reg - 256);
+ return user_reg_map_name_to_regnum (gdbarch, name_buf,
+ strlen (name_buf));
+ }
+
+ return -1;
+}
+
+/* Map GDB internal REGNUM onto the ARM simulator register numbers. */
+static int
+arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
+{
+ int reg = regnum;
+ gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
+
+ if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
+ return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
+
+ if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
+ return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
+
+ if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
+ return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
+
+ if (reg < NUM_GREGS)
+ return SIM_ARM_R0_REGNUM + reg;
+ reg -= NUM_GREGS;
+
+ if (reg < NUM_FREGS)
+ return SIM_ARM_FP0_REGNUM + reg;
+ reg -= NUM_FREGS;
+
+ if (reg < NUM_SREGS)
+ return SIM_ARM_FPS_REGNUM + reg;
+ reg -= NUM_SREGS;
+
+ internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
+}
+
+/* NOTE: cagney/2001-08-20: Both convert_from_extended() and
+ convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
+ It is thought that this is the floating-point register format on
+ little-endian systems. */
+
+static void
+convert_from_extended (const struct floatformat *fmt, const void *ptr,
+ void *dbl, int endianess)
+{
+ DOUBLEST d;
+
+ if (endianess == BFD_ENDIAN_BIG)
+ floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
+ else
+ floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
+ ptr, &d);
+ floatformat_from_doublest (fmt, &d, dbl);
+}
+
+static void
+convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
+ int endianess)
+{
+ DOUBLEST d;
+
+ floatformat_to_doublest (fmt, ptr, &d);
+ if (endianess == BFD_ENDIAN_BIG)
+ floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
+ else
+ floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
+ &d, dbl);
+}
+
+static int
+condition_true (unsigned long cond, unsigned long status_reg)
+{
+ if (cond == INST_AL || cond == INST_NV)
+ return 1;
+
+ switch (cond)
+ {
+ case INST_EQ:
+ return ((status_reg & FLAG_Z) != 0);
+ case INST_NE:
+ return ((status_reg & FLAG_Z) == 0);
+ case INST_CS:
+ return ((status_reg & FLAG_C) != 0);
+ case INST_CC:
+ return ((status_reg & FLAG_C) == 0);
+ case INST_MI:
+ return ((status_reg & FLAG_N) != 0);
+ case INST_PL:
+ return ((status_reg & FLAG_N) == 0);
+ case INST_VS:
+ return ((status_reg & FLAG_V) != 0);
+ case INST_VC:
+ return ((status_reg & FLAG_V) == 0);
+ case INST_HI:
+ return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
+ case INST_LS:
+ return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
+ case INST_GE:
+ return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
+ case INST_LT:
+ return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
+ case INST_GT:
+ return (((status_reg & FLAG_Z) == 0)
+ && (((status_reg & FLAG_N) == 0)
+ == ((status_reg & FLAG_V) == 0)));
+ case INST_LE:
+ return (((status_reg & FLAG_Z) != 0)
+ || (((status_reg & FLAG_N) == 0)
+ != ((status_reg & FLAG_V) == 0)));
+ }
+ return 1;
+}
+
+static unsigned long
+shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
+ unsigned long pc_val, unsigned long status_reg)
+{
+ unsigned long res, shift;
+ int rm = bits (inst, 0, 3);
+ unsigned long shifttype = bits (inst, 5, 6);
+
+ if (bit (inst, 4))
+ {
+ int rs = bits (inst, 8, 11);
+ shift = (rs == 15 ? pc_val + 8
+ : get_frame_register_unsigned (frame, rs)) & 0xFF;
+ }
+ else
+ shift = bits (inst, 7, 11);
+
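+ /* When Rm is the PC, it reads as the instruction's address plus 8,
+ or plus 12 when the shift amount is taken from a register (bit 4
+ set). */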
+ res = (rm == ARM_PC_REGNUM
+ ? (pc_val + (bit (inst, 4) ? 12 : 8))
+ : get_frame_register_unsigned (frame, rm));
+
+ switch (shifttype)
+ {
+ case 0: /* LSL */
+ res = shift >= 32 ? 0 : res << shift;
+ break;
+
+ case 1: /* LSR */
+ res = shift >= 32 ? 0 : res >> shift;
+ break;
+
+ case 2: /* ASR */
+ if (shift >= 32)
+ shift = 31;
+ res = ((res & 0x80000000L)
+ ? ~((~res) >> shift) : res >> shift);
+ break;
+
+ case 3: /* ROR/RRX */
+ shift &= 31;
+ if (shift == 0)
+ res = (res >> 1) | (carry ? 0x80000000L : 0);
+ else
+ res = (res >> shift) | (res << (32 - shift));
+ break;
+ }
+
+ return res & 0xffffffff;
+}
+
+/* Return number of 1-bits in VAL. */
+
+static int
+bitcount (unsigned long val)
+{
+ int nbits;
+ for (nbits = 0; val != 0; nbits++)
+ val &= val - 1; /* Delete rightmost 1-bit in val. */
+ return nbits;
+}
+
+/* Return the size in bytes of the complete Thumb instruction whose
+ first halfword is INST1. */
+
+static int
+thumb_insn_size (unsigned short inst1)
+{
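+ /* A halfword whose top five bits are 0b11101, 0b11110 or 0b11111
+ starts a 32-bit Thumb-2 instruction; 0b11100 is the 16-bit
+ unconditional branch. */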
+ if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
+ return 4;
+ else
+ return 2;
+}
+
+static int
+thumb_advance_itstate (unsigned int itstate)
+{
+ /* Preserve IT[7:5], the first three bits of the condition. Shift
+ the upcoming condition flags left by one bit. */
+ itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
+
+ /* If we have finished the IT block, clear the state. */
+ if ((itstate & 0x0f) == 0)
+ itstate = 0;
+
+ return itstate;
+}
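+
+/* For example, an "IT EQ" leaves ITSTATE = 0x08 (condition 0000,
+ mask 1000); one advance shifts the mask to 0x10, whose low four
+ bits are zero, so the state clears and the IT block ends. */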
+
+/* Find the next PC after the current instruction executes. In some
+ cases we cannot statically determine the answer (see the IT state
+ handling in this function); in that case, a breakpoint may be
+ inserted in addition to the returned PC, which will be used to set
+ another breakpoint by our caller. */
+
+static CORE_ADDR
+thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
+{
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ struct address_space *aspace = get_frame_address_space (frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+ unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
+ unsigned short inst1;
+ CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
+ unsigned long offset;
+ ULONGEST status, itstate;
+
+ nextpc = MAKE_THUMB_ADDR (nextpc);
+ pc_val = MAKE_THUMB_ADDR (pc_val);
+
+ inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
+
+ /* Thumb-2 conditional execution support. There are eight bits in
+ the CPSR which describe conditional execution state. Once
+ reconstructed (they're in a funny order), the low five bits
+ describe the low bit of the condition for each instruction and
+ how many instructions remain. The high three bits describe the
+ base condition. One of the low four bits will be set if an IT
+ block is active. These bits read as zero on earlier
+ processors. */
+ status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
+ itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
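+ /* (CPSR bits 15:10 hold IT[7:2] and bits 26:25 hold IT[1:0]; the
+ two shifts above paste the field back together.) */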
+
+ /* If-Then handling. On GNU/Linux, where this routine is used, we
+ use an undefined instruction as a breakpoint. Unlike BKPT, IT
+ can disable execution of the undefined instruction. So we might
+ miss the breakpoint if we set it on a skipped conditional
+ instruction. Because conditional instructions can change the
+ flags, affecting the execution of further instructions, we may
+ need to set two breakpoints. */
+
+ if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
+ {
+ if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
+ {
+ /* An IT instruction. Because this instruction does not
+ modify the flags, we can accurately predict the next
+ executed instruction. */
+ itstate = inst1 & 0x00ff;
+ pc += thumb_insn_size (inst1);
+
+ while (itstate != 0 && ! condition_true (itstate >> 4, status))
+ {
+ inst1 = read_memory_unsigned_integer (pc, 2,
+ byte_order_for_code);
+ pc += thumb_insn_size (inst1);
+ itstate = thumb_advance_itstate (itstate);
+ }
+
+ return MAKE_THUMB_ADDR (pc);
+ }
+ else if (itstate != 0)
+ {
+ /* We are in a conditional block. Check the condition. */
+ if (! condition_true (itstate >> 4, status))
+ {
+ /* Advance to the next executed instruction. */
+ pc += thumb_insn_size (inst1);
+ itstate = thumb_advance_itstate (itstate);
+
+ while (itstate != 0 && ! condition_true (itstate >> 4, status))
+ {
+ inst1 = read_memory_unsigned_integer (pc, 2,
+ byte_order_for_code);
+ pc += thumb_insn_size (inst1);
+ itstate = thumb_advance_itstate (itstate);
+ }
+
+ return MAKE_THUMB_ADDR (pc);
+ }
+ else if ((itstate & 0x0f) == 0x08)
+ {
+ /* This is the last instruction of the conditional
+ block, and it is executed. We can handle it normally
+ because the following instruction is not conditional,
+ and we must handle it normally because it is
+ permitted to branch. Fall through. */
+ }
+ else
+ {
+ int cond_negated;
+
+ /* There are conditional instructions after this one.
+ If this instruction modifies the flags, then we can
+ not predict what the next executed instruction will
+ be. Fortunately, this instruction is architecturally
+ forbidden to branch; we know it will fall through.
+ Start by skipping past it. */
+ pc += thumb_insn_size (inst1);
+ itstate = thumb_advance_itstate (itstate);
+
+ /* Set a breakpoint on the following instruction. */
+ gdb_assert ((itstate & 0x0f) != 0);
+ arm_insert_single_step_breakpoint (gdbarch, aspace,
+ MAKE_THUMB_ADDR (pc));
+ cond_negated = (itstate >> 4) & 1;
+
+ /* Skip all following instructions with the same
+ condition. If there is a later instruction in the IT
+ block with the opposite condition, set the other
+ breakpoint there. If not, then set a breakpoint on
+ the instruction after the IT block. */
+ do
+ {
+ inst1 = read_memory_unsigned_integer (pc, 2,
+ byte_order_for_code);
+ pc += thumb_insn_size (inst1);
+ itstate = thumb_advance_itstate (itstate);
+ }
+ while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
+
+ return MAKE_THUMB_ADDR (pc);
+ }
+ }
+ }
+ else if (itstate & 0x0f)
+ {
+ /* We are in a conditional block. Check the condition. */
+ int cond = itstate >> 4;
+
+ if (! condition_true (cond, status))
+ /* Advance to the next instruction. All the 32-bit
+ instructions share a common prefix. */
+ return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));
+
+ /* Otherwise, handle the instruction normally. */
+ }
+
+ if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
+ {
+ CORE_ADDR sp;
+
+ /* Fetch the saved PC from the stack. It's stored above
+ all of the other registers. */
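+ /* E.g. for "pop {r4, r5, pc}" two registers lie below the saved
+ PC, so it is fetched from SP + 8. */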
+ offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
+ sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
+ nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
+ }
+ else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
+ {
+ unsigned long cond = bits (inst1, 8, 11);
+ if (cond == 0x0f) /* 0x0f = SWI */
+ {
+ struct gdbarch_tdep *tdep;
+ tdep = gdbarch_tdep (gdbarch);
+
+ if (tdep->syscall_next_pc != NULL)
+ nextpc = tdep->syscall_next_pc (frame);
+
+ }
+ else if (cond != 0x0f && condition_true (cond, status))
+ nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
+ }
+ else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
+ {
+ nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
+ }
+ else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
+ {
+ unsigned short inst2;
+ inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
+
+ /* Default to the next instruction. */
+ nextpc = pc + 4;
+ nextpc = MAKE_THUMB_ADDR (nextpc);
+
+ if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
+ {
+ /* Branches and miscellaneous control instructions. */
+
+ if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
+ {
+ /* B, BL, BLX. */
+ int j1, j2, imm1, imm2;
+
+ imm1 = sbits (inst1, 0, 10);
+ imm2 = bits (inst2, 0, 10);
+ j1 = bit (inst2, 13);
+ j2 = bit (inst2, 11);
+
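+ /* In this encoding, offset bits I1 and I2 are NOT(J1 EOR S) and
+ NOT(J2 EOR S). Because IMM1 was sign-extended, bits 23:22 of
+ OFFSET currently hold S, so XORing in the inverted J bits turns
+ them into I1:I2. */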
+ offset = ((imm1 << 12) + (imm2 << 1));
+ offset ^= ((!j2) << 22) | ((!j1) << 23);
+
+ nextpc = pc_val + offset;
+ /* For BLX make sure to clear the low bits. */
+ if (bit (inst2, 12) == 0)
+ nextpc = nextpc & 0xfffffffc;
+ }
+ else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
+ {
+ /* SUBS PC, LR, #imm8. */
+ nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
+ nextpc -= inst2 & 0x00ff;
+ }
+ else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
+ {
+ /* Conditional branch. */
+ if (condition_true (bits (inst1, 6, 9), status))
+ {
+ int sign, j1, j2, imm1, imm2;
+
+ sign = sbits (inst1, 10, 10);
+ imm1 = bits (inst1, 0, 5);
+ imm2 = bits (inst2, 0, 10);
+ j1 = bit (inst2, 13);
+ j2 = bit (inst2, 11);
+
+ offset = (sign << 20) + (j2 << 19) + (j1 << 18);
+ offset += (imm1 << 12) + (imm2 << 1);
+
+ nextpc = pc_val + offset;
+ }
+ }
+ }
+ else if ((inst1 & 0xfe50) == 0xe810)
+ {
+ /* Load multiple or RFE. */
+ int rn, offset, load_pc = 1;
+
+ rn = bits (inst1, 0, 3);
+ if (bit (inst1, 7) && !bit (inst1, 8))
+ {
+ /* LDMIA or POP */
+ if (!bit (inst2, 15))
+ load_pc = 0;
+ offset = bitcount (inst2) * 4 - 4;
+ }
+ else if (!bit (inst1, 7) && bit (inst1, 8))
+ {
+ /* LDMDB */
+ if (!bit (inst2, 15))
+ load_pc = 0;
+ offset = -4;
+ }
+ else if (bit (inst1, 7) && bit (inst1, 8))
+ {
+ /* RFEIA */
+ offset = 0;
+ }
+ else if (!bit (inst1, 7) && !bit (inst1, 8))
+ {
+ /* RFEDB */
+ offset = -8;
+ }
+ else
+ load_pc = 0;
+
+ if (load_pc)
+ {
+ CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
+ nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
+ }
+ }
+ else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
+ {
+ /* MOV PC or MOVS PC. */
+ nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
+ nextpc = MAKE_THUMB_ADDR (nextpc);
+ }
+ else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
+ {
+ /* LDR PC. */
+ CORE_ADDR base;
+ int rn, load_pc = 1;
+
+ rn = bits (inst1, 0, 3);
+ base = get_frame_register_unsigned (frame, rn);
+ if (rn == ARM_PC_REGNUM)
+ {
+ base = (base + 4) & ~(CORE_ADDR) 0x3;
+ if (bit (inst1, 7))
+ base += bits (inst2, 0, 11);
+ else
+ base -= bits (inst2, 0, 11);
+ }
+ else if (bit (inst1, 7))
+ base += bits (inst2, 0, 11);
+ else if (bit (inst2, 11))
+ {
+ if (bit (inst2, 10))
+ {
+ if (bit (inst2, 9))
+ base += bits (inst2, 0, 7);
+ else
+ base -= bits (inst2, 0, 7);
+ }
+ }
+ else if ((inst2 & 0x0fc0) == 0x0000)
+ {
+ int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
+ base += get_frame_register_unsigned (frame, rm) << shift;
+ }
+ else
+ /* Reserved. */
+ load_pc = 0;
+
+ if (load_pc)
+ nextpc = get_frame_memory_unsigned (frame, base, 4);
+ }
+ else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
+ {
+ /* TBB. */
+ CORE_ADDR tbl_reg, table, offset, length;
+
+ tbl_reg = bits (inst1, 0, 3);
+ if (tbl_reg == 0x0f)
+ table = pc + 4; /* Regcache copy of PC isn't right yet. */
+ else
+ table = get_frame_register_unsigned (frame, tbl_reg);
+
+ offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
+ length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
+ nextpc = pc_val + length;
+ }
+ else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
+ {
+ /* TBH. */
+ CORE_ADDR tbl_reg, table, offset, length;
+
+ tbl_reg = bits (inst1, 0, 3);
+ if (tbl_reg == 0x0f)
+ table = pc + 4; /* Regcache copy of PC isn't right yet. */
+ else
+ table = get_frame_register_unsigned (frame, tbl_reg);
+
+ offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
+ length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
+ nextpc = pc_val + length;
+ }
+ }
+ else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
+ {
+ if (bits (inst1, 3, 6) == 0x0f)
+ nextpc = UNMAKE_THUMB_ADDR (pc_val);
+ else
+ nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
+ }
+ else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
+ {
+ if (bits (inst1, 3, 6) == 0x0f)
+ nextpc = pc_val;
+ else
+ nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
+
+ nextpc = MAKE_THUMB_ADDR (nextpc);
+ }
+ else if ((inst1 & 0xf500) == 0xb100)
+ {
+ /* CBNZ or CBZ. */
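+ /* The offset is the zero-extended i:imm5:'0' field; CBNZ (bit 11
+ set) branches if the register is non-zero, CBZ if it is zero. */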
+ int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
+ ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
+
+ if (bit (inst1, 11) && reg != 0)
+ nextpc = pc_val + imm;
+ else if (!bit (inst1, 11) && reg == 0)
+ nextpc = pc_val + imm;
+ }
+ return nextpc;
+}
+
+/* Get the raw next address. PC is the current program counter, in
+ FRAME, which is assumed to be executing in ARM mode.
+
+ The value returned has the execution state of the next instruction
+ encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
+ in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
+ address. */
+
+static CORE_ADDR
+arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
+{
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+ unsigned long pc_val;
+ unsigned long this_instr;
+ unsigned long status;
+ CORE_ADDR nextpc;
+
+ pc_val = (unsigned long) pc;
+ this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
+
+ status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
+ nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
+
+ if (bits (this_instr, 28, 31) == INST_NV)
+ switch (bits (this_instr, 24, 27))
+ {
+ case 0xa:
+ case 0xb:
+ {
+ /* Branch with Link and change to Thumb. */
+ nextpc = BranchDest (pc, this_instr);
+ nextpc |= bit (this_instr, 24) << 1;
+ nextpc = MAKE_THUMB_ADDR (nextpc);
+ break;
+ }
+ case 0xc:
+ case 0xd:
+ case 0xe:
+ /* Coprocessor register transfer. */
+ if (bits (this_instr, 12, 15) == 15)
+ error (_("Invalid update to pc in instruction"));
+ break;
+ }
+ else if (condition_true (bits (this_instr, 28, 31), status))
+ {
+ switch (bits (this_instr, 24, 27))
+ {
+ case 0x0:
+ case 0x1: /* data processing */
+ case 0x2:
+ case 0x3:
+ {
+ unsigned long operand1, operand2, result = 0;
+ unsigned long rn;
+ int c;
+
+ if (bits (this_instr, 12, 15) != 15)
+ break;
+
+ if (bits (this_instr, 22, 25) == 0
+ && bits (this_instr, 4, 7) == 9) /* multiply */
+ error (_("Invalid update to pc in instruction"));
+
+ /* BX <reg>, BLX <reg> */
+ if (bits (this_instr, 4, 27) == 0x12fff1
+ || bits (this_instr, 4, 27) == 0x12fff3)
+ {
+ rn = bits (this_instr, 0, 3);
+ nextpc = ((rn == ARM_PC_REGNUM)
+ ? (pc_val + 8)
+ : get_frame_register_unsigned (frame, rn));
+
+ return nextpc;
+ }
+
+ /* Otherwise, this is a data processing instruction writing to the
+ PC; evaluate its operands and compute the result. */
+ c = (status & FLAG_C) ? 1 : 0;
+ rn = bits (this_instr, 16, 19);
+ operand1 = ((rn == ARM_PC_REGNUM)
+ ? (pc_val + 8)
+ : get_frame_register_unsigned (frame, rn));
+
+ if (bit (this_instr, 25))
+ {
+ unsigned long immval = bits (this_instr, 0, 7);
+ unsigned long rotate = 2 * bits (this_instr, 8, 11);
+ operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
+ & 0xffffffff;
+ }
+ else /* operand 2 is a shifted register. */
+ operand2 = shifted_reg_val (frame, this_instr, c,
+ pc_val, status);
+
+ switch (bits (this_instr, 21, 24))
+ {
+ case 0x0: /*and */
+ result = operand1 & operand2;
+ break;
+
+ case 0x1: /*eor */
+ result = operand1 ^ operand2;
+ break;
+
+ case 0x2: /*sub */
+ result = operand1 - operand2;
+ break;
+
+ case 0x3: /*rsb */
+ result = operand2 - operand1;
+ break;
+
+ case 0x4: /*add */
+ result = operand1 + operand2;
+ break;
+
+ case 0x5: /*adc */
+ result = operand1 + operand2 + c;
+ break;
+
+ case 0x6: /*sbc */
+ result = operand1 - operand2 + c;
+ break;
+
+ case 0x7: /*rsc */
+ result = operand2 - operand1 + c;
+ break;
+
+ case 0x8:
+ case 0x9:
+ case 0xa:
+ case 0xb: /* tst, teq, cmp, cmn */
+ result = (unsigned long) nextpc;
+ break;
+
+ case 0xc: /*orr */
+ result = operand1 | operand2;
+ break;
+
+ case 0xd: /*mov */
+ /* Always step into a function. */
+ result = operand2;
+ break;
+
+ case 0xe: /*bic */
+ result = operand1 & ~operand2;
+ break;
+
+ case 0xf: /*mvn */
+ result = ~operand2;
+ break;
+ }
+
+ /* In 26-bit APCS the bottom two bits of the result are
+ ignored, and we always end up in ARM state. */
+ if (!arm_apcs_32)
+ nextpc = arm_addr_bits_remove (gdbarch, result);
+ else
+ nextpc = result;
+
+ break;
+ }
+
+ case 0x4:
+ case 0x5: /* data transfer */
+ case 0x6:
+ case 0x7:
+ if (bit (this_instr, 20))
+ {
+ /* load */
+ if (bits (this_instr, 12, 15) == 15)
+ {
+ /* rd == pc */
+ unsigned long rn;
+ unsigned long base;
+
+ if (bit (this_instr, 22))
+ error (_("Invalid update to pc in instruction"));
+
+ /* byte write to PC */
+ rn = bits (this_instr, 16, 19);
+ base = ((rn == ARM_PC_REGNUM)
+ ? (pc_val + 8)
+ : get_frame_register_unsigned (frame, rn));
+
+ if (bit (this_instr, 24))
+ {
+ /* pre-indexed */
+ int c = (status & FLAG_C) ? 1 : 0;
+ unsigned long offset =
+ (bit (this_instr, 25)
+ ? shifted_reg_val (frame, this_instr, c, pc_val, status)
+ : bits (this_instr, 0, 11));
+
+ if (bit (this_instr, 23))
+ base += offset;
+ else
+ base -= offset;
+ }
+ nextpc =
+ (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
+ 4, byte_order);
+ }
+ }
+ break;
+
+ case 0x8:
+ case 0x9: /* block transfer */
+ if (bit (this_instr, 20))
+ {
+ /* LDM */
+ if (bit (this_instr, 15))
+ {
+ /* loading pc */
+ int offset = 0;
+ unsigned long rn_val
+ = get_frame_register_unsigned (frame,
+ bits (this_instr, 16, 19));
+
+ if (bit (this_instr, 23))
+ {
+ /* up */
+ unsigned long reglist = bits (this_instr, 0, 14);
+ offset = bitcount (reglist) * 4;
+ if (bit (this_instr, 24)) /* pre */
+ offset += 4;
+ }
+ else if (bit (this_instr, 24))
+ offset = -4;
+
+ nextpc =
+ (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
+ (rn_val + offset),
+ 4, byte_order);
+ }
+ }
+ break;
+
+ case 0xb: /* branch & link */
+ case 0xa: /* branch */
+ {
+ nextpc = BranchDest (pc, this_instr);
+ break;
+ }
+
+ case 0xc:
+ case 0xd:
+ case 0xe: /* coproc ops */
+ break;
+ case 0xf: /* SWI */
+ {
+ struct gdbarch_tdep *tdep;
+ tdep = gdbarch_tdep (gdbarch);
+
+ if (tdep->syscall_next_pc != NULL)
+ nextpc = tdep->syscall_next_pc (frame);
+
+ }
+ break;
+
+ default:
+ fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
+ return (pc);
+ }
+ }
+
+ return nextpc;
+}
+
+/* Determine the next PC after the current instruction executes, calling
+ either arm_get_next_pc_raw or thumb_get_next_pc_raw depending on the
+ execution state of FRAME. */
+
+CORE_ADDR
+arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
+{
+ CORE_ADDR nextpc;
+
+ if (arm_frame_is_thumb (frame))
+ nextpc = thumb_get_next_pc_raw (frame, pc);
+ else
+ nextpc = arm_get_next_pc_raw (frame, pc);
+
+ return nextpc;
+}
+
+/* Like insert_single_step_breakpoint, but make sure we use a breakpoint
+ of the appropriate mode (as encoded in the PC value), even if this
+ differs from what would be expected according to the symbol tables. */
+
+void
+arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
+ struct address_space *aspace,
+ CORE_ADDR pc)
+{
+ struct cleanup *old_chain
+ = make_cleanup_restore_integer (&arm_override_mode);
+
+ arm_override_mode = IS_THUMB_ADDR (pc);
+ pc = gdbarch_addr_bits_remove (gdbarch, pc);
+
+ insert_single_step_breakpoint (gdbarch, aspace, pc);
+
+ do_cleanups (old_chain);
+}
+
+/* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
+ instruction and ending with a STREX{,B,H,D} instruction. If such a sequence
+ is found, attempt to step through it. A breakpoint is placed at the end of
+ the sequence. */
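+
+/* For instance, a compiler-generated atomic increment typically looks
+ like:
+
+ retry: ldrex r1, [r0]
+ add r1, r1, #1
+ strex r2, r1, [r0]
+ cmp r2, #0
+ bne retry
+
+ Trapping between the ldrex and strex would clear the exclusive
+ monitor, so the strex could never succeed; instead we place the
+ step breakpoint after the strex (and at the target of any
+ conditional branch out of the sequence). */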
+
+static int
+thumb_deal_with_atomic_sequence_raw (struct frame_info *frame)
+{
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ struct address_space *aspace = get_frame_address_space (frame);
+ enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+ CORE_ADDR pc = get_frame_pc (frame);
+ CORE_ADDR breaks[2] = {-1, -1};
+ CORE_ADDR loc = pc;
+ unsigned short insn1, insn2;
+ int insn_count;
+ int index;
+ int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
+ const int atomic_sequence_length = 16; /* Instruction sequence length. */
+ ULONGEST status, itstate;
+
+ /* We currently do not support atomic sequences within an IT block. */
+ status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
+ itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
+ if (itstate & 0x0f)
+ return 0;
+
+ /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction. */
+ insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
+ loc += 2;
+ if (thumb_insn_size (insn1) != 4)
+ return 0;
+
+ insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
+ loc += 2;
+ if (!((insn1 & 0xfff0) == 0xe850
+ || ((insn1 & 0xfff0) == 0xe8d0 && (insn2 & 0x00c0) == 0x0040)))
+ return 0;
+
+ /* Assume that no atomic sequence is longer than "atomic_sequence_length"
+ instructions. */
+ for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
+ {
+ insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
+ loc += 2;
+
+ if (thumb_insn_size (insn1) != 4)
+ {
+ /* Assume that there is at most one conditional branch in the
+ atomic sequence. If a conditional branch is found, put a
+ breakpoint at its destination address. */
+ if ((insn1 & 0xf000) == 0xd000 && bits (insn1, 8, 11) != 0x0f)
+ {
+ if (last_breakpoint > 0)
+ return 0; /* More than one conditional branch found,
+ fall back to the standard code. */
+
+ breaks[1] = loc + 2 + (sbits (insn1, 0, 7) << 1);
+ last_breakpoint++;
+ }
+
+ /* We do not support atomic sequences that use any instructions
+ other than conditional branches to change the PC.
+ Fall back to standard code to avoid losing control of
+ execution. */
+ else if (thumb_instruction_changes_pc (insn1))
+ return 0;
+ }
+ else
+ {
+ insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
+ loc += 2;
+
+ /* Assume that there is at most one conditional branch in the
+ atomic sequence. If a conditional branch is found, put a
+ breakpoint at its destination address. */
+ if ((insn1 & 0xf800) == 0xf000
+ && (insn2 & 0xd000) == 0x8000
+ && (insn1 & 0x0380) != 0x0380)
+ {
+ int sign, j1, j2, imm1, imm2;
+ unsigned int offset;
+
+ sign = sbits (insn1, 10, 10);
+ imm1 = bits (insn1, 0, 5);
+ imm2 = bits (insn2, 0, 10);
+ j1 = bit (insn2, 13);
+ j2 = bit (insn2, 11);
+
+ offset = (sign << 20) + (j2 << 19) + (j1 << 18);
+ offset += (imm1 << 12) + (imm2 << 1);
+
+ if (last_breakpoint > 0)
+ return 0; /* More than one conditional branch found,
+ fall back to the standard code. */
+
+ breaks[1] = loc + offset;
+ last_breakpoint++;
+ }
+
+ /* We do not support atomic sequences that use any instructions
+ other than conditional branches to change the PC.
+ Fall back to standard code to avoid losing control of
+ execution. */
+ else if (thumb2_instruction_changes_pc (insn1, insn2))
+ return 0;
+
+ /* If we find a strex{,b,h,d}, we're done. */
+ if ((insn1 & 0xfff0) == 0xe840
+ || ((insn1 & 0xfff0) == 0xe8c0 && (insn2 & 0x00c0) == 0x0040))
+ break;
+ }
+ }
+
+ /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
+ if (insn_count == atomic_sequence_length)
+ return 0;
+
+ /* Insert a breakpoint right after the end of the atomic sequence. */
+ breaks[0] = loc;
+
+ /* Check for duplicated breakpoints, and for a branch-destination
+ breakpoint placed anywhere within the sequence itself. */
+ if (last_breakpoint
+ && (breaks[1] == breaks[0]
+ || (breaks[1] >= pc && breaks[1] < loc)))
+ last_breakpoint = 0;
+
+ /* Insert the breakpoints. */
+ for (index = 0; index <= last_breakpoint; index++)
+ arm_insert_single_step_breakpoint (gdbarch, aspace,
+ MAKE_THUMB_ADDR (breaks[index]));
+
+ return 1;
+}
+
+static int
+arm_deal_with_atomic_sequence_raw (struct frame_info *frame)
+{
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ struct address_space *aspace = get_frame_address_space (frame);
+ enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+ CORE_ADDR pc = get_frame_pc (frame);
+ CORE_ADDR breaks[2] = {-1, -1};
+ CORE_ADDR loc = pc;
+ unsigned int insn;
+ int insn_count;
+ int index;
+ int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
+ const int atomic_sequence_length = 16; /* Instruction sequence length. */
+
+ /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
+ Note that we do not currently support conditionally executed atomic
+ instructions. */
+ insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
+ loc += 4;
+ if ((insn & 0xff9000f0) != 0xe1900090)
+ return 0;
+
+ /* Assume that no atomic sequence is longer than "atomic_sequence_length"
+ instructions. */
+ for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
+ {
+ insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
+ loc += 4;
+
+ /* Assume that there is at most one conditional branch in the atomic
+ sequence. If a conditional branch is found, put a breakpoint at
+ its destination address. */
+ if (bits (insn, 24, 27) == 0xa)
+ {
+ if (last_breakpoint > 0)
+ return 0; /* More than one conditional branch found, fall back
+ to the standard single-step code. */
+
+ breaks[1] = BranchDest (loc - 4, insn);
+ last_breakpoint++;
+ }
+
+ /* We do not support atomic sequences that use any instructions other
+ than conditional branches to change the PC. Fall back to standard
+ code to avoid losing control of execution. */
+ else if (arm_instruction_changes_pc (insn))
+ return 0;
+
+ /* If we find a strex{,b,h,d}, we're done. */
+ if ((insn & 0xff9000f0) == 0xe1800090)
+ break;
+ }
+
+ /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
+ if (insn_count == atomic_sequence_length)
+ return 0;
+
+ /* Insert a breakpoint right after the end of the atomic sequence. */
+ breaks[0] = loc;
+
+ /* Check for duplicated breakpoints, and for a branch-destination
+ breakpoint placed anywhere within the sequence itself. */
+ if (last_breakpoint
+ && (breaks[1] == breaks[0]
+ || (breaks[1] >= pc && breaks[1] < loc)))
+ last_breakpoint = 0;
+
+ /* Insert the breakpoints. */
+ for (index = 0; index <= last_breakpoint; index++)
+ arm_insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
+
+ return 1;
+}
+
+int
+arm_deal_with_atomic_sequence (struct frame_info *frame)
+{
+ if (arm_frame_is_thumb (frame))
+ return thumb_deal_with_atomic_sequence_raw (frame);
+ else
+ return arm_deal_with_atomic_sequence_raw (frame);
+}
+
+/* single_step() is called just before we want to resume the inferior,
+ if we want to single-step it but there is no hardware or kernel
+ single-step support. We find the target of the coming instruction
+ and breakpoint it. */
+
+int
+arm_software_single_step (struct frame_info *frame)
+{
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ struct address_space *aspace = get_frame_address_space (frame);
+ CORE_ADDR next_pc;
+
+ if (arm_deal_with_atomic_sequence (frame))
+ return 1;
+
+ next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
+ arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
+
+ return 1;
+}
+
+/* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
+ the buffer to be NEW_LEN bytes ending at ENDADDR. Return
+ NULL if an error occurs. BUF is freed. */
+
+static gdb_byte *
+extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
+ int old_len, int new_len)
+{
+ gdb_byte *new_buf;
+ int bytes_to_read = new_len - old_len;
+
+ new_buf = xmalloc (new_len);
+ memcpy (new_buf + bytes_to_read, buf, old_len);
+ xfree (buf);
+ if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
+ {
+ xfree (new_buf);
+ return NULL;
+ }
+ return new_buf;
+}
+
+/* An IT block is at most the 2-byte IT instruction followed by
+ four 4-byte instructions. The furthest back we must search to
+ find an IT block that affects the current instruction is thus
+ 2 + 3 * 4 == 14 bytes. */
+#define MAX_IT_BLOCK_PREFIX 14
+
+/* Use a quick scan if there are more than this many bytes of
+ code. */
+#define IT_SCAN_THRESHOLD 32
+
+/* Adjust a breakpoint's address to move breakpoints out of IT blocks.
+ A breakpoint in an IT block may not be hit, depending on the
+ condition flags. */
+static CORE_ADDR
+arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
+{
+ gdb_byte *buf;
+ char map_type;
+ CORE_ADDR boundary, func_start;
+ int buf_len;
+ enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
+ int i, any, last_it, last_it_count;
+
+ /* If we are using BKPT breakpoints, none of this is necessary. */
+ if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
+ return bpaddr;
+
+ /* ARM mode does not have this problem. */
+ if (!arm_pc_is_thumb (gdbarch, bpaddr))
+ return bpaddr;
+
+ /* We are setting a breakpoint in Thumb code that could potentially
+ contain an IT block. The first step is to find how much Thumb
+ code there is; we do not need to read outside of known Thumb
+ sequences. */
+ map_type = arm_find_mapping_symbol (bpaddr, &boundary);
+ if (map_type == 0)
+ /* Thumb-2 code must have mapping symbols to have a chance. */
+ return bpaddr;
+
+ bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
+
+ if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
+ && func_start > boundary)
+ boundary = func_start;
+
+ /* Search for a candidate IT instruction. We have to do some fancy
+ footwork to distinguish a real IT instruction from the second
+ half of a 32-bit instruction, but there is no need for that if
+ there's no candidate. */
+ buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
+ if (buf_len == 0)
+ /* No room for an IT instruction. */
+ return bpaddr;
+
+ buf = xmalloc (buf_len);
+ if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
+ {
+ xfree (buf);
+ return bpaddr;
+ }
+ any = 0;
+ for (i = 0; i < buf_len; i += 2)
+ {
+ unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
+ if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
+ {
+ any = 1;
+ break;
+ }
+ }
+ if (any == 0)
+ {
+ xfree (buf);
+ return bpaddr;
+ }
+
+ /* OK, the code bytes before this instruction contain at least one
+ halfword which resembles an IT instruction. We know that it's
+ Thumb code, but there are still two possibilities. Either the
+ halfword really is an IT instruction, or it is the second half of
+ a 32-bit Thumb instruction. The only way we can tell is to
+ scan forwards from a known instruction boundary. */
+ if (bpaddr - boundary > IT_SCAN_THRESHOLD)
+ {
+ int definite;
+
+ /* There's a lot of code before this instruction. Start with an
+ optimistic search; it's easy to recognize halfwords that cannot
+ be the start of a 32-bit instruction, and use that to
+ lock on to the instruction boundaries. */
+ buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
+ if (buf == NULL)
+ return bpaddr;
+ buf_len = IT_SCAN_THRESHOLD;
+
+ definite = 0;
+ /* Stop while at least MAX_IT_BLOCK_PREFIX bytes remain before
+ BPADDR, so an IT block affecting BPADDR is still in range. */
+ for (i = 0; i < buf_len - MAX_IT_BLOCK_PREFIX && ! definite; i += 2)
+ {
+ unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
+ if (thumb_insn_size (inst1) == 2)
+ {
+ definite = 1;
+ break;
+ }
+ }
+
+ /* At this point, if DEFINITE, BUF[I] is the first place we
+ are sure that we know the instruction boundaries, and it is far
+ enough from BPADDR that we could not miss an IT instruction
+ affecting BPADDR. If ! DEFINITE, give up on the quick scan and
+ read all the way back to the known boundary. */
+ if (! definite)
+ {
+ buf = extend_buffer_earlier (buf, bpaddr, buf_len,
+ bpaddr - boundary);
+ if (buf == NULL)
+ return bpaddr;
+ buf_len = bpaddr - boundary;
+ i = 0;
+ }
+ }
+ else
+ {
+ buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
+ if (buf == NULL)
+ return bpaddr;
+ buf_len = bpaddr - boundary;
+ i = 0;
+ }
+
+ /* Scan forwards. Find the last IT instruction before BPADDR. */
+ last_it = -1;
+ last_it_count = 0;
+ while (i < buf_len)
+ {
+ unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
+ last_it_count--;
+ if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
+ {
+ last_it = i;
+ if (inst1 & 0x0001)
+ last_it_count = 4;
+ else if (inst1 & 0x0002)
+ last_it_count = 3;
+ else if (inst1 & 0x0004)
+ last_it_count = 2;
+ else
+ last_it_count = 1;
+ }
+ i += thumb_insn_size (inst1);
+ }
+
+ xfree (buf);
+
+ if (last_it == -1)
+ /* There wasn't really an IT instruction after all. */
+ return bpaddr;
+
+ if (last_it_count < 1)
+ /* It was too far away. */
+ return bpaddr;
+
+ /* This really is a trouble spot. Move the breakpoint to the IT
+ instruction. */
+ return bpaddr - buf_len + last_it;
+}
+
+/* ARM displaced stepping support.
+
+ Generally ARM displaced stepping works as follows:
+
+ 1. When an instruction is to be single-stepped, it is first decoded by
+ arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
+ Depending on the type of instruction, it is then copied to a scratch
+ location, possibly in a modified form. The copy_* set of functions
+ performs such modification, as necessary. A breakpoint is placed after
+ the modified instruction in the scratch space to return control to GDB.
+ Note in particular that instructions which modify the PC will no longer
+ do so after modification.
+
+ 2. The instruction is single-stepped, by setting the PC to the scratch
+ location address, and resuming. Control returns to GDB when the
+ breakpoint is hit.
+
+ 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
+ function used for the current instruction. This function's job is to
+ put the CPU/memory state back to what it would have been if the
+ instruction had been executed unmodified in its original location. */
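+
+ /* Illustrative walk-through (not any specific copy_* routine):
+ stepping "add r1, pc, #4" at 0x8000 with scratch space at 0x20000
+ proceeds roughly as follows:
+
+ 1. The instruction is copied to the scratch space rewritten as
+ "add r1, r0, #4", with r0 temporarily holding the value the PC
+ would have read at the original location (0x8008 in ARM mode).
+ 2. The inferior resumes at 0x20000 and stops at the breakpoint
+ placed after the copied instruction.
+ 3. The cleanup routine restores r0 and leaves r1 exactly as if the
+ original instruction had executed at 0x8000. */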
+
+/* NOP instruction (mov r0, r0). */
+#define ARM_NOP 0xe1a00000
+#define THUMB_NOP 0x4600
+
+/* Helper for register reads for displaced stepping. In particular, this
+ returns the PC as it would be seen by the instruction at its original
+ location. */
+
+ULONGEST
+displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
+ int regno)
+{
+ ULONGEST ret;
+ CORE_ADDR from = dsc->insn_addr;
+
+ if (regno == ARM_PC_REGNUM)
+ {
+ /* Compute pipeline offset:
+ - When executing an ARM instruction, PC reads as the address of the
+ current instruction plus 8.
+ - When executing a Thumb instruction, PC reads as the address of the
+ current instruction plus 4. */
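+
+ /* E.g. an ARM instruction at 0x8000 reads the PC as 0x8008; the
+ same address executed in Thumb state reads it as 0x8004. */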
+
+ if (!dsc->is_thumb)
+ from += 8;
+ else
+ from += 4;
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
+ (unsigned long) from);
+ return (ULONGEST) from;
+ }
+ else
+ {
+ regcache_cooked_read_unsigned (regs, regno, &ret);
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
+ regno, (unsigned long) ret);
+ return ret;
+ }
+}
+
+static int
+displaced_in_arm_mode (struct regcache *regs)
+{
+ ULONGEST ps;
+ ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
+
+ regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
+
+ return (ps & t_bit) == 0;
+}
+
+/* Write to the PC as from a branch instruction. */
+
+static void
+branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
+ ULONGEST val)
+{
+ if (!dsc->is_thumb)
+ /* Note: If bits 0/1 are set, this branch would be unpredictable for
+ architecture versions < 6. */
+ regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
+ val & ~(ULONGEST) 0x3);
+ else
+ regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
+ val & ~(ULONGEST) 0x1);
+}
+
+/* Write to the PC as from a branch-exchange instruction. */
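+/* Bit 0 of the target selects the resulting state: e.g. BX to 0x8001
+ branches to 0x8000 in Thumb state, while BX to 0x8000 remains in
+ ARM state. */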
+
+static void
+bx_write_pc (struct regcache *regs, ULONGEST val)
+{
+ ULONGEST ps;
+ ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
+
+ regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
+
+ if ((val & 1) == 1)
+ {
+ regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
+ regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
+ }
+ else if ((val & 2) == 0)
+ {
+ regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
+ regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
+ }
+ else
+ {
+ /* Unpredictable behaviour. Try to do something sensible (switch to ARM
+ mode, align dest to 4 bytes). */
+ warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
+ regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
+ regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
+ }
+}
+
+/* Write to the PC as if from a load instruction. */
+
+static void
+load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
+ ULONGEST val)
+{
+ if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
+ bx_write_pc (regs, val);
+ else
+ branch_write_pc (regs, dsc, val);
+}
+
+/* Write to the PC as if from an ALU instruction. */
+
+static void
+alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
+ ULONGEST val)
+{
+ if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
+ bx_write_pc (regs, val);
+ else
+ branch_write_pc (regs, dsc, val);
+}
+
+/* Helper for writing to registers for displaced stepping. Writing to the PC
+ has varying effects depending on the instruction which does the write:
+ this is controlled by the WRITE_PC argument. */
+
+void
+displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
+ int regno, ULONGEST val, enum pc_write_style write_pc)
+{
+ if (regno == ARM_PC_REGNUM)
+ {
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
+ (unsigned long) val);
+ switch (write_pc)
+ {
+ case BRANCH_WRITE_PC:
+ branch_write_pc (regs, dsc, val);
+ break;
+
+ case BX_WRITE_PC:
+ bx_write_pc (regs, val);
+ break;
+
+ case LOAD_WRITE_PC:
+ load_write_pc (regs, dsc, val);
+ break;
+
+ case ALU_WRITE_PC:
+ alu_write_pc (regs, dsc, val);
+ break;
+
+ case CANNOT_WRITE_PC:
+ warning (_("Instruction wrote to PC in an unexpected way when "
+ "single-stepping"));
+ break;
+
+ default:
+ internal_error (__FILE__, __LINE__,
+ _("Invalid argument to displaced_write_reg"));
+ }
+
+ dsc->wrote_to_pc = 1;
+ }
+ else
+ {
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
+ regno, (unsigned long) val);
+ regcache_cooked_write_unsigned (regs, regno, val);
+ }
+}
+
+/* This function is used to concisely determine if an instruction INSN
+ references PC. Register fields of interest in INSN should have the
+ corresponding fields of BITMASK set to 0b1111. The function
+ returns 1 if any of these fields in INSN reference the PC
+ (also 0b1111, r15), else it returns 0. */
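+
+ /* For example, with Rn in bits 16-19 and Rd in bits 12-15, a BITMASK
+ of 0x000ff000 makes this function return 1 exactly when either
+ field holds 0xf (the PC); arm_copy_alu_imm below uses it this
+ way. */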
+
+static int
+insn_references_pc (uint32_t insn, uint32_t bitmask)
+{
+ uint32_t lowbit = 1;
+
+ while (bitmask != 0)
+ {
+ uint32_t mask;
+
+ for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
+ ;
+
+ if (!lowbit)
+ break;
+
+ mask = lowbit * 0xf;
+
+ if ((insn & mask) == mask)
+ return 1;
+
+ bitmask &= ~mask;
+ }
+
+ return 0;
+}
+
+/* The simplest copy function. Many instructions have the same effect no
+ matter what address they are executed at: in those cases, use this. */
+
+static int
+arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
+ const char *iname, struct displaced_step_closure *dsc)
+{
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
+ "opcode/class '%s' unmodified\n", (unsigned long) insn,
+ iname);
+
+ dsc->modinsn[0] = insn;
+
+ return 0;
+}
+
+static int
+thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+ uint16_t insn2, const char *iname,
+ struct displaced_step_closure *dsc)
+{
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
+ "opcode/class '%s' unmodified\n", insn1, insn2,
+ iname);
+
+ dsc->modinsn[0] = insn1;
+ dsc->modinsn[1] = insn2;
+ dsc->numinsns = 2;
+
+ return 0;
+}
+
+/* Copy a 16-bit Thumb instruction (Thumb-1 or a 16-bit Thumb-2
+ encoding) without any modification. */
+static int
+thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
+ const char *iname,
+ struct displaced_step_closure *dsc)
+{
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
+ "opcode/class '%s' unmodified\n", insn,
+ iname);
+
+ dsc->modinsn[0] = insn;
+
+ return 0;
+}
+
+/* Preload instructions with immediate offset. */
+
+static void
+cleanup_preload (struct gdbarch *gdbarch,
+ struct regcache *regs, struct displaced_step_closure *dsc)
+{
+ displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
+ if (!dsc->u.preload.immed)
+ displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
+}
+
+static void
+install_preload (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc, unsigned int rn)
+{
+ ULONGEST rn_val;
+ /* Preload instructions:
+
+ {pli/pld} [rn, #+/-imm]
+ ->
+ {pli/pld} [r0, #+/-imm]. */
+
+ dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+ rn_val = displaced_read_reg (regs, dsc, rn);
+ displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
+ dsc->u.preload.immed = 1;
+
+ dsc->cleanup = &cleanup_preload;
+}
+
+static int
+arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int rn = bits (insn, 16, 19);
+
+ if (!insn_references_pc (insn, 0x000f0000ul))
+ return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
+ (unsigned long) insn);
+
+ dsc->modinsn[0] = insn & 0xfff0ffff;
+
+ install_preload (gdbarch, regs, dsc, rn);
+
+ return 0;
+}
+
+static int
+thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+ struct regcache *regs, struct displaced_step_closure *dsc)
+{
+ unsigned int rn = bits (insn1, 0, 3);
+ unsigned int u_bit = bit (insn1, 7);
+ int imm12 = bits (insn2, 0, 11);
+ ULONGEST pc_val;
+
+ if (rn != ARM_PC_REGNUM)
+ return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
+
+ /* The PC may only be used in the PLI (immediate, literal) Encoding T3
+ and PLD (literal) Encoding T1 forms. */
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog,
+ "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
+ (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
+ imm12);
+
+ if (!u_bit)
+ imm12 = -1 * imm12;
+
+ /* Rewrite instruction {pli/pld} PC imm12 into:
+ Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
+
+ {pli/pld} [r0, r1]
+
+ Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
+
+ dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+ dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+
+ pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+
+ displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
+ dsc->u.preload.immed = 0;
+
+ /* {pli/pld} [r0, r1] */
+ dsc->modinsn[0] = insn1 & 0xfff0;
+ dsc->modinsn[1] = 0xf001;
+ dsc->numinsns = 2;
+
+ dsc->cleanup = &cleanup_preload;
+ return 0;
+}
+
+/* Preload instructions with register offset. */
+
+static void
+install_preload_reg (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc, unsigned int rn,
+ unsigned int rm)
+{
+ ULONGEST rn_val, rm_val;
+
+ /* Preload register-offset instructions:
+
+ {pli/pld} [rn, rm {, shift}]
+ ->
+ {pli/pld} [r0, r1 {, shift}]. */
+
+ dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+ dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+ rn_val = displaced_read_reg (regs, dsc, rn);
+ rm_val = displaced_read_reg (regs, dsc, rm);
+ displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
+ dsc->u.preload.immed = 0;
+
+ dsc->cleanup = &cleanup_preload;
+}
+
+static int
+arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
+ struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int rn = bits (insn, 16, 19);
+ unsigned int rm = bits (insn, 0, 3);
+
+ if (!insn_references_pc (insn, 0x000f000ful))
+ return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
+ (unsigned long) insn);
+
+ dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
+
+ install_preload_reg (gdbarch, regs, dsc, rn, rm);
+ return 0;
+}
+
+/* Copy/cleanup coprocessor load and store instructions. */
+
+static void
+cleanup_copro_load_store (struct gdbarch *gdbarch,
+ struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
+
+ displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
+
+ if (dsc->u.ldst.writeback)
+ displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
+}
+
+static void
+install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc,
+ int writeback, unsigned int rn)
+{
+ ULONGEST rn_val;
+
+ /* Coprocessor load/store instructions:
+
+ {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
+ ->
+ {stc/stc2} [r0, #+/-imm].
+
+ ldc/ldc2 are handled identically. */
+
+ dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+ rn_val = displaced_read_reg (regs, dsc, rn);
+ /* PC should be 4-byte aligned. */
+ rn_val = rn_val & 0xfffffffc;
+ displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
+
+ dsc->u.ldst.writeback = writeback;
+ dsc->u.ldst.rn = rn;
+
+ dsc->cleanup = &cleanup_copro_load_store;
+}
+
+static int
+arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
+ struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int rn = bits (insn, 16, 19);
+
+ if (!insn_references_pc (insn, 0x000f0000ul))
+ return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+ "load/store insn %.8lx\n", (unsigned long) insn);
+
+ dsc->modinsn[0] = insn & 0xfff0ffff;
+
+ install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
+
+ return 0;
+}
+
+static int
+thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
+ uint16_t insn2, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int rn = bits (insn1, 0, 3);
+
+ if (rn != ARM_PC_REGNUM)
+ return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+ "copro load/store", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+ "load/store insn %.4x%.4x\n", insn1, insn2);
+
+ dsc->modinsn[0] = insn1 & 0xfff0;
+ dsc->modinsn[1] = insn2;
+ dsc->numinsns = 2;
+
+ /* This function is called to copy the LDC/LDC2/VLDR literal forms
+ (Rn == PC), which do not support writeback, so pass 0. */
+ install_copro_load_store (gdbarch, regs, dsc, 0, rn);
+
+ return 0;
+}
+
+/* Clean up branch instructions (actually perform the branch, by setting
+ PC). */
+
+static void
+cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
+ int branch_taken = condition_true (dsc->u.branch.cond, status);
+ enum pc_write_style write_pc = dsc->u.branch.exchange
+ ? BX_WRITE_PC : BRANCH_WRITE_PC;
+
+ if (!branch_taken)
+ return;
+
+ if (dsc->u.branch.link)
+ {
+ /* LR should hold the address of the instruction after the current
+ one. So that a later `bx lr' does not confuse the mode-handling
+ logic, set bit 0 of LR when the current instruction is Thumb. */
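+ /* E.g. a 4-byte Thumb BL at 0x8000 sets LR to (0x8000 + 4) | 1
+ == 0x8005 here, so a later "bx lr" returns to Thumb state. */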
+ ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
+
+ if (dsc->is_thumb)
+ next_insn_addr |= 0x1;
+
+ displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
+ CANNOT_WRITE_PC);
+ }
+
+ displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
+}
+
+/* Copy B/BL/BLX instructions with immediate destinations. */
+
+static void
+install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc,
+ unsigned int cond, int exchange, int link, long offset)
+{
+ /* Implement "BL<cond> <label>" as:
+
+ Preparation: cond <- instruction condition
+ Insn: mov r0, r0 (nop)
+ Cleanup: if (condition true) { r14 <- pc; pc <- label }.
+
+ B<cond> similar, but don't set r14 in cleanup. */
+
+ dsc->u.branch.cond = cond;
+ dsc->u.branch.link = link;
+ dsc->u.branch.exchange = exchange;
+
+ dsc->u.branch.dest = dsc->insn_addr;
+ if (link && exchange)
+ /* For BLX, the offset is computed from Align (PC, 4). */
+ dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
+
+ if (dsc->is_thumb)
+ dsc->u.branch.dest += 4 + offset;
+ else
+ dsc->u.branch.dest += 8 + offset;
+
+ dsc->cleanup = &cleanup_branch;
+}
+
+static int
+arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
+ struct regcache *regs, struct displaced_step_closure *dsc)
+{
+ unsigned int cond = bits (insn, 28, 31);
+ int exchange = (cond == 0xf);
+ int link = exchange || bit (insn, 24);
+ long offset;
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
+ "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
+ (unsigned long) insn);
+ if (exchange)
+ /* For BLX, set bit 0 of the destination. The cleanup_branch function will
+ then arrange the switch into Thumb mode. */
+ offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
+ else
+ offset = bits (insn, 0, 23) << 2;
+
+ if (bit (offset, 25))
+ offset = offset | ~0x3ffffff;
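+
+ /* Illustrative example: a BLX whose 24-bit immediate field is 1 and
+ whose bit 24 is set yields offset (1 << 2) | (1 << 1) | 1 == 7;
+ the low bit later makes cleanup_branch switch to Thumb state via
+ bx_write_pc. */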
+
+ dsc->modinsn[0] = ARM_NOP;
+
+ install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
+ return 0;
+}
+
+static int
+thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
+ uint16_t insn2, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ int link = bit (insn2, 14);
+ int exchange = link && !bit (insn2, 12);
+ int cond = INST_AL;
+ long offset = 0;
+ int j1 = bit (insn2, 13);
+ int j2 = bit (insn2, 11);
+ int s = sbits (insn1, 10, 10);
+ int i1 = !(j1 ^ bit (insn1, 10));
+ int i2 = !(j2 ^ bit (insn1, 10));
+
+ if (!link && !exchange) /* B */
+ {
+ offset = (bits (insn2, 0, 10) << 1);
+ if (bit (insn2, 12)) /* Encoding T4 */
+ {
+ offset |= (bits (insn1, 0, 9) << 12)
+ | (i2 << 22)
+ | (i1 << 23)
+ | (s << 24);
+ cond = INST_AL;
+ }
+ else /* Encoding T3 */
+ {
+ offset |= (bits (insn1, 0, 5) << 12)
+ | (j1 << 18)
+ | (j2 << 19)
+ | (s << 20);
+ cond = bits (insn1, 6, 9);
+ }
+ }
+ else
+ {
+ offset = (bits (insn1, 0, 9) << 12);
+ offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
+ offset |= exchange ?
+ (bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
+ }
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
+ "%.4x %.4x with offset %.8lx\n",
+ link ? (exchange) ? "blx" : "bl" : "b",
+ insn1, insn2, offset);
+
+ dsc->modinsn[0] = THUMB_NOP;
+
+ install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
+ return 0;
+}
+
+/* Copy B Thumb instructions. */
+static int
+thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int cond = 0;
+ int offset = 0;
+ unsigned short bit_12_15 = bits (insn, 12, 15);
+ CORE_ADDR from = dsc->insn_addr;
+
+ if (bit_12_15 == 0xd) /* Encoding T1 */
+ {
+ /* offset = SignExtend (imm8:0, 32) */
+ offset = sbits ((insn << 1), 0, 8);
+ cond = bits (insn, 8, 11);
+ }
+ else if (bit_12_15 == 0xe) /* Encoding T2 */
+ {
+ offset = sbits ((insn << 1), 0, 11);
+ cond = INST_AL;
+ }
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog,
+ "displaced: copying b immediate insn %.4x "
+ "with offset %d\n", insn, offset);
+
+ dsc->u.branch.cond = cond;
+ dsc->u.branch.link = 0;
+ dsc->u.branch.exchange = 0;
+ dsc->u.branch.dest = from + 4 + offset;
+
+ dsc->modinsn[0] = THUMB_NOP;
+
+ dsc->cleanup = &cleanup_branch;
+
+ return 0;
+}
+
+/* Copy BX/BLX with register-specified destinations. */
+
+static void
+install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc, int link,
+ unsigned int cond, unsigned int rm)
+{
+ /* Implement "{BX,BLX}<cond> <reg>" as:
+
+ Preparation: cond <- instruction condition
+ Insn: mov r0, r0 (nop)
+ Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
+
+ Don't set r14 in cleanup for BX. */
+
+ dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
+
+ dsc->u.branch.cond = cond;
+ dsc->u.branch.link = link;
+
+ dsc->u.branch.exchange = 1;
+
+ dsc->cleanup = &cleanup_branch;
+}
+
+static int
+arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
+ struct regcache *regs, struct displaced_step_closure *dsc)
+{
+ unsigned int cond = bits (insn, 28, 31);
+ /* BX: x12xxx1x
+ BLX: x12xxx3x. */
+ int link = bit (insn, 5);
+ unsigned int rm = bits (insn, 0, 3);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
+ (unsigned long) insn);
+
+ dsc->modinsn[0] = ARM_NOP;
+
+ install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
+ return 0;
+}
+
+static int
+thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
+ struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ int link = bit (insn, 7);
+ unsigned int rm = bits (insn, 3, 6);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
+ (unsigned short) insn);
+
+ dsc->modinsn[0] = THUMB_NOP;
+
+ install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
+
+ return 0;
+}
+
+
+/* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
+
+static void
+cleanup_alu_imm (struct gdbarch *gdbarch,
+ struct regcache *regs, struct displaced_step_closure *dsc)
+{
+ ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
+ displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
+}
+
+static int
+arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int rn = bits (insn, 16, 19);
+ unsigned int rd = bits (insn, 12, 15);
+ unsigned int op = bits (insn, 21, 24);
+ int is_mov = (op == 0xd);
+ ULONGEST rd_val, rn_val;
+
+ if (!insn_references_pc (insn, 0x000ff000ul))
+ return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
+ "%.8lx\n", is_mov ? "move" : "ALU",
+ (unsigned long) insn);
+
+ /* Instruction is of form:
+
+ <op><cond> rd, [rn,] #imm
+
+ Rewrite as:
+
+ Preparation: tmp1, tmp2 <- r0, r1;
+ r0, r1 <- rd, rn
+ Insn: <op><cond> r0, r1, #imm
+ Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
+ */
+
+ dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+ dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+ rn_val = displaced_read_reg (regs, dsc, rn);
+ rd_val = displaced_read_reg (regs, dsc, rd);
+ displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
+ dsc->rd = rd;
+
+ if (is_mov)
+ dsc->modinsn[0] = insn & 0xfff00fff;
+ else
+ dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
+
+ dsc->cleanup = &cleanup_alu_imm;
+
+ return 0;
+}
+
+static int
+thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
+ uint16_t insn2, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int op = bits (insn1, 5, 8);
+ unsigned int rn, rm, rd;
+ ULONGEST rd_val, rn_val;
+
+ rn = bits (insn1, 0, 3); /* Rn */
+ rm = bits (insn2, 0, 3); /* Rm */
+ rd = bits (insn2, 8, 11); /* Rd */
+
+ /* This routine is only called for instruction MOV. */
+ gdb_assert (op == 0x2 && rn == 0xf);
+
+ if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
+ return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
+ "ALU", insn1, insn2);
+
+ /* Instruction is of form:
+
+ <op><cond> rd, [rn,] #imm
+
+ Rewrite as:
+
+ Preparation: tmp1, tmp2 <- r0, r1;
+ r0, r1 <- rd, rn
+ Insn: <op><cond> r0, r1, #imm
+ Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
+ */
+
+ dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+ dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+ rn_val = displaced_read_reg (regs, dsc, rn);
+ rd_val = displaced_read_reg (regs, dsc, rd);
+ displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
+ dsc->rd = rd;
+
+ dsc->modinsn[0] = insn1;
+ dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
+ dsc->numinsns = 2;
+
+ dsc->cleanup = &cleanup_alu_imm;
+
+ return 0;
+}
+
+/* Copy/cleanup arithmetic/logic insns with register RHS. */
+
+static void
+cleanup_alu_reg (struct gdbarch *gdbarch,
+ struct regcache *regs, struct displaced_step_closure *dsc)
+{
+ ULONGEST rd_val;
+ int i;
+
+ rd_val = displaced_read_reg (regs, dsc, 0);
+
+ for (i = 0; i < 3; i++)
+ displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
+
+ displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
+}
+
+static void
+install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc,
+ unsigned int rd, unsigned int rn, unsigned int rm)
+{
+ ULONGEST rd_val, rn_val, rm_val;
+
+ /* Instruction is of form:
+
+ <op><cond> rd, [rn,] rm [, <shift>]
+
+ Rewrite as:
+
+ Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
+ r0, r1, r2 <- rd, rn, rm
+ Insn: <op><cond> r0, r1, r2 [, <shift>]
+ Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
+ */
+
+ dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+ dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+ dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
+ rd_val = displaced_read_reg (regs, dsc, rd);
+ rn_val = displaced_read_reg (regs, dsc, rn);
+ rm_val = displaced_read_reg (regs, dsc, rm);
+ displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
+ dsc->rd = rd;
+
+ dsc->cleanup = &cleanup_alu_reg;
+}
+
+static int
+arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int op = bits (insn, 21, 24);
+ int is_mov = (op == 0xd);
+
+ if (!insn_references_pc (insn, 0x000ff00ful))
+ return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
+ is_mov ? "move" : "ALU", (unsigned long) insn);
+
+ if (is_mov)
+ dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
+ else
+ dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
+
+ install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
+ bits (insn, 0, 3));
+ return 0;
+}
+
+static int
+thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
+ struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned rm, rd;
+
+ rd = (bit (insn, 7) << 3) | bits (insn, 0, 2);
+ rm = bits (insn, 3, 6);
+
+ if (rd != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+ return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
+ "ALU", (unsigned short) insn);
+
+ /* Rewrite the operand fields to use r0 (Rd) and r2 (Rm). */
+ dsc->modinsn[0] = ((insn & 0xff00) | 0x10);
+
+ install_alu_reg (gdbarch, regs, dsc, rd, rd, rm);
+
+ return 0;
+}
+
+/* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
+
+static void
+cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
+ struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
+ int i;
+
+ for (i = 0; i < 4; i++)
+ displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
+
+ displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
+}
+
+static void
+install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc,
+ unsigned int rd, unsigned int rn, unsigned int rm,
+ unsigned rs)
+{
+ int i;
+ ULONGEST rd_val, rn_val, rm_val, rs_val;
+
+ /* Instruction is of form:
+
+ <op><cond> rd, [rn,] rm, <shift> rs
+
+ Rewrite as:
+
+ Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
+ r0, r1, r2, r3 <- rd, rn, rm, rs
+ Insn: <op><cond> r0, r1, r2, <shift> r3
+ Cleanup: tmp5 <- r0
+ r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
+ rd <- tmp5
+ */
+
+ for (i = 0; i < 4; i++)
+ dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
+
+ rd_val = displaced_read_reg (regs, dsc, rd);
+ rn_val = displaced_read_reg (regs, dsc, rn);
+ rm_val = displaced_read_reg (regs, dsc, rm);
+ rs_val = displaced_read_reg (regs, dsc, rs);
+ displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
+ dsc->rd = rd;
+ dsc->cleanup = &cleanup_alu_shifted_reg;
+}
+
+static int
+arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
+ struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ unsigned int op = bits (insn, 21, 24);
+ int is_mov = (op == 0xd);
+ unsigned int rd, rn, rm, rs;
+
+ if (!insn_references_pc (insn, 0x000fff0ful))
+ return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
+ "%.8lx\n", is_mov ? "move" : "ALU",
+ (unsigned long) insn);
+
+ rn = bits (insn, 16, 19);
+ rm = bits (insn, 0, 3);
+ rs = bits (insn, 8, 11);
+ rd = bits (insn, 12, 15);
+
+ if (is_mov)
+ dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
+ else
+ dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
+
+ install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
+
+ return 0;
+}
+
+/* Clean up load instructions. */
+
+static void
+cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ ULONGEST rt_val, rt_val2 = 0, rn_val;
+
+ rt_val = displaced_read_reg (regs, dsc, 0);
+ if (dsc->u.ldst.xfersize == 8)
+ rt_val2 = displaced_read_reg (regs, dsc, 1);
+ rn_val = displaced_read_reg (regs, dsc, 2);
+
+ displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
+ if (dsc->u.ldst.xfersize > 4)
+ displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
+ if (!dsc->u.ldst.immed)
+ displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
+
+ /* Handle register writeback. */
+ if (dsc->u.ldst.writeback)
+ displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
+ /* Put result in right place. */
+ displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
+ if (dsc->u.ldst.xfersize == 8)
+ displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
+}
+
+/* Clean up store instructions. */
+
+static void
+cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
+ struct displaced_step_closure *dsc)
+{
+ ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
+
+ displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
+ if (dsc->u.ldst.xfersize > 4)
+ displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
+ displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
+ if (!dsc->u.ldst.immed)
+ displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
+ if (!dsc->u.ldst.restore_r4)
+ displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
+
+ /* Writeback. */
+ if (dsc->u.ldst.writeback)
+ displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
+}
+
+/* Copy "extra" load/store instructions. These are halfword/doubleword
+ transfers, which have a different encoding to byte/word transfers. */