struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
struct dwarf2_frame_state_reg *ra_state;
- if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
+ if (op == DW_CFA_AARCH64_negate_ra_state)
{
+ /* On systems without pauth, treat as a nop. */
+ if (!tdep->has_pauth ())
+ return true;
+
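+ /* DW_CFA_AARCH64_negate_ra_state toggles whether the return address
+ of the current frame is mangled (signed with a pointer-authentication
+ code), so the flip must be recorded in the RA_STATE column.  */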
/* Allocate RA_STATE column if it's not allocated yet. */
fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);
/* When arguments must be pushed onto the stack, they go on in reverse
order. The code below implements a FILO (stack) to do this. */
-typedef struct
+struct stack_item_t
{
/* Value to pass on stack. It can be NULL if this item is for stack
padding. */
const gdb_byte *data;

/* Size in bytes of value to pass on stack. */
int len;
-} stack_item_t;
-
-DEF_VEC_O (stack_item_t);
+};
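+
+/* Illustrative sketch (not part of the original patch): values and any
+ trailing padding are appended with push_back, then drained back to
+ front when the dummy call writes them below the stack pointer:
+
+ info->si.push_back ({buf, len}); // value
+ info->si.push_back ({nullptr, pad}); // padding
+ ...
+ info->si.pop_back ();  */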
-/* Return the alignment (in bytes) of the given type. */
+/* Implement the gdbarch type alignment method.  Overrides the generic
+ alignment algorithm for anything that is aarch64 specific. */
-static int
-aarch64_type_align (struct type *t)
+static ULONGEST
+aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
- int n;
- int align;
- int falign;
-
t = check_typedef (t);
- switch (TYPE_CODE (t))
+ if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
{
- default:
- /* Should never happen. */
- internal_error (__FILE__, __LINE__, _("unknown type alignment"));
- return 4;
-
- case TYPE_CODE_PTR:
- case TYPE_CODE_ENUM:
- case TYPE_CODE_INT:
- case TYPE_CODE_FLT:
- case TYPE_CODE_SET:
- case TYPE_CODE_RANGE:
- case TYPE_CODE_BITSTRING:
- case TYPE_CODE_REF:
- case TYPE_CODE_RVALUE_REF:
- case TYPE_CODE_CHAR:
- case TYPE_CODE_BOOL:
- return TYPE_LENGTH (t);
-
- case TYPE_CODE_ARRAY:
- if (TYPE_VECTOR (t))
- {
- /* Use the natural alignment for vector types (the same for
- scalar type), but the maximum alignment is 128-bit. */
- if (TYPE_LENGTH (t) > 16)
- return 16;
- else
- return TYPE_LENGTH (t);
- }
+ /* Use the natural alignment for vector types (the same as for the
+ scalar type), but the maximum alignment is 128-bit. */
+ if (TYPE_LENGTH (t) > 16)
+ return 16;
else
- return aarch64_type_align (TYPE_TARGET_TYPE (t));
- case TYPE_CODE_COMPLEX:
- return aarch64_type_align (TYPE_TARGET_TYPE (t));
-
- case TYPE_CODE_STRUCT:
- case TYPE_CODE_UNION:
- align = 1;
- for (n = 0; n < TYPE_NFIELDS (t); n++)
- {
- falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
- if (falign > align)
- align = falign;
- }
- return align;
+ return TYPE_LENGTH (t);
}
+
+ /* Allow the common code to calculate the alignment. */
+ return 0;
}
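+
+/* Note: returning zero from a gdbarch type_align hook makes the common
+ type_align code fall back to its generic per-type-code calculation;
+ a non-zero return is used directly as the alignment in bytes.  */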
/* Worker function for aapcs_is_vfp_call_or_return_candidate.
struct aarch64_call_info
{
/* The current argument number. */
- unsigned argnum;
+ unsigned argnum = 0;
/* The next general purpose register number, equivalent to NGRN as
described in the AArch64 Procedure Call Standard. */
- unsigned ngrn;
+ unsigned ngrn = 0;
/* The next SIMD and floating point register number, equivalent to
NSRN as described in the AArch64 Procedure Call Standard. */
- unsigned nsrn;
+ unsigned nsrn = 0;
/* The next stacked argument address, equivalent to NSAA as
described in the AArch64 Procedure Call Standard. */
- unsigned nsaa;
+ unsigned nsaa = 0;
/* Stack item vector. */
- VEC(stack_item_t) *si;
+ std::vector<stack_item_t> si;
};
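+
+/* Per AAPCS64: NGRN indexes the general argument registers X0-X7, NSRN
+ the SIMD and floating point argument registers V0-V7, and NSAA is the
+ byte offset of the next stacked argument; once a register class is
+ exhausted, remaining arguments of that class are placed on SI.  */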
/* Pass a value in a sequence of consecutive X registers. The caller
info->argnum++;
- align = aarch64_type_align (type);
+ align = type_align (type);
/* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
Natural alignment of the argument's type. */
item.len = len;
item.data = buf;
- VEC_safe_push (stack_item_t, info->si, &item);
+ info->si.push_back (item);
info->nsaa += len;
if (info->nsaa & (align - 1))
item.len = pad;
item.data = NULL;
- VEC_safe_push (stack_item_t, info->si, &item);
+ info->si.push_back (item);
info->nsaa += pad;
}
}
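+
+/* Worked example (hypothetical values): a 12-byte argument with 8-byte
+ alignment appends a 12-byte value item followed by a 4-byte NULL-data
+ padding item, leaving NSAA a multiple of 8 for the next argument.  */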
int argnum;
struct aarch64_call_info info;
- memset (&info, 0, sizeof (info));
-
/* We need to know what the type of the called function is in order
to determine the number of named/anonymous arguments for the
actual argument placement, and the return type in order to handle
if (info.nsaa & 15)
sp -= 16 - (info.nsaa & 15);
- while (!VEC_empty (stack_item_t, info.si))
+ while (!info.si.empty ())
{
- stack_item_t *si = VEC_last (stack_item_t, info.si);
+ const stack_item_t &si = info.si.back ();
- sp -= si->len;
- if (si->data != NULL)
- write_memory (sp, si->data, si->len);
- VEC_pop (stack_item_t, info.si);
+ sp -= si.len;
+ if (si.data != NULL)
+ write_memory (sp, si.data, si.len);
+ info.si.pop_back ();
}
- VEC_free (stack_item_t, info.si);
-
/* Finally, update the SP register. */
regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
TYPE_CODE_UNION);
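+ /* Add an IEEE half-precision (fp16) view alongside the integer views.  */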
+ elem = builtin_type (gdbarch)->builtin_half;
+ append_composite_type_field (t, "f", elem);
+
elem = builtin_type (gdbarch)->builtin_uint16;
append_composite_type_field (t, "u", elem);
sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
TYPE_CODE_UNION);
+ append_composite_type_field (sub, "f",
+ init_vector_type (bt->builtin_half, 8));
append_composite_type_field (sub, "u",
init_vector_type (bt->builtin_uint16, 8));
append_composite_type_field (sub, "s",
/* The address where the instruction will be executed. */
CORE_ADDR new_addr;
/* Buffer of instructions to be copied to NEW_ADDR to execute. */
- uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
+ uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
/* Number of instructions in INSN_BUF. */
unsigned insn_count;
/* Registers when doing displaced stepping. */
dsd.insn_count = 0;
aarch64_relocate_instruction (insn, &visitor,
(struct aarch64_insn_data *) &dsd);
- gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
+ gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
if (dsd.insn_count != 0)
{
int i, num_regs = 0, num_pseudo_regs = 0;
int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
+ /* Use the vector length passed via the target info. Here -1 is used for no
+ SVE, and 0 is unset. If unset then use the vector length from the existing
+ tdesc. */
+ uint64_t vq = 0;
+ if (info.id == (int *) -1)
+ vq = 0;
+ else if (info.id != 0)
+ vq = (uint64_t) info.id;
+ else
+ vq = aarch64_get_tdesc_vq (info.target_desc);
+
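+ /* VQ counts the SVE vector length in 128-bit quadwords, so the
+ architectural maximum of 2048 bits corresponds to AARCH64_MAX_SVE_VQ
+ (16).  */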
+ if (vq > AARCH64_MAX_SVE_VQ)
+ internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
+ pulongest (vq), AARCH64_MAX_SVE_VQ);
+
/* If there is already a candidate, use it. */
for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
best_arch != nullptr;
best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
{
struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
- if (tdep)
+ if (tdep && tdep->vq == vq)
return best_arch->gdbarch;
}
- /* Ensure we always have a target description. */
+ /* Ensure we always have a target description, and that it is for the given
+ VQ value. */
const struct target_desc *tdesc = info.target_desc;
- if (!tdesc_has_registers (tdesc))
- tdesc = aarch64_read_description (0, false);
+ if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
+ tdesc = aarch64_read_description (vq, false);
gdb_assert (tdesc);
feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
tdep->lowest_pc = 0x20;
tdep->jb_pc = -1; /* Longjump support not enabled by default. */
tdep->jb_elt_size = 8;
- tdep->vq = aarch64_get_tdesc_vq (tdesc);
+ tdep->vq = vq;
tdep->pauth_reg_base = first_pauth_regnum;
tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
: pauth_ra_state_offset + num_regs;
set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
+ set_gdbarch_type_align (gdbarch, aarch64_type_align);
/* Internal <-> external register number maps. */
set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
selftests::aarch64_analyze_prologue_test);
selftests::register_test ("aarch64-process-record",
selftests::aarch64_process_record_test);
- selftests::record_xml_tdesc ("aarch64.xml",
- aarch64_create_target_description (0, false));
#endif
}