/* Common target dependent code for GDB on AArch64 systems.
- Copyright (C) 2009-2020 Free Software Foundation, Inc.
+ Copyright (C) 2009-2021 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GDB.
#include "opcode/aarch64.h"
#include <algorithm>
-#define submask(x) ((1L << ((x) + 1)) - 1)
-#define bit(obj,st) (((obj) >> (st)) & 1)
-#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
-
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
four members. */
#define HA_MAX_NUM_FLDS 4
/* All possible aarch64 target descriptors. */
-struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];
+static target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/][2 /* mte */];
/* The standard register names, and all the valid aliases for them. */
static const struct
"pauth_cmask"
};
+static const char *const aarch64_mte_register_names[] =
+{
+ /* Tag Control Register. */
+ "tag_ctl"
+};
+
/* AArch64 prologue cache structure. */
struct aarch64_prologue_cache
{
int framereg;
/* Saved register offsets. */
- struct trad_frame_saved_reg *saved_regs;
+ trad_frame_saved_reg *saved_regs;
};
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
- struct cmd_list_element *c, const char *value)
+ struct cmd_list_element *c, const char *value)
{
fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
regs[rd] = regs[rm];
else
{
- if (aarch64_debug)
- {
- debug_printf ("aarch64: prologue analysis gave up "
- "addr=%s opcode=0x%x (orr x register)\n",
- core_addr_to_string_nz (start), insn);
- }
+ aarch64_debug_printf ("prologue analysis gave up "
+ "addr=%s opcode=0x%x (orr x register)",
+ core_addr_to_string_nz (start), insn);
+
break;
}
}
}
else
{
- if (aarch64_debug)
- debug_printf ("aarch64: prologue analysis gave up addr=%s"
- " opcode=0x%x (iclass)\n",
- core_addr_to_string_nz (start), insn);
+ aarch64_debug_printf ("prologue analysis gave up addr=%s"
+ " opcode=0x%x (iclass)",
+ core_addr_to_string_nz (start), insn);
break;
}
if (tdep->has_pauth () && cache != nullptr)
- trad_frame_set_value (cache->saved_regs,
- tdep->pauth_ra_state_regnum,
- ra_state_val);
+ {
+ int regnum = tdep->pauth_ra_state_regnum;
+ cache->saved_regs[regnum].set_value (ra_state_val);
+ }
}
else
{
- if (aarch64_debug)
- {
- debug_printf ("aarch64: prologue analysis gave up addr=%s"
- " opcode=0x%x\n",
- core_addr_to_string_nz (start), insn);
- }
+ aarch64_debug_printf ("prologue analysis gave up addr=%s"
+ " opcode=0x%x",
+ core_addr_to_string_nz (start), insn);
+
break;
}
}
CORE_ADDR offset;
if (stack.find_reg (gdbarch, i, &offset))
- cache->saved_regs[i].addr = offset;
+ cache->saved_regs[i].set_addr (offset);
}
for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
&offset))
- cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
+ cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
}
return start;
{
struct gdbarch_info info;
- gdbarch_info_init (&info);
info.bfd_arch_info = bfd_scan_arch ("aarch64");
struct gdbarch *gdbarch = gdbarch_find_by_info (info);
for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
{
if (i == AARCH64_FP_REGNUM)
- SELF_CHECK (cache.saved_regs[i].addr == -272);
+ SELF_CHECK (cache.saved_regs[i].addr () == -272);
else if (i == AARCH64_LR_REGNUM)
- SELF_CHECK (cache.saved_regs[i].addr == -264);
+ SELF_CHECK (cache.saved_regs[i].addr () == -264);
else
- SELF_CHECK (cache.saved_regs[i].addr == -1);
+ SELF_CHECK (cache.saved_regs[i].is_realreg ()
+ && cache.saved_regs[i].realreg () == i);
}
for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
{
- int regnum = gdbarch_num_regs (gdbarch);
+ int num_regs = gdbarch_num_regs (gdbarch);
+ int regnum = i + num_regs + AARCH64_D0_REGNUM;
- SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
- == -1);
+ SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
+ && cache.saved_regs[regnum].realreg () == regnum);
}
}
for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
{
if (i == 1)
- SELF_CHECK (cache.saved_regs[i].addr == -16);
+ SELF_CHECK (cache.saved_regs[i].addr () == -16);
else if (i == 19)
- SELF_CHECK (cache.saved_regs[i].addr == -48);
+ SELF_CHECK (cache.saved_regs[i].addr () == -48);
else
- SELF_CHECK (cache.saved_regs[i].addr == -1);
+ SELF_CHECK (cache.saved_regs[i].is_realreg ()
+ && cache.saved_regs[i].realreg () == i);
}
for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
{
- int regnum = gdbarch_num_regs (gdbarch);
+ int num_regs = gdbarch_num_regs (gdbarch);
+ int regnum = i + num_regs + AARCH64_D0_REGNUM;
+
if (i == 0)
- SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
- == -24);
+ SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
else
- SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
- == -1);
+ SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
+ && cache.saved_regs[regnum].realreg () == regnum);
}
}
for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
{
if (i == 19)
- SELF_CHECK (cache.saved_regs[i].addr == -20);
+ SELF_CHECK (cache.saved_regs[i].addr () == -20);
else if (i == AARCH64_FP_REGNUM)
- SELF_CHECK (cache.saved_regs[i].addr == -48);
+ SELF_CHECK (cache.saved_regs[i].addr () == -48);
else if (i == AARCH64_LR_REGNUM)
- SELF_CHECK (cache.saved_regs[i].addr == -40);
+ SELF_CHECK (cache.saved_regs[i].addr () == -40);
else
- SELF_CHECK (cache.saved_regs[i].addr == -1);
+ SELF_CHECK (cache.saved_regs[i].is_realreg ()
+ && cache.saved_regs[i].realreg () == i);
}
if (tdep->has_pauth ())
{
- SELF_CHECK (trad_frame_value_p (cache.saved_regs,
- tdep->pauth_ra_state_regnum));
- SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
+ int regnum = tdep->pauth_ra_state_regnum;
+ SELF_CHECK (cache.saved_regs[regnum].is_value ());
}
}
}
cache->framereg = AARCH64_FP_REGNUM;
cache->framesize = 16;
- cache->saved_regs[29].addr = 0;
- cache->saved_regs[30].addr = 8;
+ cache->saved_regs[29].set_addr (0);
+ cache->saved_regs[30].set_addr (8);
}
}
/* Calculate actual addresses of saved registers using offsets
determined by aarch64_analyze_prologue. */
for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
- if (trad_frame_addr_p (cache->saved_regs, reg))
- cache->saved_regs[reg].addr += cache->prev_sp;
+ if (cache->saved_regs[reg].is_addr ())
+ cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
+ + cache->prev_sp);
cache->func = get_frame_func (this_frame);
lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
if (tdep->has_pauth ()
- && trad_frame_value_p (cache->saved_regs,
- tdep->pauth_ra_state_regnum))
+ && cache->saved_regs[tdep->pauth_ra_state_regnum].is_value ())
lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
return frame_unwind_got_constant (this_frame, prev_regnum, lr);
identified by the next frame's stack pointer at the time of the
call. The value was already reconstructed into PREV_SP. */
/*
- +----------+ ^
- | saved lr | |
+ +----------+ ^
+ | saved lr | |
+->| saved fp |--+
| | |
| | | <- Previous SP
| +----------+
| | saved lr |
+--| saved fp |<- FP
- | |
- | |<- SP
- +----------+ */
+ | |
+ | |<- SP
+ +----------+ */
if (prev_regnum == AARCH64_SP_REGNUM)
return frame_unwind_got_constant (this_frame, prev_regnum,
cache->prev_sp);
}
/* AArch64 prologue unwinder. */
-struct frame_unwind aarch64_prologue_unwind =
+static frame_unwind aarch64_prologue_unwind =
{
NORMAL_FRAME,
aarch64_prologue_frame_unwind_stop_reason,
}
/* AArch64 stub unwinder. */
-struct frame_unwind aarch64_stub_unwind =
+static frame_unwind aarch64_stub_unwind =
{
NORMAL_FRAME,
aarch64_stub_frame_unwind_stop_reason,
}
/* AArch64 default frame base information. */
-struct frame_base aarch64_normal_base =
+static frame_base aarch64_normal_base =
{
&aarch64_prologue_unwind,
aarch64_normal_frame_base,
&& (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
- if (aarch64_debug)
- {
- debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
- gdbarch_register_name (gdbarch, regnum),
- phex (regval, X_REGISTER_SIZE));
- }
+ aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
+ gdbarch_register_name (gdbarch, regnum),
+ phex (regval, X_REGISTER_SIZE));
+
regcache_cooked_write_unsigned (regcache, regnum, regval);
len -= partial_len;
buf += partial_len;
memcpy (reg, buf, len);
regcache->cooked_write (regnum, reg);
- if (aarch64_debug)
- {
- debug_printf ("arg %d in %s\n", info->argnum,
- gdbarch_register_name (gdbarch, regnum));
- }
+ aarch64_debug_printf ("arg %d in %s", info->argnum,
+ gdbarch_register_name (gdbarch, regnum));
+
return 1;
}
info->nsrn = 8;
if (align > 16)
align = 16;
- if (aarch64_debug)
- {
- debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
- info->nsaa);
- }
+  aarch64_debug_printf ("arg %d len=%d @ sp + %d", info->argnum, len,
+ info->nsaa);
item.len = len;
item.data = buf;
/* The struct_return pointer occupies X8. */
if (return_method != return_method_normal)
{
- if (aarch64_debug)
- {
- debug_printf ("struct return in %s = 0x%s\n",
- gdbarch_register_name (gdbarch,
- AARCH64_STRUCT_RETURN_REGNUM),
- paddress (gdbarch, struct_addr));
- }
+ aarch64_debug_printf ("struct return in %s = 0x%s",
+ gdbarch_register_name
+ (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
+ paddress (gdbarch, struct_addr));
+
regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
struct_addr);
}
t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
TYPE_CODE_UNION);
+ elem = builtin_type (gdbarch)->builtin_bfloat16;
+ append_composite_type_field (t, "bf", elem);
+
elem = builtin_type (gdbarch)->builtin_half;
append_composite_type_field (t, "f", elem);
sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
TYPE_CODE_UNION);
+ append_composite_type_field (sub, "bf",
+ init_vector_type (bt->builtin_bfloat16, 8));
append_composite_type_field (sub, "f",
init_vector_type (bt->builtin_half, 8));
append_composite_type_field (sub, "u",
gdb_byte buf[register_size (gdbarch, regno)];
gdb_assert (len <= sizeof (buf));
- if (aarch64_debug)
- {
- debug_printf ("read HFA or HVA return value element %d from %s\n",
- i + 1,
- gdbarch_register_name (gdbarch, regno));
- }
+ aarch64_debug_printf
+ ("read HFA or HVA return value element %d from %s",
+ i + 1, gdbarch_register_name (gdbarch, regno));
+
regs->cooked_read (regno, buf);
memcpy (valbuf, buf, len);
else
{
/* For a structure or union the behaviour is as if the value had
- been stored to word-aligned memory and then loaded into
- registers with 64-bit load instruction(s). */
+ been stored to word-aligned memory and then loaded into
+ registers with 64-bit load instruction(s). */
int len = TYPE_LENGTH (type);
int regno = AARCH64_X0_REGNUM;
bfd_byte buf[X_REGISTER_SIZE];
if (TYPE_LENGTH (type) > 16)
{
/* PCS B.6 Aggregates larger than 16 bytes are passed by
- invisible reference. */
+ invisible reference. */
return 1;
}
gdb_byte tmpbuf[register_size (gdbarch, regno)];
gdb_assert (len <= sizeof (tmpbuf));
- if (aarch64_debug)
- {
- debug_printf ("write HFA or HVA return value element %d to %s\n",
- i + 1,
- gdbarch_register_name (gdbarch, regno));
- }
+ aarch64_debug_printf
+ ("write HFA or HVA return value element %d to %s",
+ i + 1, gdbarch_register_name (gdbarch, regno));
memcpy (tmpbuf, valbuf,
len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
{
if (aarch64_return_in_memory (gdbarch, valtype))
{
- if (aarch64_debug)
- debug_printf ("return value in memory\n");
+ aarch64_debug_printf ("return value in memory");
return RETURN_VALUE_STRUCT_CONVENTION;
}
}
if (readbuf)
aarch64_extract_return_value (valtype, regcache, readbuf);
- if (aarch64_debug)
- debug_printf ("return value in registers\n");
+ aarch64_debug_printf ("return value in registers");
return RETURN_VALUE_REGISTER_CONVENTION;
}
return next_pcs;
}
-struct aarch64_displaced_step_closure : public displaced_step_closure
+struct aarch64_displaced_step_copy_insn_closure
+ : public displaced_step_copy_insn_closure
{
/* It is true when condition instruction, such as B.CON, TBZ, etc,
is being displaced stepping. */
/* Registers when doing displaced stepping. */
struct regcache *regs;
- aarch64_displaced_step_closure *dsc;
+ aarch64_displaced_step_copy_insn_closure *dsc;
};
/* Implementation of aarch64_insn_visitor method "b". */
struct aarch64_displaced_step_data *dsd
= (struct aarch64_displaced_step_data *) data;
- aarch64_emit_insn (dsd->insn_buf, insn);
- dsd->insn_count = 1;
-
- if ((insn & 0xfffffc1f) == 0xd65f0000)
+ uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
+ if (masked_insn == BLR)
{
- /* RET */
- dsd->dsc->pc_adjust = 0;
+ /* Emit a BR to the same register and then update LR to the original
+ address (similar to aarch64_displaced_step_b). */
+ aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
+ regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
+ data->insn_addr + 4);
}
+ else
+ aarch64_emit_insn (dsd->insn_buf, insn);
+ dsd->insn_count = 1;
+
+ if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
+ dsd->dsc->pc_adjust = 0;
else
dsd->dsc->pc_adjust = 4;
}
/* Implement the "displaced_step_copy_insn" gdbarch method. */
-displaced_step_closure_up
+displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs)
return NULL;
}
- std::unique_ptr<aarch64_displaced_step_closure> dsc
- (new aarch64_displaced_step_closure);
+ std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
+ (new aarch64_displaced_step_copy_insn_closure);
dsd.base.insn_addr = from;
dsd.new_addr = to;
dsd.regs = regs;
relocated instruction(s) there. */
for (i = 0; i < dsd.insn_count; i++)
{
- if (debug_displaced)
- {
- debug_printf ("displaced: writing insn ");
- debug_printf ("%.8x", dsd.insn_buf[i]);
- debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
- }
+ displaced_debug_printf ("writing insn %.8x at %s",
+ dsd.insn_buf[i],
+ paddress (gdbarch, to + i * 4));
+
write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
(ULONGEST) dsd.insn_buf[i]);
}
}
/* This is a work around for a problem with g++ 4.8. */
- return displaced_step_closure_up (dsc.release ());
+ return displaced_step_copy_insn_closure_up (dsc.release ());
}
/* Implement the "displaced_step_fixup" gdbarch method. */
void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
- struct displaced_step_closure *dsc_,
+ struct displaced_step_copy_insn_closure *dsc_,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs)
{
- aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
+ aarch64_displaced_step_copy_insn_closure *dsc
+ = (aarch64_displaced_step_copy_insn_closure *) dsc_;
ULONGEST pc;
regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
- if (debug_displaced)
- debug_printf ("Displaced: PC after stepping: %s (was %s).\n",
- paddress (gdbarch, pc), paddress (gdbarch, to));
+ displaced_debug_printf ("PC after stepping: %s (was %s).",
+ paddress (gdbarch, pc), paddress (gdbarch, to));
if (dsc->cond)
{
- if (debug_displaced)
- debug_printf ("Displaced: [Conditional] pc_adjust before: %d\n",
- dsc->pc_adjust);
+ displaced_debug_printf ("[Conditional] pc_adjust before: %d",
+ dsc->pc_adjust);
if (pc - to == 8)
{
else
gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
- if (debug_displaced)
- debug_printf ("Displaced: [Conditional] pc_adjust after: %d\n",
- dsc->pc_adjust);
+ displaced_debug_printf ("[Conditional] pc_adjust after: %d",
+ dsc->pc_adjust);
}
- if (debug_displaced)
- debug_printf ("Displaced: %s PC by %d\n",
- dsc->pc_adjust? "adjusting" : "not adjusting",
- dsc->pc_adjust);
-
+ displaced_debug_printf ("%s PC by %d",
+ dsc->pc_adjust ? "adjusting" : "not adjusting",
+ dsc->pc_adjust);
if (dsc->pc_adjust != 0)
{
took place. */
if ((pc - to) == 0)
{
- if (debug_displaced)
- debug_printf ("Displaced: PC did not move. Discarding PC "
- "adjustment.\n");
+ displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
dsc->pc_adjust = 0;
}
- if (debug_displaced)
- {
- debug_printf ("Displaced: fixup: set PC to %s:%d\n",
- paddress (gdbarch, from), dsc->pc_adjust);
- }
+ displaced_debug_printf ("fixup: set PC to %s:%d",
+ paddress (gdbarch, from), dsc->pc_adjust);
+
regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
from + dsc->pc_adjust);
}
/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
-int
-aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
- struct displaced_step_closure *closure)
+bool
+aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
- return 1;
+ return true;
}
/* Get the correct target description for the given VQ value.
If VQ is zero then it is assumed SVE is not supported.
- (It is not possible to set VQ to zero on an SVE system). */
+ (It is not possible to set VQ to zero on an SVE system).
+
+ MTE_P indicates the presence of the Memory Tagging Extension feature. */
const target_desc *
-aarch64_read_description (uint64_t vq, bool pauth_p)
+aarch64_read_description (uint64_t vq, bool pauth_p, bool mte_p)
{
if (vq > AARCH64_MAX_SVE_VQ)
error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
AARCH64_MAX_SVE_VQ);
- struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
+ struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p][mte_p];
if (tdesc == NULL)
{
- tdesc = aarch64_create_target_description (vq, pauth_p);
- tdesc_aarch64_list[vq][pauth_p] = tdesc;
+ tdesc = aarch64_create_target_description (vq, pauth_p, mte_p);
+ tdesc_aarch64_list[vq][pauth_p][mte_p] = tdesc;
}
return tdesc;
bool valid_p = true;
int i, num_regs = 0, num_pseudo_regs = 0;
int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
+ int first_mte_regnum = -1;
/* Use the vector length passed via the target info. Here -1 is used for no
SVE, and 0 is unset. If unset then use the vector length from the existing
value. */
const struct target_desc *tdesc = info.target_desc;
if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
- tdesc = aarch64_read_description (vq, false);
+ tdesc = aarch64_read_description (vq, false, false);
gdb_assert (tdesc);
feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
+ const struct tdesc_feature *feature_mte
+ = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
if (feature_core == nullptr)
return nullptr;
- struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
+ tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
/* Validate the description provides the mandatory core R registers
and allocate their numbers. */
for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
- valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
+ valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
AARCH64_X0_REGNUM + i,
aarch64_r_register_names[i]);
/* Validate the description provides the mandatory V registers
and allocate their numbers. */
for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
- valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
+ valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
AARCH64_V0_REGNUM + i,
aarch64_v_register_names[i]);
/* Validate the description provides the mandatory SVE registers
and allocate their numbers. */
for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
- valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
+ valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
AARCH64_SVE_Z0_REGNUM + i,
aarch64_sve_register_names[i]);
/* Validate the descriptor provides the mandatory PAUTH registers and
allocate their numbers. */
for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
- valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
+ valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
first_pauth_regnum + i,
aarch64_pauth_register_names[i]);
num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
}
- if (!valid_p)
+ /* Add the MTE registers. */
+ if (feature_mte != NULL)
{
- tdesc_data_cleanup (tdesc_data);
- return nullptr;
+ first_mte_regnum = num_regs;
+ /* Validate the descriptor provides the mandatory MTE registers and
+ allocate their numbers. */
+ for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
+ valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
+ first_mte_regnum + i,
+ aarch64_mte_register_names[i]);
+
+ num_regs += i;
}
+ if (!valid_p)
+ return nullptr;
+
/* AArch64 code is always little-endian. */
info.byte_order_for_code = BFD_ENDIAN_LITTLE;
tdep->pauth_reg_base = first_pauth_regnum;
tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
: pauth_ra_state_offset + num_regs;
+ tdep->mte_reg_base = first_mte_regnum;
set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
/* Hook in the ABI-specific overrides, if they have been registered. */
info.target_desc = tdesc;
- info.tdesc_data = tdesc_data;
+ info.tdesc_data = tdesc_data.get ();
gdbarch_init_osabi (info, gdbarch);
dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
- tdesc_use_registers (gdbarch, tdesc, tdesc_data);
+ tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
/* Add standard register aliases. */
for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
/* AArch64 process record-replay related structures, defines etc. */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
- do \
- { \
- unsigned int reg_len = LENGTH; \
- if (reg_len) \
- { \
- REGS = XNEWVEC (uint32_t, reg_len); \
-        memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
- } \
- } \
- while (0)
+ do \
+ { \
+ unsigned int reg_len = LENGTH; \
+ if (reg_len) \
+ { \
+ REGS = XNEWVEC (uint32_t, reg_len); \
+	memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
+ } \
+ } \
+ while (0)
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
- do \
- { \
- unsigned int mem_len = LENGTH; \
- if (mem_len) \
- { \
- MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
- memcpy(&MEMS->len, &RECORD_BUF[0], \
- sizeof(struct aarch64_mem_r) * LENGTH); \
- } \
- } \
- while (0)
+ do \
+ { \
+ unsigned int mem_len = LENGTH; \
+ if (mem_len) \
+ { \
+ MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
+ memcpy(&MEMS->len, &RECORD_BUF[0], \
+ sizeof(struct aarch64_mem_r) * LENGTH); \
+ } \
+ } \
+ while (0)
/* AArch64 record/replay structures and enumerations. */
uint8_t sindex, scale, selem, esize, replicate = 0;
scale = opcode_bits >> 2;
selem = ((opcode_bits & 0x02) |
- bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
+ bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
switch (scale)
- {
- case 1:
- if (size_bits & 0x01)
- return AARCH64_RECORD_UNKNOWN;
- break;
- case 2:
- if ((size_bits >> 1) & 0x01)
- return AARCH64_RECORD_UNKNOWN;
- if (size_bits & 0x01)
- {
- if (!((opcode_bits >> 1) & 0x01))
- scale = 3;
- else
- return AARCH64_RECORD_UNKNOWN;
- }
- break;
- case 3:
- if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
- {
- scale = size_bits;
- replicate = 1;
- break;
- }
- else
- return AARCH64_RECORD_UNKNOWN;
- default:
- break;
- }
+ {
+ case 1:
+ if (size_bits & 0x01)
+ return AARCH64_RECORD_UNKNOWN;
+ break;
+ case 2:
+ if ((size_bits >> 1) & 0x01)
+ return AARCH64_RECORD_UNKNOWN;
+ if (size_bits & 0x01)
+ {
+ if (!((opcode_bits >> 1) & 0x01))
+ scale = 3;
+ else
+ return AARCH64_RECORD_UNKNOWN;
+ }
+ break;
+ case 3:
+ if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
+ {
+ scale = size_bits;
+ replicate = 1;
+ break;
+ }
+ else
+ return AARCH64_RECORD_UNKNOWN;
+ default:
+ break;
+ }
esize = 8 << scale;
if (replicate)
- for (sindex = 0; sindex < selem; sindex++)
- {
- record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
- reg_rt = (reg_rt + 1) % 32;
- }
+ for (sindex = 0; sindex < selem; sindex++)
+ {
+ record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
+ reg_rt = (reg_rt + 1) % 32;
+ }
else
- {
- for (sindex = 0; sindex < selem; sindex++)
+ {
+ for (sindex = 0; sindex < selem; sindex++)
{
if (bit (aarch64_insn_r->aarch64_insn, 22))
record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
addr_offset = addr_offset + (esize / 8);
reg_rt = (reg_rt + 1) % 32;
}
- }
+ }
}
/* Load/store multiple structure. */
else
esize = 8 << size_bits;
if (bit (aarch64_insn_r->aarch64_insn, 30))
- elements = 128 / esize;
+ elements = 128 / esize;
else
- elements = 64 / esize;
+ elements = 64 / esize;
switch (opcode_bits)
- {
- /*LD/ST4 (4 Registers). */
- case 0:
- rpt = 1;
- selem = 4;
- break;
- /*LD/ST1 (4 Registers). */
- case 2:
- rpt = 4;
- selem = 1;
- break;
- /*LD/ST3 (3 Registers). */
- case 4:
- rpt = 1;
- selem = 3;
- break;
- /*LD/ST1 (3 Registers). */
- case 6:
- rpt = 3;
- selem = 1;
- break;
- /*LD/ST1 (1 Register). */
- case 7:
- rpt = 1;
- selem = 1;
- break;
- /*LD/ST2 (2 Registers). */
- case 8:
- rpt = 1;
- selem = 2;
- break;
- /*LD/ST1 (2 Registers). */
- case 10:
- rpt = 2;
- selem = 1;
- break;
- default:
- return AARCH64_RECORD_UNSUPPORTED;
- break;
- }
+ {
+ /*LD/ST4 (4 Registers). */
+ case 0:
+ rpt = 1;
+ selem = 4;
+ break;
+ /*LD/ST1 (4 Registers). */
+ case 2:
+ rpt = 4;
+ selem = 1;
+ break;
+ /*LD/ST3 (3 Registers). */
+ case 4:
+ rpt = 1;
+ selem = 3;
+ break;
+ /*LD/ST1 (3 Registers). */
+ case 6:
+ rpt = 3;
+ selem = 1;
+ break;
+ /*LD/ST1 (1 Register). */
+ case 7:
+ rpt = 1;
+ selem = 1;
+ break;
+ /*LD/ST2 (2 Registers). */
+ case 8:
+ rpt = 1;
+ selem = 2;
+ break;
+ /*LD/ST1 (2 Registers). */
+ case 10:
+ rpt = 2;
+ selem = 1;
+ break;
+ default:
+ return AARCH64_RECORD_UNSUPPORTED;
+ break;
+ }
for (rindex = 0; rindex < rpt; rindex++)
- for (eindex = 0; eindex < elements; eindex++)
- {
- uint8_t reg_tt, sindex;
- reg_tt = (reg_rt + rindex) % 32;
- for (sindex = 0; sindex < selem; sindex++)
- {
- if (bit (aarch64_insn_r->aarch64_insn, 22))
- record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
- else
- {
- record_buf_mem[mem_index++] = esize / 8;
- record_buf_mem[mem_index++] = address + addr_offset;
- }
- addr_offset = addr_offset + (esize / 8);
- reg_tt = (reg_tt + 1) % 32;
- }
- }
+ for (eindex = 0; eindex < elements; eindex++)
+ {
+ uint8_t reg_tt, sindex;
+ reg_tt = (reg_rt + rindex) % 32;
+ for (sindex = 0; sindex < selem; sindex++)
+ {
+ if (bit (aarch64_insn_r->aarch64_insn, 22))
+ record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
+ else
+ {
+ record_buf_mem[mem_index++] = esize / 8;
+ record_buf_mem[mem_index++] = address + addr_offset;
+ }
+ addr_offset = addr_offset + (esize / 8);
+ reg_tt = (reg_tt + 1) % 32;
+ }
+ }
}
if (bit (aarch64_insn_r->aarch64_insn, 23))
aarch64_insn_r->reg_rec_count = reg_index;
aarch64_insn_r->mem_rec_count = mem_index / 2;
MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
- record_buf_mem);
+ record_buf_mem);
REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
- record_buf);
+ record_buf);
return AARCH64_RECORD_SUCCESS;
}
if (record_debug)
debug_printf ("Process record: load register (literal)\n");
if (vector_flag)
- record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
+ record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
else
- record_buf[0] = reg_rt;
+ record_buf[0] = reg_rt;
aarch64_insn_r->reg_rec_count = 1;
}
/* All types of load/store pair instructions decoding. */
debug_printf ("Process record: load/store pair\n");
if (ld_flag)
- {
- if (vector_flag)
- {
- record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
- record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
- }
- else
- {
- record_buf[0] = reg_rt;
- record_buf[1] = reg_rt2;
- }
- aarch64_insn_r->reg_rec_count = 2;
- }
+ {
+ if (vector_flag)
+ {
+ record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
+ record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
+ }
+ else
+ {
+ record_buf[0] = reg_rt;
+ record_buf[1] = reg_rt2;
+ }
+ aarch64_insn_r->reg_rec_count = 2;
+ }
else
- {
- uint16_t imm7_off;
- imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
- if (!vector_flag)
- size_bits = size_bits >> 1;
- datasize = 8 << (2 + size_bits);
- offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
- offset = offset << (2 + size_bits);
- regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
- &address);
- if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
- {
- if (imm7_off & 0x40)
- address = address - offset;
- else
- address = address + offset;
- }
-
- record_buf_mem[0] = datasize / 8;
- record_buf_mem[1] = address;
- record_buf_mem[2] = datasize / 8;
- record_buf_mem[3] = address + (datasize / 8);
- aarch64_insn_r->mem_rec_count = 2;
- }
+ {
+ uint16_t imm7_off;
+ imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
+ if (!vector_flag)
+ size_bits = size_bits >> 1;
+ datasize = 8 << (2 + size_bits);
+ offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
+ offset = offset << (2 + size_bits);
+ regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
+ &address);
+ if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
+ {
+ if (imm7_off & 0x40)
+ address = address - offset;
+ else
+ address = address + offset;
+ }
+
+ record_buf_mem[0] = datasize / 8;
+ record_buf_mem[1] = address;
+ record_buf_mem[2] = datasize / 8;
+ record_buf_mem[3] = address + (datasize / 8);
+ aarch64_insn_r->mem_rec_count = 2;
+ }
if (bit (aarch64_insn_r->aarch64_insn, 23))
- record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
+ record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
}
/* Load/store register (unsigned immediate) instructions. */
else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
}
if (!ld_flag)
- {
- offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
- datasize = 8 << size_bits;
- regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
- &address);
- offset = offset << size_bits;
- address = address + offset;
-
- record_buf_mem[0] = datasize >> 3;
- record_buf_mem[1] = address;
- aarch64_insn_r->mem_rec_count = 1;
- }
+ {
+ offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
+ datasize = 8 << size_bits;
+ regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
+ &address);
+ offset = offset << size_bits;
+ address = address + offset;
+
+ record_buf_mem[0] = datasize >> 3;
+ record_buf_mem[1] = address;
+ aarch64_insn_r->mem_rec_count = 1;
+ }
else
- {
- if (vector_flag)
- record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
- else
- record_buf[0] = reg_rt;
- aarch64_insn_r->reg_rec_count = 1;
- }
+ {
+ if (vector_flag)
+ record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
+ else
+ record_buf[0] = reg_rt;
+ aarch64_insn_r->reg_rec_count = 1;
+ }
}
/* Load/store register (register offset) instructions. */
else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
debug_printf ("Process record: load/store (register offset)\n");
opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
if (!(opc >> 1))
- if (opc & 0x01)
- ld_flag = 0x01;
- else
- ld_flag = 0x0;
+ if (opc & 0x01)
+ ld_flag = 0x01;
+ else
+ ld_flag = 0x0;
else
- if (size_bits != 0x03)
- ld_flag = 0x01;
- else
- return AARCH64_RECORD_UNKNOWN;
+ if (size_bits != 0x03)
+ ld_flag = 0x01;
+ else
+ return AARCH64_RECORD_UNKNOWN;
if (!ld_flag)
- {
- ULONGEST reg_rm_val;
-
- regcache_raw_read_unsigned (aarch64_insn_r->regcache,
-	  bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
- if (bit (aarch64_insn_r->aarch64_insn, 12))
- offset = reg_rm_val << size_bits;
- else
- offset = reg_rm_val;
- datasize = 8 << size_bits;
- regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
- &address);
- address = address + offset;
- record_buf_mem[0] = datasize >> 3;
- record_buf_mem[1] = address;
- aarch64_insn_r->mem_rec_count = 1;
- }
+ {
+ ULONGEST reg_rm_val;
+
+ regcache_raw_read_unsigned (aarch64_insn_r->regcache,
+	  bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
+ if (bit (aarch64_insn_r->aarch64_insn, 12))
+ offset = reg_rm_val << size_bits;
+ else
+ offset = reg_rm_val;
+ datasize = 8 << size_bits;
+ regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
+ &address);
+ address = address + offset;
+ record_buf_mem[0] = datasize >> 3;
+ record_buf_mem[1] = address;
+ aarch64_insn_r->mem_rec_count = 1;
+ }
else
- {
- if (vector_flag)
- record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
- else
- record_buf[0] = reg_rt;
- aarch64_insn_r->reg_rec_count = 1;
- }
+ {
+ if (vector_flag)
+ record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
+ else
+ record_buf[0] = reg_rt;
+ aarch64_insn_r->reg_rec_count = 1;
+ }
}
/* Load/store register (immediate and unprivileged) instructions. */
else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
}
opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
if (!(opc >> 1))
- if (opc & 0x01)
- ld_flag = 0x01;
- else
- ld_flag = 0x0;
+ if (opc & 0x01)
+ ld_flag = 0x01;
+ else
+ ld_flag = 0x0;
else
- if (size_bits != 0x03)
- ld_flag = 0x01;
- else
- return AARCH64_RECORD_UNKNOWN;
+ if (size_bits != 0x03)
+ ld_flag = 0x01;
+ else
+ return AARCH64_RECORD_UNKNOWN;
if (!ld_flag)
- {
- uint16_t imm9_off;
- imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
- offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
- datasize = 8 << size_bits;
- regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
- &address);
- if (insn_bits10_11 != 0x01)
- {
- if (imm9_off & 0x0100)
- address = address - offset;
- else
- address = address + offset;
- }
- record_buf_mem[0] = datasize >> 3;
- record_buf_mem[1] = address;
- aarch64_insn_r->mem_rec_count = 1;
- }
+ {
+ uint16_t imm9_off;
+ imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
+ offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
+ datasize = 8 << size_bits;
+ regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
+ &address);
+ if (insn_bits10_11 != 0x01)
+ {
+ if (imm9_off & 0x0100)
+ address = address - offset;
+ else
+ address = address + offset;
+ }
+ record_buf_mem[0] = datasize >> 3;
+ record_buf_mem[1] = address;
+ aarch64_insn_r->mem_rec_count = 1;
+ }
else
- {
- if (vector_flag)
- record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
- else
- record_buf[0] = reg_rt;
- aarch64_insn_r->reg_rec_count = 1;
- }
+ {
+ if (vector_flag)
+ record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
+ else
+ record_buf[0] = reg_rt;
+ aarch64_insn_r->reg_rec_count = 1;
+ }
if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
- record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
+ record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
}
/* Advanced SIMD load/store instructions. */
else
return aarch64_record_asimd_load_store (aarch64_insn_r);
MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
- record_buf_mem);
+ record_buf_mem);
REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
- record_buf);
+ record_buf);
return AARCH64_RECORD_SUCCESS;
}
record_buf[0] = AARCH64_CPSR_REGNUM;
}
/* Floating point - data processing (2-source) and
- conditional select instructions. */
+ conditional select instructions. */
else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
{
if (record_debug)
}
else
return AARCH64_RECORD_UNKNOWN;
- }
+ }
else
return AARCH64_RECORD_UNKNOWN;
- }
+ }
else
return AARCH64_RECORD_UNKNOWN;
}
if (record_debug)
debug_printf ("\n");
+ /* Record the V/X register. */
+ aarch64_insn_r->reg_rec_count++;
+
+ /* Some of these instructions may set bits in the FPSR, so record it
+ too. */
+ record_buf[1] = AARCH64_FPSR_REGNUM;
aarch64_insn_r->reg_rec_count++;
- gdb_assert (aarch64_insn_r->reg_rec_count == 1);
+
+ gdb_assert (aarch64_insn_r->reg_rec_count == 2);
REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
record_buf);
return AARCH64_RECORD_SUCCESS;
struct gdbarch_info info;
uint32_t ret;
- gdbarch_info_init (&info);
info.bfd_arch_info = bfd_scan_arch ("aarch64");
struct gdbarch *gdbarch = gdbarch_find_by_info (info);