struct record_full_entry *rec;
struct gdbarch *gdbarch = get_regcache_arch (regcache);
- rec = xcalloc (1, sizeof (struct record_full_entry));
+ rec = XCNEW (struct record_full_entry);
rec->type = record_full_reg;
rec->u.reg.num = regnum;
rec->u.reg.len = register_size (gdbarch, regnum);
{
struct record_full_entry *rec;
- rec = xcalloc (1, sizeof (struct record_full_entry));
+ rec = XCNEW (struct record_full_entry);
rec->type = record_full_mem;
rec->u.mem.addr = addr;
rec->u.mem.len = len;
{
struct record_full_entry *rec;
- rec = xcalloc (1, sizeof (struct record_full_entry));
+ rec = XCNEW (struct record_full_entry);
rec->type = record_full_end;
return rec;
static int
record_full_message_wrapper (void *args)
{
- struct record_full_message_args *record_full_args = args;
+ struct record_full_message_args *record_full_args
+ = (struct record_full_message_args *) args;
return record_full_message (record_full_args->regcache,
record_full_args->signal);
args.regcache = regcache;
args.signal = signal;
- return catch_errors (record_full_message_wrapper, &args, NULL,
+ return catch_errors (record_full_message_wrapper, &args, "",
RETURN_MASK_ALL);
}
/* Nothing to do if the entry is flagged not_accessible. */
if (!entry->u.mem.mem_entry_not_accessible)
{
- gdb_byte *mem = alloca (entry->u.mem.len);
+ gdb_byte *mem = (gdb_byte *) xmalloc (entry->u.mem.len);
+ struct cleanup *cleanup = make_cleanup (xfree, mem);
if (record_debug > 1)
fprintf_unfiltered (gdb_stdlog,
record_full_stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
}
}
+
+ do_cleanups (cleanup);
}
}
break;
/* Get record_full_core_regbuf. */
target_fetch_registers (regcache, -1);
- record_full_core_regbuf = xmalloc (MAX_REGISTER_SIZE * regnum);
+ record_full_core_regbuf = (gdb_byte *) xmalloc (MAX_REGISTER_SIZE * regnum);
for (i = 0; i < regnum; i ++)
regcache_raw_collect (regcache, i,
record_full_core_regbuf + MAX_REGISTER_SIZE * i);
/* "to_async" target method. */
static void
-record_full_async (struct target_ops *ops,
-                   void (*callback) (enum inferior_event_type event_type,
-                                     void *context),
-                   void *context)
+record_full_async (struct target_ops *ops, int enable)
 {
+  /* Nonzero ENABLE registers our async event handler with the event
+     loop; zero unregisters it.  Either way, propagate the request to
+     the target beneath so its async machinery tracks the same state.  */
-  if (callback != NULL)
+  if (enable)
     mark_async_event_handler (record_full_async_inferior_event_token);
   else
     clear_async_event_handler (record_full_async_inferior_event_token);
-  ops->beneath->to_async (ops->beneath, callback, context);
+  ops->beneath->to_async (ops->beneath, enable);
 }
static int record_full_resume_step = 0;
/* We are about to start executing the inferior (or simulate it),
let's register it with the event loop. */
if (target_can_async_p ())
- target_async (inferior_event_handler, 0);
+ target_async (1);
}
static int record_full_get_sig = 0;
/* The "to_record_is_replaying" target method. */
static int
-record_full_is_replaying (struct target_ops *self)
+record_full_is_replaying (struct target_ops *self, ptid_t ptid)
 {
+  /* PTID is intentionally unused here: for the full record target the
+     replaying state is global, not per-thread.  */
   return RECORD_FULL_IS_REPLAY;
 }
+/* The "to_record_will_replay" target method.  */
+
+static int
+record_full_will_replay (struct target_ops *self, ptid_t ptid, int dir)
+{
+  /* SELF and PTID are unused; DIR is the prospective execution
+     direction (EXEC_FORWARD or EXEC_REVERSE).
+
+     We can currently only record when executing forwards.  Should we be able
+     to record when executing backwards on targets that support reverse
+     execution, this needs to be changed.  */
+
+  return RECORD_FULL_IS_REPLAY || dir == EXEC_REVERSE;
+}
+
+
/* Go to a specific entry. */
static void
registers_changed ();
reinit_frame_cache ();
+ stop_pc = regcache_read_pc (get_current_regcache ());
print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
record_full_goto_entry (p);
}
+/* The "to_record_stop_replaying" target method.  */
+
+static void
+record_full_stop_replaying (struct target_ops *self)
+{
+  /* Leaving replay mode means going to the very end of the recorded
+     execution log, which record_full_goto_end implements.  */
+  record_full_goto_end (self);
+}
+
+
static void
init_record_full_ops (void)
{
record_full_ops.to_save_record = record_full_save;
record_full_ops.to_delete_record = record_full_delete;
record_full_ops.to_record_is_replaying = record_full_is_replaying;
+ record_full_ops.to_record_will_replay = record_full_will_replay;
+ record_full_ops.to_record_stop_replaying = record_full_stop_replaying;
record_full_ops.to_goto_record_begin = record_full_goto_begin;
record_full_ops.to_goto_record_end = record_full_goto_end;
record_full_ops.to_goto_record = record_full_goto;
/* We are about to start executing the inferior (or simulate it),
let's register it with the event loop. */
if (target_can_async_p ())
- target_async (inferior_event_handler, 0);
+ target_async (1);
}
/* "to_kill" method for prec over corefile. */
if (!entry)
{
/* Add a new entry. */
- entry = (struct record_full_core_buf_entry *)
- xmalloc
- (sizeof (struct record_full_core_buf_entry));
+ entry = XNEW (struct record_full_core_buf_entry);
entry->p = p;
if (!bfd_malloc_and_get_section
(p->the_bfd_section->owner,
record_full_core_ops.to_info_record = record_full_info;
record_full_core_ops.to_delete_record = record_full_delete;
record_full_core_ops.to_record_is_replaying = record_full_is_replaying;
+ record_full_core_ops.to_record_will_replay = record_full_will_replay;
record_full_core_ops.to_goto_record_begin = record_full_goto_begin;
record_full_core_ops.to_goto_record_end = record_full_goto_end;
record_full_core_ops.to_goto_record = record_full_goto;
bfdcore_read (core_bfd, osec, &signal,
sizeof (signal), &bfd_offset);
signal = netorder32 (signal);
- rec->u.end.sigval = signal;
+ rec->u.end.sigval = (enum gdb_signal) signal;
/* Get insn count. */
bfdcore_read (core_bfd, osec, &count,
static void
record_full_save_cleanups (void *data)
{
- bfd *obfd = data;
+ bfd *obfd = (bfd *) data;
char *pathname = xstrdup (bfd_get_filename (obfd));
gdb_bfd_unref (obfd);