/* Branch trace support for GDB, the GNU debugger.
- Copyright (C) 2013-2014 Free Software Foundation, Inc.
+ Copyright (C) 2013-2015 Free Software Foundation, Inc.
Contributed by Intel Corp. <markus.t.metzger@intel.com>
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
-#include "exceptions.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
+#include "infrun.h"
+#include "event-loop.h"
+#include "inf-loop.h"
/* The target_ops of record-btrace. */
static struct target_ops record_btrace_ops;
/* A new thread observer enabling branch tracing for the new thread. */
static struct observer *record_btrace_thread_observer;
-/* Temporarily allow memory accesses. */
-static int record_btrace_allow_memory_access;
+/* Memory access types used in set/show record btrace replay-memory-access. */
+static const char replay_memory_access_read_only[] = "read-only";
+static const char replay_memory_access_read_write[] = "read-write";
+static const char *const replay_memory_access_types[] =
+{
+ replay_memory_access_read_only,
+ replay_memory_access_read_write,
+ NULL
+};
+
+/* The currently allowed replay memory access type. */
+static const char *replay_memory_access = replay_memory_access_read_only;
+
+/* Command lists for "set/show record btrace". */
+static struct cmd_list_element *set_record_btrace_cmdlist;
+static struct cmd_list_element *show_record_btrace_cmdlist;
+
+/* The execution direction of the last resume we got. See record-full.c. */
+static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
+
+/* The async event handler for reverse/replay execution. */
+static struct async_event_handler *record_btrace_async_inferior_event_handler;
+
+/* A flag indicating that we are currently generating a core file. */
+static int record_btrace_generating_corefile;
/* Print a record-btrace debug message. Use do ... while (0) to avoid
ambiguities when used in if statements. */
require_btrace_thread (void)
{
struct thread_info *tp;
- struct btrace_thread_info *btinfo;
DEBUG ("require");
btrace_fetch (tp);
- btinfo = &tp->btrace;
-
- if (btinfo->begin == NULL)
+ if (btrace_is_empty (tp))
error (_("No trace."));
return tp;
record_btrace_thread_observer = NULL;
}
+/* The record-btrace async event handler function. */
+
+static void
+record_btrace_handle_async_inferior_event (gdb_client_data data)
+{
+ inferior_event_handler (INF_REG_EVENT, NULL);
+}
+
/* The to_open method of target record-btrace. */
static void
-record_btrace_open (char *args, int from_tty)
+record_btrace_open (const char *args, int from_tty)
{
struct cleanup *disable_chain;
struct thread_info *tp;
if (!target_supports_btrace ())
error (_("Target does not support branch tracing."));
+ if (non_stop)
+ error (_("Record btrace can't debug inferior in non-stop mode."));
+
gdb_assert (record_btrace_thread_observer == NULL);
disable_chain = make_cleanup (null_cleanup, NULL);
- ALL_THREADS (tp)
+ ALL_NON_EXITED_THREADS (tp)
if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
{
btrace_enable (tp);
push_target (&record_btrace_ops);
+ record_btrace_async_inferior_event_handler
+ = create_async_event_handler (record_btrace_handle_async_inferior_event,
+ NULL);
+ record_btrace_generating_corefile = 0;
+
observer_notify_record_changed (current_inferior (), 1);
discard_cleanups (disable_chain);
/* The to_stop_recording method of target record-btrace. */
static void
-record_btrace_stop_recording (void)
+record_btrace_stop_recording (struct target_ops *self)
{
struct thread_info *tp;
record_btrace_auto_disable ();
- ALL_THREADS (tp)
+ ALL_NON_EXITED_THREADS (tp)
if (tp->btrace.target != NULL)
btrace_disable (tp);
}
/* The to_close method of target record-btrace. */
static void
-record_btrace_close (void)
+record_btrace_close (struct target_ops *self)
{
+ struct thread_info *tp;
+
+ if (record_btrace_async_inferior_event_handler != NULL)
+ delete_async_event_handler (&record_btrace_async_inferior_event_handler);
+
/* Make sure automatic recording gets disabled even if we did not stop
recording before closing the record-btrace target. */
record_btrace_auto_disable ();
- /* We already stopped recording. */
+ /* We should have already stopped recording.
+ Tear down btrace in case we have not. */
+ ALL_NON_EXITED_THREADS (tp)
+ btrace_teardown (tp);
}
/* The to_info_record method of target record-btrace. */
static void
-record_btrace_info (void)
+record_btrace_info (struct target_ops *self)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
calls = 0;
btinfo = &tp->btrace;
- if (btinfo->begin != NULL)
+
+ if (!btrace_is_empty (tp))
{
struct btrace_call_iterator call;
struct btrace_insn_iterator insn;
/* The to_insn_history method of target record-btrace. */
static void
-record_btrace_insn_history (int size, int flags)
+record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
struct btrace_thread_info *btinfo;
struct btrace_insn_history *history;
/* The to_insn_history_range method of target record-btrace. */
static void
-record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
+record_btrace_insn_history_range (struct target_ops *self,
+ ULONGEST from, ULONGEST to, int flags)
{
struct btrace_thread_info *btinfo;
struct btrace_insn_history *history;
/* The to_insn_history_from method of target record-btrace. */
static void
-record_btrace_insn_history_from (ULONGEST from, int size, int flags)
+record_btrace_insn_history_from (struct target_ops *self,
+ ULONGEST from, int size, int flags)
{
ULONGEST begin, end, context;
end = ULONGEST_MAX;
}
- record_btrace_insn_history_range (begin, end, flags);
+ record_btrace_insn_history_range (self, begin, end, flags);
}
/* Print the instruction number range for a function call history line. */
return;
ui_out_field_string (uiout, "file",
- symtab_to_filename_for_display (sym->symtab));
+ symtab_to_filename_for_display (symbol_symtab (sym)));
begin = bfun->lbegin;
end = bfun->lend;
if (sym != NULL)
return SYMBOL_PRINT_NAME (sym);
else if (msym != NULL)
- return SYMBOL_PRINT_NAME (msym);
+ return MSYMBOL_PRINT_NAME (msym);
else
return "??";
}
if (sym != NULL)
ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
else if (msym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
+ ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
else if (!ui_out_is_mi_like_p (uiout))
ui_out_field_string (uiout, "function", "??");
/* The to_call_history method of target record-btrace. */
static void
-record_btrace_call_history (int size, int flags)
+record_btrace_call_history (struct target_ops *self, int size, int flags)
{
struct btrace_thread_info *btinfo;
struct btrace_call_history *history;
/* The to_call_history_range method of target record-btrace. */
static void
-record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
+record_btrace_call_history_range (struct target_ops *self,
+ ULONGEST from, ULONGEST to, int flags)
{
struct btrace_thread_info *btinfo;
struct btrace_call_history *history;
/* The to_call_history_from method of target record-btrace. */
static void
-record_btrace_call_history_from (ULONGEST from, int size, int flags)
+record_btrace_call_history_from (struct target_ops *self,
+ ULONGEST from, int size, int flags)
{
ULONGEST begin, end, context;
end = ULONGEST_MAX;
}
- record_btrace_call_history_range (begin, end, flags);
+ record_btrace_call_history_range (self, begin, end, flags);
}
/* The to_record_is_replaying method of target record-btrace. */
static int
-record_btrace_is_replaying (void)
+record_btrace_is_replaying (struct target_ops *self)
{
struct thread_info *tp;
- ALL_THREADS (tp)
+ ALL_NON_EXITED_THREADS (tp)
if (btrace_is_replaying (tp))
return 1;
/* The to_xfer_partial method of target record-btrace. */
-static LONGEST
+static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
const char *annex, gdb_byte *readbuf,
const gdb_byte *writebuf, ULONGEST offset,
- ULONGEST len)
+ ULONGEST len, ULONGEST *xfered_len)
{
struct target_ops *t;
/* Filter out requests that don't make sense during replay. */
- if (!record_btrace_allow_memory_access && record_btrace_is_replaying ())
+ if (replay_memory_access == replay_memory_access_read_only
+ && !record_btrace_generating_corefile
+ && record_btrace_is_replaying (ops))
{
switch (object)
{
/* We do not allow writing memory in general. */
if (writebuf != NULL)
- return TARGET_XFER_E_UNAVAILABLE;
+ {
+ *xfered_len = len;
+ return TARGET_XFER_UNAVAILABLE;
+ }
/* We allow reading readonly memory. */
section = target_section_by_addr (ops, offset);
}
}
- return TARGET_XFER_E_UNAVAILABLE;
+ *xfered_len = len;
+ return TARGET_XFER_UNAVAILABLE;
}
}
}
/* Forward the request. */
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_xfer_partial != NULL)
- return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
- offset, len);
-
- return TARGET_XFER_E_UNAVAILABLE;
+ ops = ops->beneath;
+ return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
+ offset, len, xfered_len);
}
/* The to_insert_breakpoint method of target record-btrace. */
struct bp_target_info *bp_tgt)
{
volatile struct gdb_exception except;
- int old, ret;
+ const char *old;
+ int ret;
/* Inserting breakpoints requires accessing memory. Allow it for the
duration of this function. */
- old = record_btrace_allow_memory_access;
- record_btrace_allow_memory_access = 1;
+ old = replay_memory_access;
+ replay_memory_access = replay_memory_access_read_write;
ret = 0;
TRY_CATCH (except, RETURN_MASK_ALL)
- ret = forward_target_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
+ ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
- record_btrace_allow_memory_access = old;
+ replay_memory_access = old;
if (except.reason < 0)
throw_exception (except);
struct bp_target_info *bp_tgt)
{
volatile struct gdb_exception except;
- int old, ret;
+ const char *old;
+ int ret;
/* Removing breakpoints requires accessing memory. Allow it for the
duration of this function. */
- old = record_btrace_allow_memory_access;
- record_btrace_allow_memory_access = 1;
+ old = replay_memory_access;
+ replay_memory_access = replay_memory_access_read_write;
ret = 0;
TRY_CATCH (except, RETURN_MASK_ALL)
- ret = forward_target_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
+ ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
- record_btrace_allow_memory_access = old;
+ replay_memory_access = old;
if (except.reason < 0)
throw_exception (except);
gdb_assert (tp != NULL);
replay = tp->btrace.replay;
- if (replay != NULL)
+ if (replay != NULL && !record_btrace_generating_corefile)
{
const struct btrace_insn *insn;
struct gdbarch *gdbarch;
}
else
{
- struct target_ops *t;
+ struct target_ops *t = ops->beneath;
- for (t = ops->beneath; t != NULL; t = t->beneath)
- if (t->to_fetch_registers != NULL)
- {
- t->to_fetch_registers (t, regcache, regno);
- break;
- }
+ t->to_fetch_registers (t, regcache, regno);
}
}
{
struct target_ops *t;
- if (record_btrace_is_replaying ())
+ if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
error (_("This record target does not allow writing registers."));
gdb_assert (may_write_registers != 0);
- for (t = ops->beneath; t != NULL; t = t->beneath)
- if (t->to_store_registers != NULL)
- {
- t->to_store_registers (t, regcache, regno);
- return;
- }
-
- noprocess ();
+ t = ops->beneath;
+ t->to_store_registers (t, regcache, regno);
}
/* The to_prepare_to_store method of target record-btrace. */
{
struct target_ops *t;
- if (record_btrace_is_replaying ())
+ if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
return;
- for (t = ops->beneath; t != NULL; t = t->beneath)
- if (t->to_prepare_to_store != NULL)
- {
- t->to_prepare_to_store (t, regcache);
- return;
- }
+ t = ops->beneath;
+ t->to_prepare_to_store (t, regcache);
}
/* The branch trace frame cache. */
record_btrace_frame_dealloc_cache
};
+/* Implement the to_get_unwinder method. */
+
+static const struct frame_unwind *
+record_btrace_to_get_unwinder (struct target_ops *self)
+{
+ return &record_btrace_frame_unwind;
+}
+
+/* Implement the to_get_tailcall_unwinder method. */
+
+static const struct frame_unwind *
+record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
+{
+ return &record_btrace_tailcall_frame_unwind;
+}
+
+/* Indicate that TP should be resumed according to FLAG. */
+
+static void
+record_btrace_resume_thread (struct thread_info *tp,
+ enum btrace_thread_flag flag)
+{
+ struct btrace_thread_info *btinfo;
+
+ DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
+
+ btinfo = &tp->btrace;
+
+ if ((btinfo->flags & BTHR_MOVE) != 0)
+ error (_("Thread already moving."));
+
+ /* Fetch the latest branch trace. */
+ btrace_fetch (tp);
+
+ btinfo->flags |= flag;
+}
+
+/* Find the thread to resume given a PTID. */
+
+static struct thread_info *
+record_btrace_find_resume_thread (ptid_t ptid)
+{
+ struct thread_info *tp;
+
+ /* When asked to resume everything, we pick the current thread. */
+ if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
+ ptid = inferior_ptid;
+
+ return find_thread_ptid (ptid);
+}
+
+/* Start replaying a thread. */
+
+static struct btrace_insn_iterator *
+record_btrace_start_replaying (struct thread_info *tp)
+{
+ volatile struct gdb_exception except;
+ struct btrace_insn_iterator *replay;
+ struct btrace_thread_info *btinfo;
+ int executing;
+
+ btinfo = &tp->btrace;
+ replay = NULL;
+
+ /* We can't start replaying without trace. */
+ if (btinfo->begin == NULL)
+ return NULL;
+
+ /* Clear the executing flag to allow changes to the current frame.
+ We are not actually running, yet. We just started a reverse execution
+ command or a record goto command.
+ For the latter, EXECUTING is false and this has no effect.
+ For the former, EXECUTING is true and we're in to_wait, about to
+ move the thread. Since we need to recompute the stack, we temporarily
+ set EXECUTING to flase. */
+ executing = is_executing (tp->ptid);
+ set_executing (tp->ptid, 0);
+
+ /* GDB stores the current frame_id when stepping in order to detects steps
+ into subroutines.
+ Since frames are computed differently when we're replaying, we need to
+ recompute those stored frames and fix them up so we can still detect
+ subroutines after we started replaying. */
+ TRY_CATCH (except, RETURN_MASK_ALL)
+ {
+ struct frame_info *frame;
+ struct frame_id frame_id;
+ int upd_step_frame_id, upd_step_stack_frame_id;
+
+ /* The current frame without replaying - computed via normal unwind. */
+ frame = get_current_frame ();
+ frame_id = get_frame_id (frame);
+
+ /* Check if we need to update any stepping-related frame id's. */
+ upd_step_frame_id = frame_id_eq (frame_id,
+ tp->control.step_frame_id);
+ upd_step_stack_frame_id = frame_id_eq (frame_id,
+ tp->control.step_stack_frame_id);
+
+ /* We start replaying at the end of the branch trace. This corresponds
+ to the current instruction. */
+ replay = xmalloc (sizeof (*replay));
+ btrace_insn_end (replay, btinfo);
+
+ /* We're not replaying, yet. */
+ gdb_assert (btinfo->replay == NULL);
+ btinfo->replay = replay;
+
+ /* Make sure we're not using any stale registers. */
+ registers_changed_ptid (tp->ptid);
+
+ /* The current frame with replaying - computed via btrace unwind. */
+ frame = get_current_frame ();
+ frame_id = get_frame_id (frame);
+
+ /* Replace stepping related frames where necessary. */
+ if (upd_step_frame_id)
+ tp->control.step_frame_id = frame_id;
+ if (upd_step_stack_frame_id)
+ tp->control.step_stack_frame_id = frame_id;
+ }
+
+ /* Restore the previous execution state. */
+ set_executing (tp->ptid, executing);
+
+ if (except.reason < 0)
+ {
+ xfree (btinfo->replay);
+ btinfo->replay = NULL;
+
+ registers_changed_ptid (tp->ptid);
+
+ throw_exception (except);
+ }
+
+ return replay;
+}
+
+/* Stop replaying a thread. */
+
+static void
+record_btrace_stop_replaying (struct thread_info *tp)
+{
+ struct btrace_thread_info *btinfo;
+
+ btinfo = &tp->btrace;
+
+ xfree (btinfo->replay);
+ btinfo->replay = NULL;
+
+ /* Make sure we're not leaving any stale registers. */
+ registers_changed_ptid (tp->ptid);
+}
+
/* The to_resume method of target record-btrace. */
static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
enum gdb_signal signal)
{
+ struct thread_info *tp, *other;
+ enum btrace_thread_flag flag;
+
+ DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
+
+ /* Store the execution direction of the last resume. */
+ record_btrace_resume_exec_dir = execution_direction;
+
+ tp = record_btrace_find_resume_thread (ptid);
+ if (tp == NULL)
+ error (_("Cannot find thread to resume."));
+
+ /* Stop replaying other threads if the thread to resume is not replaying. */
+ if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
+ ALL_NON_EXITED_THREADS (other)
+ record_btrace_stop_replaying (other);
+
/* As long as we're not replaying, just forward the request. */
- if (!record_btrace_is_replaying ())
+ if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
{
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_resume != NULL)
- return ops->to_resume (ops, ptid, step, signal);
+ ops = ops->beneath;
+ return ops->to_resume (ops, ptid, step, signal);
+ }
+
+ /* Compute the btrace thread flag for the requested move. */
+ if (step == 0)
+ flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
+ else
+ flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
- error (_("Cannot find target for stepping."));
+ /* At the moment, we only move a single thread. We could also move
+ all threads in parallel by single-stepping each resumed thread
+ until the first runs into an event.
+ When we do that, we would want to continue all other threads.
+ For now, just resume one thread to not confuse to_wait. */
+ record_btrace_resume_thread (tp, flag);
+
+ /* We just indicate the resume intent here. The actual stepping happens in
+ record_btrace_wait below. */
+
+ /* Async support. */
+ if (target_can_async_p ())
+ {
+ target_async (inferior_event_handler, 0);
+ mark_async_event_handler (record_btrace_async_inferior_event_handler);
}
+}
- error (_("You can't do this from here. Do 'record goto end', first."));
+/* Find a thread to move. */
+
+static struct thread_info *
+record_btrace_find_thread_to_move (ptid_t ptid)
+{
+ struct thread_info *tp;
+
+ /* First check the parameter thread. */
+ tp = find_thread_ptid (ptid);
+ if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
+ return tp;
+
+ /* Otherwise, find one other thread that has been resumed. */
+ ALL_NON_EXITED_THREADS (tp)
+ if ((tp->btrace.flags & BTHR_MOVE) != 0)
+ return tp;
+
+ return NULL;
+}
+
+/* Return a target_waitstatus indicating that we ran out of history. */
+
+static struct target_waitstatus
+btrace_step_no_history (void)
+{
+ struct target_waitstatus status;
+
+ status.kind = TARGET_WAITKIND_NO_HISTORY;
+
+ return status;
+}
+
+/* Return a target_waitstatus indicating that a step finished. */
+
+static struct target_waitstatus
+btrace_step_stopped (void)
+{
+ struct target_waitstatus status;
+
+ status.kind = TARGET_WAITKIND_STOPPED;
+ status.value.sig = GDB_SIGNAL_TRAP;
+
+ return status;
+}
+
+/* Clear the record histories. */
+
+static void
+record_btrace_clear_histories (struct btrace_thread_info *btinfo)
+{
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
+}
+
/* Step a single thread according to its pending BTHR_MOVE request.
   Consumes the request, moves the thread's replay iterator, and returns
   the resulting wait status (stopped, or no-history when we run off
   either end of the trace).  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Extract and clear the move request; it is consumed by this call.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we hit a breakpoint or run off the trace.
	 Each iteration either returns or advances the iterator, so the
	 loop terminates.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Step backward until we hit a breakpoint or run out of history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
/* The to_wait method of target record-btrace. */
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
struct target_waitstatus *status, int options)
{
+ struct thread_info *tp, *other;
+
+ DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
+
/* As long as we're not replaying, just forward the request. */
- if (!record_btrace_is_replaying ())
+ if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
+ {
+ ops = ops->beneath;
+ return ops->to_wait (ops, ptid, status, options);
+ }
+
+ /* Let's find a thread to move. */
+ tp = record_btrace_find_thread_to_move (ptid);
+ if (tp == NULL)
{
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_wait != NULL)
- return ops->to_wait (ops, ptid, status, options);
+ DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
- error (_("Cannot find target for waiting."));
+ status->kind = TARGET_WAITKIND_IGNORE;
+ return minus_one_ptid;
}
- error (_("You can't do this from here. Do 'record goto end', first."));
+ /* We only move a single thread. We're not able to correlate threads. */
+ *status = record_btrace_step_thread (tp);
+
+ /* Stop all other threads. */
+ if (!non_stop)
+ ALL_NON_EXITED_THREADS (other)
+ other->btrace.flags &= ~BTHR_MOVE;
+
+ /* Start record histories anew from the current position. */
+ record_btrace_clear_histories (&tp->btrace);
+
+ /* We moved the replay position but did not update registers. */
+ registers_changed_ptid (tp->ptid);
+
+ return tp->ptid;
+}
+
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* The recorded trace can always be replayed backwards.  */
  return 1;
}
+
+/* The to_decr_pc_after_break method of target record-btrace. */
+
+static CORE_ADDR
+record_btrace_decr_pc_after_break (struct target_ops *ops,
+ struct gdbarch *gdbarch)
+{
+ /* When replaying, we do not actually execute the breakpoint instruction
+ so there is no need to adjust the PC after hitting a breakpoint. */
+ if (record_btrace_is_replaying (ops))
+ return 0;
+
+ return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}
-/* The to_find_new_threads method of target record-btrace. */
+/* The to_update_thread_list method of target record-btrace. */
static void
-record_btrace_find_new_threads (struct target_ops *ops)
+record_btrace_update_thread_list (struct target_ops *ops)
{
- /* Don't expect new threads if we're replaying. */
- if (record_btrace_is_replaying ())
+ /* We don't add or remove threads during replay. */
+ if (record_btrace_is_replaying (ops))
return;
/* Forward the request. */
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_find_new_threads != NULL)
- {
- ops->to_find_new_threads (ops);
- break;
- }
+ ops = ops->beneath;
+ ops->to_update_thread_list (ops);
}
/* The to_thread_alive method of target record-btrace. */
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
/* We don't add or remove threads during replay. */
- if (record_btrace_is_replaying ())
+ if (record_btrace_is_replaying (ops))
return find_thread_ptid (ptid) != NULL;
/* Forward the request. */
- for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
- if (ops->to_thread_alive != NULL)
- return ops->to_thread_alive (ops, ptid);
-
- return 0;
+ ops = ops->beneath;
+ return ops->to_thread_alive (ops, ptid);
}
/* Set the replay branch trace instruction iterator. If IT is NULL, replay
btinfo = &tp->btrace;
if (it == NULL || it->function == NULL)
- {
- if (btinfo->replay == NULL)
- return;
-
- xfree (btinfo->replay);
- btinfo->replay = NULL;
- }
+ record_btrace_stop_replaying (tp);
else
{
if (btinfo->replay == NULL)
- btinfo->replay = xmalloc (sizeof (*btinfo->replay));
+ record_btrace_start_replaying (tp);
else if (btrace_insn_cmp (btinfo->replay, it) == 0)
return;
*btinfo->replay = *it;
+ registers_changed_ptid (tp->ptid);
}
- /* Clear the function call and instruction histories so we start anew
- from the new replay position. */
- xfree (btinfo->insn_history);
- xfree (btinfo->call_history);
-
- btinfo->insn_history = NULL;
- btinfo->call_history = NULL;
-
- registers_changed_ptid (tp->ptid);
+ /* Start anew from the new replay position. */
+ record_btrace_clear_histories (btinfo);
}
/* The to_goto_record_begin method of target record-btrace. */
static void
-record_btrace_goto_begin (void)
+record_btrace_goto_begin (struct target_ops *self)
{
struct thread_info *tp;
struct btrace_insn_iterator begin;
/* The to_goto_record_end method of target record-btrace. */
static void
-record_btrace_goto_end (void)
+record_btrace_goto_end (struct target_ops *ops)
{
struct thread_info *tp;
/* The to_goto_record method of target record-btrace. */
static void
-record_btrace_goto (ULONGEST insn)
+record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
struct thread_info *tp;
struct btrace_insn_iterator it;
print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
+/* The to_execution_direction target method. */
+
+static enum exec_direction_kind
+record_btrace_execution_direction (struct target_ops *self)
+{
+ return record_btrace_resume_exec_dir;
+}
+
+/* The to_prepare_to_generate_core target method. */
+
+static void
+record_btrace_prepare_to_generate_core (struct target_ops *self)
+{
+ record_btrace_generating_corefile = 1;
+}
+
+/* The to_done_generating_core target method. */
+
+static void
+record_btrace_done_generating_core (struct target_ops *self)
+{
+ record_btrace_generating_corefile = 0;
+}
+
/* Initialize the record-btrace target ops. */
static void
ops->to_disconnect = record_disconnect;
ops->to_mourn_inferior = record_mourn_inferior;
ops->to_kill = record_kill;
- ops->to_create_inferior = find_default_create_inferior;
ops->to_stop_recording = record_btrace_stop_recording;
ops->to_info_record = record_btrace_info;
ops->to_insn_history = record_btrace_insn_history;
ops->to_fetch_registers = record_btrace_fetch_registers;
ops->to_store_registers = record_btrace_store_registers;
ops->to_prepare_to_store = record_btrace_prepare_to_store;
- ops->to_get_unwinder = &record_btrace_frame_unwind;
- ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
+ ops->to_get_unwinder = &record_btrace_to_get_unwinder;
+ ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
ops->to_resume = record_btrace_resume;
ops->to_wait = record_btrace_wait;
- ops->to_find_new_threads = record_btrace_find_new_threads;
+ ops->to_update_thread_list = record_btrace_update_thread_list;
ops->to_thread_alive = record_btrace_thread_alive;
ops->to_goto_record_begin = record_btrace_goto_begin;
ops->to_goto_record_end = record_btrace_goto_end;
ops->to_goto_record = record_btrace_goto;
+ ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
+ ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
+ ops->to_execution_direction = record_btrace_execution_direction;
+ ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
+ ops->to_done_generating_core = record_btrace_done_generating_core;
ops->to_stratum = record_stratum;
ops->to_magic = OPS_MAGIC;
}
execute_command ("target record-btrace", from_tty);
}
+/* The "set record btrace" command. */
+
+static void
+cmd_set_record_btrace (char *args, int from_tty)
+{
+ cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
+}
+
+/* The "show record btrace" command. */
+
+static void
+cmd_show_record_btrace (char *args, int from_tty)
+{
+ cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
+}
+
+/* The "show record btrace replay-memory-access" command. */
+
+static void
+cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
+ struct cmd_list_element *c, const char *value)
+{
+ fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
+ replay_memory_access);
+}
+
void _initialize_record_btrace (void);
/* Initialize btrace commands. */
&record_cmdlist);
add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
+ add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
+ _("Set record options"), &set_record_btrace_cmdlist,
+ "set record btrace ", 0, &set_record_cmdlist);
+
+ add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
+ _("Show record options"), &show_record_btrace_cmdlist,
+ "show record btrace ", 0, &show_record_cmdlist);
+
+ add_setshow_enum_cmd ("replay-memory-access", no_class,
+ replay_memory_access_types, &replay_memory_access, _("\
+Set what memory accesses are allowed during replay."), _("\
+Show what memory accesses are allowed during replay."),
+ _("Default is READ-ONLY.\n\n\
+The btrace record target does not trace data.\n\
+The memory therefore corresponds to the live target and not \
+to the current replay position.\n\n\
+When READ-ONLY, allow accesses to read-only memory during replay.\n\
+When READ-WRITE, allow accesses to read-only and read-write memory during \
+replay."),
+ NULL, cmd_show_replay_memory_access,
+ &set_record_btrace_cmdlist,
+ &show_record_btrace_cmdlist);
+
init_record_btrace_ops ();
add_target (&record_btrace_ops);