#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
+#include "hashtab.h"
/* The target_ops of record-btrace. */
static struct target_ops record_btrace_ops;
/* Update the branch trace for the current thread and return a pointer to its
- branch trace information struct.
+ thread_info.
Throws an error if there is no thread or no trace. This function never
returns NULL. */
-static struct btrace_thread_info *
-require_btrace (void)
+static struct thread_info *
+require_btrace_thread (void)
{
struct thread_info *tp;
- struct btrace_thread_info *btinfo;
DEBUG ("require");
btrace_fetch (tp);
- btinfo = &tp->btrace;
-
- if (btinfo->begin == NULL)
+ if (btrace_is_empty (tp))
error (_("No trace."));
- return btinfo;
+ return tp;
+}
+
+/* Update the branch trace for the current thread and return a pointer to its
+ branch trace information struct.
+
+ Throws an error if there is no thread or no trace. This function never
+ returns NULL. */
+
+static struct btrace_thread_info *
+require_btrace (void)
+{
+ struct thread_info *tp;
+
+ tp = require_btrace_thread ();
+
+ return &tp->btrace;
}
/* Enable branch tracing for one thread. Warn on errors. */
if (!target_supports_btrace ())
error (_("Target does not support branch tracing."));
+ if (non_stop)
+ error (_("Record btrace can't debug inferior in non-stop mode."));
+
gdb_assert (record_btrace_thread_observer == NULL);
disable_chain = make_cleanup (null_cleanup, NULL);
calls = 0;
btinfo = &tp->btrace;
- if (btinfo->begin != NULL)
+
+ if (!btrace_is_empty (tp))
{
struct btrace_call_iterator call;
struct btrace_insn_iterator insn;
ui_out_field_int (uiout, "max line", end);
}
+/* Get the name of a branch trace function. */
+
+static const char *
+btrace_get_bfun_name (const struct btrace_function *bfun)
+{
+ struct minimal_symbol *msym;
+ struct symbol *sym;
+
+ if (bfun == NULL)
+ return "??";
+
+ msym = bfun->msym;
+ sym = bfun->sym;
+
+ if (sym != NULL)
+ return SYMBOL_PRINT_NAME (sym);
+ else if (msym != NULL)
+ return SYMBOL_PRINT_NAME (msym);
+ else
+ return "??";
+}
+
/* Disassemble a section of the recorded function trace. */
static void
struct symbol *sym;
bfun = btrace_call_get (&it);
- msym = bfun->msym;
sym = bfun->sym;
+ msym = bfun->msym;
/* Print the function index. */
ui_out_field_uint (uiout, "index", bfun->number);
}
}
+/* The branch trace frame cache. */
+
+struct btrace_frame_cache
+{
+ /* The thread. */
+ struct thread_info *tp;
+
+ /* The frame info. */
+ struct frame_info *frame;
+
+ /* The branch trace function segment. */
+ const struct btrace_function *bfun;
+};
+
+/* A struct btrace_frame_cache hash table indexed by NEXT. */
+
+static htab_t bfcache;
+
+/* hash_f for htab_create_alloc of bfcache. */
+
+static hashval_t
+bfcache_hash (const void *arg)
+{
+ const struct btrace_frame_cache *cache = arg;
+
+ return htab_hash_pointer (cache->frame);
+}
+
+/* eq_f for htab_create_alloc of bfcache. */
+
+static int
+bfcache_eq (const void *arg1, const void *arg2)
+{
+ const struct btrace_frame_cache *cache1 = arg1;
+ const struct btrace_frame_cache *cache2 = arg2;
+
+ return cache1->frame == cache2->frame;
+}
+
+/* Create a new btrace frame cache. */
+
+static struct btrace_frame_cache *
+bfcache_new (struct frame_info *frame)
+{
+ struct btrace_frame_cache *cache;
+ void **slot;
+
+ cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
+ cache->frame = frame;
+
+ slot = htab_find_slot (bfcache, cache, INSERT);
+ gdb_assert (*slot == NULL);
+ *slot = cache;
+
+ return cache;
+}
+
+/* Extract the branch trace function from a branch trace frame. */
+
+static const struct btrace_function *
+btrace_get_frame_function (struct frame_info *frame)
+{
+ const struct btrace_frame_cache *cache;
+ const struct btrace_function *bfun;
+ struct btrace_frame_cache pattern;
+ void **slot;
+
+ pattern.frame = frame;
+
+ slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
+ if (slot == NULL)
+ return NULL;
+
+ cache = *slot;
+ return cache->bfun;
+}
+
/* Implement stop_reason method for record_btrace_frame_unwind. */
static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
void **this_cache)
{
- return UNWIND_UNAVAILABLE;
+ const struct btrace_frame_cache *cache;
+ const struct btrace_function *bfun;
+
+ cache = *this_cache;
+ bfun = cache->bfun;
+ gdb_assert (bfun != NULL);
+
+ if (bfun->up == NULL)
+ return UNWIND_UNAVAILABLE;
+
+ return UNWIND_NO_REASON;
}
/* Implement this_id method for record_btrace_frame_unwind. */
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
struct frame_id *this_id)
{
- /* Leave there the outer_frame_id value. */
+ const struct btrace_frame_cache *cache;
+ const struct btrace_function *bfun;
+ CORE_ADDR code, special;
+
+ cache = *this_cache;
+
+ bfun = cache->bfun;
+ gdb_assert (bfun != NULL);
+
+ while (bfun->segment.prev != NULL)
+ bfun = bfun->segment.prev;
+
+ code = get_frame_func (this_frame);
+ special = bfun->number;
+
+ *this_id = frame_id_build_unavailable_stack_special (code, special);
+
+ DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
+ btrace_get_bfun_name (cache->bfun),
+ core_addr_to_string_nz (this_id->code_addr),
+ core_addr_to_string_nz (this_id->special_addr));
}
/* Implement prev_register method for record_btrace_frame_unwind. */
void **this_cache,
int regnum)
{
- throw_error (NOT_AVAILABLE_ERROR,
- _("Registers are not available in btrace record history"));
+ const struct btrace_frame_cache *cache;
+ const struct btrace_function *bfun, *caller;
+ const struct btrace_insn *insn;
+ struct gdbarch *gdbarch;
+ CORE_ADDR pc;
+ int pcreg;
+
+ gdbarch = get_frame_arch (this_frame);
+ pcreg = gdbarch_pc_regnum (gdbarch);
+ if (pcreg < 0 || regnum != pcreg)
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("Registers are not available in btrace record history"));
+
+ cache = *this_cache;
+ bfun = cache->bfun;
+ gdb_assert (bfun != NULL);
+
+ caller = bfun->up;
+ if (caller == NULL)
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("No caller in btrace record history"));
+
+ if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
+ {
+ insn = VEC_index (btrace_insn_s, caller->insn, 0);
+ pc = insn->pc;
+ }
+ else
+ {
+ insn = VEC_last (btrace_insn_s, caller->insn);
+ pc = insn->pc;
+
+ pc += gdb_insn_length (gdbarch, pc);
+ }
+
+ DEBUG ("[frame] unwound PC in %s on level %d: %s",
+ btrace_get_bfun_name (bfun), bfun->level,
+ core_addr_to_string_nz (pc));
+
+ return frame_unwind_got_address (this_frame, regnum, pc);
}
/* Implement sniffer method for record_btrace_frame_unwind. */
struct frame_info *this_frame,
void **this_cache)
{
+ const struct btrace_function *bfun;
+ struct btrace_frame_cache *cache;
struct thread_info *tp;
- struct btrace_thread_info *btinfo;
- struct btrace_insn_iterator *replay;
+ struct frame_info *next;
/* THIS_FRAME does not contain a reference to its thread. */
tp = find_thread_ptid (inferior_ptid);
gdb_assert (tp != NULL);
- return btrace_is_replaying (tp);
+ bfun = NULL;
+ next = get_next_frame (this_frame);
+ if (next == NULL)
+ {
+ const struct btrace_insn_iterator *replay;
+
+ replay = tp->btrace.replay;
+ if (replay != NULL)
+ bfun = replay->function;
+ }
+ else
+ {
+ const struct btrace_function *callee;
+
+ callee = btrace_get_frame_function (next);
+ if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
+ bfun = callee->up;
+ }
+
+ if (bfun == NULL)
+ return 0;
+
+ DEBUG ("[frame] sniffed frame for %s on level %d",
+ btrace_get_bfun_name (bfun), bfun->level);
+
+ /* This is our frame. Initialize the frame cache. */
+ cache = bfcache_new (this_frame);
+ cache->tp = tp;
+ cache->bfun = bfun;
+
+ *this_cache = cache;
+ return 1;
+}
+
+/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
+
+static int
+record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
+ struct frame_info *this_frame,
+ void **this_cache)
+{
+ const struct btrace_function *bfun, *callee;
+ struct btrace_frame_cache *cache;
+ struct frame_info *next;
+
+ next = get_next_frame (this_frame);
+ if (next == NULL)
+ return 0;
+
+ callee = btrace_get_frame_function (next);
+ if (callee == NULL)
+ return 0;
+
+ if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
+ return 0;
+
+ bfun = callee->up;
+ if (bfun == NULL)
+ return 0;
+
+ DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
+ btrace_get_bfun_name (bfun), bfun->level);
+
+ /* This is our frame. Initialize the frame cache. */
+ cache = bfcache_new (this_frame);
+ cache->tp = find_thread_ptid (inferior_ptid);
+ cache->bfun = bfun;
+
+ *this_cache = cache;
+ return 1;
+}
+
+static void
+record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
+{
+ struct btrace_frame_cache *cache;
+ void **slot;
+
+ cache = this_cache;
+
+ slot = htab_find_slot (bfcache, cache, NO_INSERT);
+ gdb_assert (slot != NULL);
+
+ htab_remove_elt (bfcache, cache);
}
/* btrace recording does not store previous memory content, neither the stack
Therefore this unwinder reports any possibly unwound registers as
<unavailable>. */
-static const struct frame_unwind record_btrace_frame_unwind =
+const struct frame_unwind record_btrace_frame_unwind =
{
NORMAL_FRAME,
record_btrace_frame_unwind_stop_reason,
record_btrace_frame_this_id,
record_btrace_frame_prev_register,
NULL,
- record_btrace_frame_sniffer
+ record_btrace_frame_sniffer,
+ record_btrace_frame_dealloc_cache
};
+const struct frame_unwind record_btrace_tailcall_frame_unwind =
+{
+ TAILCALL_FRAME,
+ record_btrace_frame_unwind_stop_reason,
+ record_btrace_frame_this_id,
+ record_btrace_frame_prev_register,
+ NULL,
+ record_btrace_tailcall_frame_sniffer,
+ record_btrace_frame_dealloc_cache
+};
+
+/* Indicate that TP should be resumed according to FLAG. */
+
+static void
+record_btrace_resume_thread (struct thread_info *tp,
+ enum btrace_thread_flag flag)
+{
+ struct btrace_thread_info *btinfo;
+
+ DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
+
+ btinfo = &tp->btrace;
+
+ if ((btinfo->flags & BTHR_MOVE) != 0)
+ error (_("Thread already moving."));
+
+ /* Fetch the latest branch trace. */
+ btrace_fetch (tp);
+
+ btinfo->flags |= flag;
+}
+
+/* Find the thread to resume given a PTID. */
+
+static struct thread_info *
+record_btrace_find_resume_thread (ptid_t ptid)
+{
+ struct thread_info *tp;
+
+ /* When asked to resume everything, we pick the current thread. */
+ if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
+ ptid = inferior_ptid;
+
+ return find_thread_ptid (ptid);
+}
+
+/* Start replaying a thread. */
+
+static struct btrace_insn_iterator *
+record_btrace_start_replaying (struct thread_info *tp)
+{
+ volatile struct gdb_exception except;
+ struct btrace_insn_iterator *replay;
+ struct btrace_thread_info *btinfo;
+ int executing;
+
+ btinfo = &tp->btrace;
+ replay = NULL;
+
+ /* We can't start replaying without trace. */
+ if (btinfo->begin == NULL)
+ return NULL;
+
+ /* Clear the executing flag to allow changes to the current frame.
+ We are not actually running, yet. We just started a reverse execution
+ command or a record goto command.
+ For the latter, EXECUTING is false and this has no effect.
+ For the former, EXECUTING is true and we're in to_wait, about to
+ move the thread. Since we need to recompute the stack, we temporarily
+ set EXECUTING to false. */
+ executing = is_executing (tp->ptid);
+ set_executing (tp->ptid, 0);
+
+ /* GDB stores the current frame_id when stepping in order to detect steps
+ into subroutines.
+ Since frames are computed differently when we're replaying, we need to
+ recompute those stored frames and fix them up so we can still detect
+ subroutines after we started replaying. */
+ TRY_CATCH (except, RETURN_MASK_ALL)
+ {
+ struct frame_info *frame;
+ struct frame_id frame_id;
+ int upd_step_frame_id, upd_step_stack_frame_id;
+
+ /* The current frame without replaying - computed via normal unwind. */
+ frame = get_current_frame ();
+ frame_id = get_frame_id (frame);
+
+ /* Check if we need to update any stepping-related frame id's. */
+ upd_step_frame_id = frame_id_eq (frame_id,
+ tp->control.step_frame_id);
+ upd_step_stack_frame_id = frame_id_eq (frame_id,
+ tp->control.step_stack_frame_id);
+
+ /* We start replaying at the end of the branch trace. This corresponds
+ to the current instruction. */
+ replay = xmalloc (sizeof (*replay));
+ btrace_insn_end (replay, btinfo);
+
+ /* We're not replaying, yet. */
+ gdb_assert (btinfo->replay == NULL);
+ btinfo->replay = replay;
+
+ /* Make sure we're not using any stale registers. */
+ registers_changed_ptid (tp->ptid);
+
+ /* The current frame with replaying - computed via btrace unwind. */
+ frame = get_current_frame ();
+ frame_id = get_frame_id (frame);
+
+ /* Replace stepping related frames where necessary. */
+ if (upd_step_frame_id)
+ tp->control.step_frame_id = frame_id;
+ if (upd_step_stack_frame_id)
+ tp->control.step_stack_frame_id = frame_id;
+ }
+
+ /* Restore the previous execution state. */
+ set_executing (tp->ptid, executing);
+
+ if (except.reason < 0)
+ {
+ xfree (btinfo->replay);
+ btinfo->replay = NULL;
+
+ registers_changed_ptid (tp->ptid);
+
+ throw_exception (except);
+ }
+
+ return replay;
+}
+
+/* Stop replaying a thread. */
+
+static void
+record_btrace_stop_replaying (struct thread_info *tp)
+{
+ struct btrace_thread_info *btinfo;
+
+ btinfo = &tp->btrace;
+
+ xfree (btinfo->replay);
+ btinfo->replay = NULL;
+
+ /* Make sure we're not leaving any stale registers. */
+ registers_changed_ptid (tp->ptid);
+}
+
/* The to_resume method of target record-btrace. */
static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
enum gdb_signal signal)
{
+ struct thread_info *tp, *other;
+ enum btrace_thread_flag flag;
+
+ DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
+
+ tp = record_btrace_find_resume_thread (ptid);
+ if (tp == NULL)
+ error (_("Cannot find thread to resume."));
+
+ /* Stop replaying other threads if the thread to resume is not replaying. */
+ if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
+ ALL_THREADS (other)
+ record_btrace_stop_replaying (other);
+
/* As long as we're not replaying, just forward the request. */
- if (!record_btrace_is_replaying ())
+ if (!record_btrace_is_replaying () && execution_direction != EXEC_REVERSE)
{
for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
if (ops->to_resume != NULL)
error (_("Cannot find target for stepping."));
}
- error (_("You can't do this from here. Do 'record goto end', first."));
+ /* Compute the btrace thread flag for the requested move. */
+ if (step == 0)
+ flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
+ else
+ flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
+
+ /* At the moment, we only move a single thread. We could also move
+ all threads in parallel by single-stepping each resumed thread
+ until the first runs into an event.
+ When we do that, we would want to continue all other threads.
+ For now, just resume one thread to not confuse to_wait. */
+ record_btrace_resume_thread (tp, flag);
+
+ /* We just indicate the resume intent here. The actual stepping happens in
+ record_btrace_wait below. */
+}
+
+/* Find a thread to move. */
+
+static struct thread_info *
+record_btrace_find_thread_to_move (ptid_t ptid)
+{
+ struct thread_info *tp;
+
+ /* First check the parameter thread. */
+ tp = find_thread_ptid (ptid);
+ if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
+ return tp;
+
+ /* Otherwise, find one other thread that has been resumed. */
+ ALL_THREADS (tp)
+ if ((tp->btrace.flags & BTHR_MOVE) != 0)
+ return tp;
+
+ return NULL;
+}
+
+/* Return a target_waitstatus indicating that we ran out of history. */
+
+static struct target_waitstatus
+btrace_step_no_history (void)
+{
+ struct target_waitstatus status;
+
+ status.kind = TARGET_WAITKIND_NO_HISTORY;
+
+ return status;
+}
+
+/* Return a target_waitstatus indicating that a step finished. */
+
+static struct target_waitstatus
+btrace_step_stopped (void)
+{
+ struct target_waitstatus status;
+
+ status.kind = TARGET_WAITKIND_STOPPED;
+ status.value.sig = GDB_SIGNAL_TRAP;
+
+ return status;
+}
+
+/* Clear the record histories. */
+
+static void
+record_btrace_clear_histories (struct btrace_thread_info *btinfo)
+{
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
+}
+
+/* Step a single thread. */
+
+static struct target_waitstatus
+record_btrace_step_thread (struct thread_info *tp)
+{
+ struct btrace_insn_iterator *replay, end;
+ struct btrace_thread_info *btinfo;
+ struct address_space *aspace;
+ struct inferior *inf;
+ enum btrace_thread_flag flags;
+ unsigned int steps;
+
+ btinfo = &tp->btrace;
+ replay = btinfo->replay;
+
+ flags = btinfo->flags & BTHR_MOVE;
+ btinfo->flags &= ~BTHR_MOVE;
+
+ DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
+
+ switch (flags)
+ {
+ default:
+ internal_error (__FILE__, __LINE__, _("invalid stepping type."));
+
+ case BTHR_STEP:
+ /* We're done if we're not replaying. */
+ if (replay == NULL)
+ return btrace_step_no_history ();
+
+ /* We are always able to step at least once. */
+ steps = btrace_insn_next (replay, 1);
+ gdb_assert (steps == 1);
+
+ /* Determine the end of the instruction trace. */
+ btrace_insn_end (&end, btinfo);
+
+ /* We stop replaying if we reached the end of the trace. */
+ if (btrace_insn_cmp (replay, &end) == 0)
+ record_btrace_stop_replaying (tp);
+
+ return btrace_step_stopped ();
+
+ case BTHR_RSTEP:
+ /* Start replaying if we're not already doing so. */
+ if (replay == NULL)
+ replay = record_btrace_start_replaying (tp);
+
+ /* If we can't step any further, we reached the end of the history. */
+ steps = btrace_insn_prev (replay, 1);
+ if (steps == 0)
+ return btrace_step_no_history ();
+
+ return btrace_step_stopped ();
+
+ case BTHR_CONT:
+ /* We're done if we're not replaying. */
+ if (replay == NULL)
+ return btrace_step_no_history ();
+
+ inf = find_inferior_pid (ptid_get_pid (tp->ptid));
+ aspace = inf->aspace;
+
+ /* Determine the end of the instruction trace. */
+ btrace_insn_end (&end, btinfo);
+
+ for (;;)
+ {
+ const struct btrace_insn *insn;
+
+ /* We are always able to step at least once. */
+ steps = btrace_insn_next (replay, 1);
+ gdb_assert (steps == 1);
+
+ /* We stop replaying if we reached the end of the trace. */
+ if (btrace_insn_cmp (replay, &end) == 0)
+ {
+ record_btrace_stop_replaying (tp);
+ return btrace_step_no_history ();
+ }
+
+ insn = btrace_insn_get (replay);
+ gdb_assert (insn);
+
+ DEBUG ("stepping %d (%s) ... %s", tp->num,
+ target_pid_to_str (tp->ptid),
+ core_addr_to_string_nz (insn->pc));
+
+ if (breakpoint_here_p (aspace, insn->pc))
+ return btrace_step_stopped ();
+ }
+
+ case BTHR_RCONT:
+ /* Start replaying if we're not already doing so. */
+ if (replay == NULL)
+ replay = record_btrace_start_replaying (tp);
+
+ inf = find_inferior_pid (ptid_get_pid (tp->ptid));
+ aspace = inf->aspace;
+
+ for (;;)
+ {
+ const struct btrace_insn *insn;
+
+ /* If we can't step any further, we're done. */
+ steps = btrace_insn_prev (replay, 1);
+ if (steps == 0)
+ return btrace_step_no_history ();
+
+ insn = btrace_insn_get (replay);
+ gdb_assert (insn);
+
+ DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
+ target_pid_to_str (tp->ptid),
+ core_addr_to_string_nz (insn->pc));
+
+ if (breakpoint_here_p (aspace, insn->pc))
+ return btrace_step_stopped ();
+ }
+ }
}
/* The to_wait method of target record-btrace. */
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
struct target_waitstatus *status, int options)
{
+ struct thread_info *tp, *other;
+
+ DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
+
/* As long as we're not replaying, just forward the request. */
- if (!record_btrace_is_replaying ())
+ if (!record_btrace_is_replaying () && execution_direction != EXEC_REVERSE)
{
for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
if (ops->to_wait != NULL)
error (_("Cannot find target for waiting."));
}
- error (_("You can't do this from here. Do 'record goto end', first."));
+ /* Let's find a thread to move. */
+ tp = record_btrace_find_thread_to_move (ptid);
+ if (tp == NULL)
+ {
+ DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
+
+ status->kind = TARGET_WAITKIND_IGNORE;
+ return minus_one_ptid;
+ }
+
+ /* We only move a single thread. We're not able to correlate threads. */
+ *status = record_btrace_step_thread (tp);
+
+ /* Stop all other threads. */
+ if (!non_stop)
+ ALL_THREADS (other)
+ other->btrace.flags &= ~BTHR_MOVE;
+
+ /* Start record histories anew from the current position. */
+ record_btrace_clear_histories (&tp->btrace);
+
+ /* We moved the replay position but did not update registers. */
+ registers_changed_ptid (tp->ptid);
+
+ return tp->ptid;
+}
+
+/* The to_can_execute_reverse method of target record-btrace. */
+
+static int
+record_btrace_can_execute_reverse (void)
+{
+ return 1;
+}
+
+/* The to_decr_pc_after_break method of target record-btrace. */
+
+static CORE_ADDR
+record_btrace_decr_pc_after_break (struct target_ops *ops,
+ struct gdbarch *gdbarch)
+{
+ /* When replaying, we do not actually execute the breakpoint instruction
+ so there is no need to adjust the PC after hitting a breakpoint. */
+ if (record_btrace_is_replaying ())
+ return 0;
+
+ return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
}
/* The to_find_new_threads method of target record-btrace. */
return 0;
}
+/* Set the replay branch trace instruction iterator. If IT is NULL, replay
+ is stopped. */
+
+static void
+record_btrace_set_replay (struct thread_info *tp,
+ const struct btrace_insn_iterator *it)
+{
+ struct btrace_thread_info *btinfo;
+
+ btinfo = &tp->btrace;
+
+ if (it == NULL || it->function == NULL)
+ record_btrace_stop_replaying (tp);
+ else
+ {
+ if (btinfo->replay == NULL)
+ record_btrace_start_replaying (tp);
+ else if (btrace_insn_cmp (btinfo->replay, it) == 0)
+ return;
+
+ *btinfo->replay = *it;
+ registers_changed_ptid (tp->ptid);
+ }
+
+ /* Start anew from the new replay position. */
+ record_btrace_clear_histories (btinfo);
+}
+
+/* The to_goto_record_begin method of target record-btrace. */
+
+static void
+record_btrace_goto_begin (void)
+{
+ struct thread_info *tp;
+ struct btrace_insn_iterator begin;
+
+ tp = require_btrace_thread ();
+
+ btrace_insn_begin (&begin, &tp->btrace);
+ record_btrace_set_replay (tp, &begin);
+
+ print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
+}
+
+/* The to_goto_record_end method of target record-btrace. */
+
+static void
+record_btrace_goto_end (void)
+{
+ struct thread_info *tp;
+
+ tp = require_btrace_thread ();
+
+ record_btrace_set_replay (tp, NULL);
+
+ print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
+}
+
+/* The to_goto_record method of target record-btrace. */
+
+static void
+record_btrace_goto (ULONGEST insn)
+{
+ struct thread_info *tp;
+ struct btrace_insn_iterator it;
+ unsigned int number;
+ int found;
+
+ number = insn;
+
+ /* Check for wrap-arounds. */
+ if (number != insn)
+ error (_("Instruction number out of range."));
+
+ tp = require_btrace_thread ();
+
+ found = btrace_find_insn_by_number (&it, &tp->btrace, number);
+ if (found == 0)
+ error (_("No such instruction."));
+
+ record_btrace_set_replay (tp, &it);
+
+ print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
+}
+
/* Initialize the record-btrace target ops. */
static void
ops->to_store_registers = record_btrace_store_registers;
ops->to_prepare_to_store = record_btrace_prepare_to_store;
ops->to_get_unwinder = &record_btrace_frame_unwind;
+ ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
ops->to_resume = record_btrace_resume;
ops->to_wait = record_btrace_wait;
ops->to_find_new_threads = record_btrace_find_new_threads;
ops->to_thread_alive = record_btrace_thread_alive;
+ ops->to_goto_record_begin = record_btrace_goto_begin;
+ ops->to_goto_record_end = record_btrace_goto_end;
+ ops->to_goto_record = record_btrace_goto;
+ ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
+ ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
ops->to_stratum = record_stratum;
ops->to_magic = OPS_MAGIC;
}
init_record_btrace_ops ();
add_target (&record_btrace_ops);
+
+ bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
+ xcalloc, xfree);
}