/* Branch trace support for GDB, the GNU debugger.
- Copyright (C) 2013-2015 Free Software Foundation, Inc.
+ Copyright (C) 2013-2016 Free Software Foundation, Inc.
Contributed by Intel Corp. <markus.t.metzger@intel.com>
/* Disable branch tracing for the thread given as callback argument.
   ARG is the struct thread_info to act on; the void * signature matches
   the cleanup-callback convention (NOTE(review): presumably registered
   via make_cleanup — confirm at the call site).  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = (struct thread_info *) arg;

  btrace_disable (thread);
}
if (!target_has_execution)
error (_("The program is not being run."));
- if (non_stop)
- error (_("Record btrace can't debug inferior in non-stop mode."));
-
gdb_assert (record_btrace_thread_observer == NULL);
disable_chain = make_cleanup (null_cleanup, NULL);
ALL_NON_EXITED_THREADS (tp)
- if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
+ if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
{
btrace_enable (tp, &record_btrace_conf);
}
}
-/* Print an Intel(R) Processor Trace configuration. */
+/* Print an Intel Processor Trace configuration. */
static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
}
printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
- "for thread %d (%s).\n"), insns, calls, gaps,
- tp->num, target_pid_to_str (tp->ptid));
+ "for thread %s (%s).\n"), insns, calls, gaps,
+ print_thread_id (tp), target_pid_to_str (tp->ptid));
if (btrace_is_replaying (tp))
printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
ui_out_field_fmt (uiout, fld, "%u", val);
}
/* A range of source lines.  The range is half-open: BEGIN is the first
   line (inclusive), END is one past the last line (exclusive).  An empty
   range is represented by END <= BEGIN.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range for SYMTAB covering [BEGIN; END).  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add LINE to RANGE, widening it as necessary, and return the result.
   An empty RANGE becomes the single-line range [LINE; LINE + 1).  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end <= line)
    /* END is exclusive, so it must lie strictly above LINE.  The previous
       code set END = LINE (and only when END < LINE), which left LINE
       itself outside the range.  */
    range.end = line + 1;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  Ranges from
   different symtabs never contain one another.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
+
+/* Find the line range associated with PC. */
+
+static struct btrace_line_range
+btrace_find_line_range (CORE_ADDR pc)
+{
+ struct btrace_line_range range;
+ struct linetable_entry *lines;
+ struct linetable *ltable;
+ struct symtab *symtab;
+ int nlines, i;
+
+ symtab = find_pc_line_symtab (pc);
+ if (symtab == NULL)
+ return btrace_mk_line_range (NULL, 0, 0);
+
+ ltable = SYMTAB_LINETABLE (symtab);
+ if (ltable == NULL)
+ return btrace_mk_line_range (symtab, 0, 0);
+
+ nlines = ltable->nitems;
+ lines = ltable->item;
+ if (nlines <= 0)
+ return btrace_mk_line_range (symtab, 0, 0);
+
+ range = btrace_mk_line_range (symtab, 0, 0);
+ for (i = 0; i < nlines - 1; i++)
+ {
+ if ((lines[i].pc == pc) && (lines[i].line != 0))
+ range = btrace_line_range_add (range, lines[i].line);
+ }
+
+ return range;
+}
+
+/* Print source lines in LINES to UIOUT.
+
+ UI_ITEM_CHAIN is a cleanup chain for the last source line and the
+ instructions corresponding to that source line. When printing a new source
+ line, we do the cleanups for the open chain and open a new cleanup chain for
+ the new source line. If the source line range in LINES is not empty, this
+ function will leave the cleanup chain for the last printed source line open
+ so instructions can be added to it. */
+
+static void
+btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
+ struct cleanup **ui_item_chain, int flags)
+{
+ print_source_lines_flags psl_flags;
+ int line;
+
+ psl_flags = 0;
+ if (flags & DISASSEMBLY_FILENAME)
+ psl_flags |= PRINT_SOURCE_LINES_FILENAME;
+
+ for (line = lines.begin; line < lines.end; ++line)
+ {
+ if (*ui_item_chain != NULL)
+ do_cleanups (*ui_item_chain);
+
+ *ui_item_chain
+ = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
+
+ print_source_lines (lines.symtab, line, line + 1, psl_flags);
+
+ make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
+ }
+}
+
/* Disassemble a section of the recorded instruction trace. */
static void
const struct btrace_insn_iterator *begin,
const struct btrace_insn_iterator *end, int flags)
{
+ struct ui_file *stb;
+ struct cleanup *cleanups, *ui_item_chain;
+ struct disassemble_info di;
struct gdbarch *gdbarch;
struct btrace_insn_iterator it;
+ struct btrace_line_range last_lines;
DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
btrace_insn_number (end));
+ flags |= DISASSEMBLY_SPECULATIVE;
+
gdbarch = target_gdbarch ();
+ stb = mem_fileopen ();
+ cleanups = make_cleanup_ui_file_delete (stb);
+ di = gdb_disassemble_info (gdbarch, stb);
+ last_lines = btrace_mk_line_range (NULL, 0, 0);
+
+ make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
+
+ /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
+ instructions corresponding to that line. */
+ ui_item_chain = NULL;
for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
{
}
else
{
- char prefix[4];
+ struct disasm_insn dinsn;
- /* We may add a speculation prefix later. We use the same space
- that is used for the pc prefix. */
- if ((flags & DISASSEMBLY_OMIT_PC) == 0)
- strncpy (prefix, pc_prefix (insn->pc), 3);
- else
+ if ((flags & DISASSEMBLY_SOURCE) != 0)
{
- prefix[0] = ' ';
- prefix[1] = ' ';
- prefix[2] = ' ';
+ struct btrace_line_range lines;
+
+ lines = btrace_find_line_range (insn->pc);
+ if (!btrace_line_range_is_empty (lines)
+ && !btrace_line_range_contains_range (last_lines, lines))
+ {
+ btrace_print_lines (lines, uiout, &ui_item_chain, flags);
+ last_lines = lines;
+ }
+ else if (ui_item_chain == NULL)
+ {
+ ui_item_chain
+ = make_cleanup_ui_out_tuple_begin_end (uiout,
+ "src_and_asm_line");
+ /* No source information. */
+ make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
+ }
+
+ gdb_assert (ui_item_chain != NULL);
}
- prefix[3] = 0;
- /* Print the instruction index. */
- ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
- ui_out_text (uiout, "\t");
+ memset (&dinsn, 0, sizeof (dinsn));
+ dinsn.number = btrace_insn_number (&it);
+ dinsn.addr = insn->pc;
- /* Indicate speculative execution by a leading '?'. */
if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
- prefix[0] = '?';
-
- /* Print the prefix; we tell gdb_disassembly below to omit it. */
- ui_out_field_fmt (uiout, "prefix", "%s", prefix);
+ dinsn.is_speculative = 1;
- /* Disassembly with '/m' flag may not produce the expected result.
- See PR gdb/11833. */
- gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
- 1, insn->pc, insn->pc + 1);
+ gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
}
}
+
+ do_cleanups (cleanups);
}
/* The to_insn_history method of target record-btrace. */
const struct btrace_thread_info *btinfo,
const struct btrace_call_iterator *begin,
const struct btrace_call_iterator *end,
- enum record_print_flag flags)
+ int int_flags)
{
struct btrace_call_iterator it;
+ record_print_flags flags = (enum record_print_flag) int_flags;
- DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
+ DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
btrace_call_number (end));
for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
/* The to_call_history method of target record-btrace. */
static void
-record_btrace_call_history (struct target_ops *self, int size, int flags)
+record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
struct btrace_thread_info *btinfo;
struct btrace_call_history *history;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
unsigned int context, covered;
+ record_print_flags flags = (enum record_print_flag) int_flags;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
{
struct btrace_insn_iterator *replay;
- DEBUG ("call-history (0x%x): %d", flags, size);
+ DEBUG ("call-history (0x%x): %d", int_flags, size);
/* If we're replaying, we start at the replay position. Otherwise, we
start at the tail of the trace. */
begin = history->begin;
end = history->end;
- DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
+ DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
btrace_call_number (&begin), btrace_call_number (&end));
if (size < 0)
static void
record_btrace_call_history_range (struct target_ops *self,
- ULONGEST from, ULONGEST to, int flags)
+ ULONGEST from, ULONGEST to,
+ int int_flags)
{
struct btrace_thread_info *btinfo;
struct btrace_call_history *history;
struct ui_out *uiout;
unsigned int low, high;
int found;
+ record_print_flags flags = (enum record_print_flag) int_flags;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
low = from;
high = to;
- DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
+ DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
/* Check for wrap-arounds. */
if (low != from || high != to)
static void
record_btrace_call_history_from (struct target_ops *self,
- ULONGEST from, int size, int flags)
+ ULONGEST from, int size,
+ int int_flags)
{
ULONGEST begin, end, context;
+ record_print_flags flags = (enum record_print_flag) int_flags;
context = abs (size);
if (context == 0)
/* The to_record_is_replaying method of target record-btrace. */
static int
-record_btrace_is_replaying (struct target_ops *self)
+record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
struct thread_info *tp;
ALL_NON_EXITED_THREADS (tp)
- if (btrace_is_replaying (tp))
+ if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
return 1;
return 0;
}
+/* The to_record_will_replay method of target record-btrace. */
+
+static int
+record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
+{
+ return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
+}
+
/* The to_xfer_partial method of target record-btrace. */
static enum target_xfer_status
/* Filter out requests that don't make sense during replay. */
if (replay_memory_access == replay_memory_access_read_only
&& !record_btrace_generating_corefile
- && record_btrace_is_replaying (ops))
+ && record_btrace_is_replaying (ops, inferior_ptid))
{
switch (object)
{
{
struct target_ops *t;
- if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
- error (_("This record target does not allow writing registers."));
+ if (!record_btrace_generating_corefile
+ && record_btrace_is_replaying (ops, inferior_ptid))
+ error (_("Cannot write registers while replaying."));
gdb_assert (may_write_registers != 0);
{
struct target_ops *t;
- if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
+ if (!record_btrace_generating_corefile
+ && record_btrace_is_replaying (ops, inferior_ptid))
return;
t = ops->beneath;
static hashval_t
bfcache_hash (const void *arg)
{
- const struct btrace_frame_cache *cache = arg;
+ const struct btrace_frame_cache *cache
+ = (const struct btrace_frame_cache *) arg;
return htab_hash_pointer (cache->frame);
}
static int
bfcache_eq (const void *arg1, const void *arg2)
{
- const struct btrace_frame_cache *cache1 = arg1;
- const struct btrace_frame_cache *cache2 = arg2;
+ const struct btrace_frame_cache *cache1
+ = (const struct btrace_frame_cache *) arg1;
+ const struct btrace_frame_cache *cache2
+ = (const struct btrace_frame_cache *) arg2;
return cache1->frame == cache2->frame;
}
if (slot == NULL)
return NULL;
- cache = *slot;
+ cache = (const struct btrace_frame_cache *) *slot;
return cache->bfun;
}
const struct btrace_frame_cache *cache;
const struct btrace_function *bfun;
- cache = *this_cache;
+ cache = (const struct btrace_frame_cache *) *this_cache;
bfun = cache->bfun;
gdb_assert (bfun != NULL);
const struct btrace_function *bfun;
CORE_ADDR code, special;
- cache = *this_cache;
+ cache = (const struct btrace_frame_cache *) *this_cache;
bfun = cache->bfun;
gdb_assert (bfun != NULL);
throw_error (NOT_AVAILABLE_ERROR,
_("Registers are not available in btrace record history"));
- cache = *this_cache;
+ cache = (const struct btrace_frame_cache *) *this_cache;
bfun = cache->bfun;
gdb_assert (bfun != NULL);
struct btrace_frame_cache *cache;
void **slot;
- cache = this_cache;
+ cache = (struct btrace_frame_cache *) this_cache;
slot = htab_find_slot (bfcache, cache, NO_INSERT);
gdb_assert (slot != NULL);
{
struct btrace_thread_info *btinfo;
- DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
+ DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
btinfo = &tp->btrace;
enum gdb_signal signal)
{
struct thread_info *tp;
- enum btrace_thread_flag flag;
- ptid_t orig_ptid;
+ enum btrace_thread_flag flag, cflag;
DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
execution_direction == EXEC_REVERSE ? "reverse-" : "",
step ? "step" : "cont");
- orig_ptid = ptid;
-
/* Store the execution direction of the last resume.
If there is more than one to_resume call, we have to rely on infrun
to not change the execution direction in-between. */
record_btrace_resume_exec_dir = execution_direction;
- /* For all-stop targets... */
- if (!target_is_non_stop_p ())
- {
- /* ...we pick the current thread when asked to resume an entire process
- or everything. */
- if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
- ptid = inferior_ptid;
-
- tp = find_thread_ptid (ptid);
- if (tp == NULL)
- error (_("Cannot find thread to resume."));
-
- /* ...and we stop replaying other threads if the thread to resume is not
- replaying. */
- if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
- ALL_NON_EXITED_THREADS (tp)
- record_btrace_stop_replaying (tp);
- }
-
/* As long as we're not replaying, just forward the request.
For non-stop targets this means that no thread is replaying. In order to
make progress, we may need to explicitly move replaying threads to the end
of their execution history. */
- if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
+ if ((execution_direction != EXEC_REVERSE)
+ && !record_btrace_is_replaying (ops, minus_one_ptid))
{
ops = ops->beneath;
- return ops->to_resume (ops, orig_ptid, step, signal);
+ ops->to_resume (ops, ptid, step, signal);
+ return;
}
/* Compute the btrace thread flag for the requested move. */
- if (step == 0)
- flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
+ if (execution_direction == EXEC_REVERSE)
+ {
+ flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
+ cflag = BTHR_RCONT;
+ }
else
- flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
+ {
+ flag = step == 0 ? BTHR_CONT : BTHR_STEP;
+ cflag = BTHR_CONT;
+ }
/* We just indicate the resume intent here. The actual stepping happens in
- record_btrace_wait below. */
- ALL_NON_EXITED_THREADS (tp)
- if (ptid_match (tp->ptid, ptid))
- record_btrace_resume_thread (tp, flag);
+ record_btrace_wait below.
+
+ For all-stop targets, we only step INFERIOR_PTID and continue others. */
+ if (!target_is_non_stop_p ())
+ {
+ gdb_assert (ptid_match (inferior_ptid, ptid));
+
+ ALL_NON_EXITED_THREADS (tp)
+ if (ptid_match (tp->ptid, ptid))
+ {
+ if (ptid_match (tp->ptid, inferior_ptid))
+ record_btrace_resume_thread (tp, flag);
+ else
+ record_btrace_resume_thread (tp, cflag);
+ }
+ }
+ else
+ {
+ ALL_NON_EXITED_THREADS (tp)
+ if (ptid_match (tp->ptid, ptid))
+ record_btrace_resume_thread (tp, flag);
+ }
/* Async support. */
if (target_can_async_p ())
if (flags == 0)
return;
- DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
+ DEBUG ("cancel resume thread %s (%s): %x (%s)",
+ print_thread_id (tp),
target_pid_to_str (tp->ptid), flags,
btrace_thread_flag_to_str (flags));
flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
- DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
+ DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
target_pid_to_str (tp->ptid), flags,
btrace_thread_flag_to_str (flags));
DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
/* As long as we're not replaying, just forward the request. */
- if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
+ if ((execution_direction != EXEC_REVERSE)
+ && !record_btrace_is_replaying (ops, minus_one_ptid))
{
ops = ops->beneath;
return ops->to_wait (ops, ptid, status, options);
/* We moved the replay position but did not update registers. */
registers_changed_ptid (eventing->ptid);
- DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
+ DEBUG ("wait ended by thread %s (%s): %s",
+ print_thread_id (eventing),
target_pid_to_str (eventing->ptid),
target_waitstatus_to_string (status));
DEBUG ("stop %s", target_pid_to_str (ptid));
/* As long as we're not replaying, just forward the request. */
- if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
+ if ((execution_direction != EXEC_REVERSE)
+ && !record_btrace_is_replaying (ops, minus_one_ptid))
{
ops = ops->beneath;
ops->to_stop (ops, ptid);
static int
record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
{
- if (record_btrace_is_replaying (ops))
+ if (record_btrace_is_replaying (ops, minus_one_ptid))
{
struct thread_info *tp = inferior_thread ();
static int
record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
{
- if (record_btrace_is_replaying (ops))
+ if (record_btrace_is_replaying (ops, minus_one_ptid))
return 1;
return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
- if (record_btrace_is_replaying (ops))
+ if (record_btrace_is_replaying (ops, minus_one_ptid))
{
struct thread_info *tp = inferior_thread ();
static int
record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
{
- if (record_btrace_is_replaying (ops))
+ if (record_btrace_is_replaying (ops, minus_one_ptid))
return 1;
return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
record_btrace_update_thread_list (struct target_ops *ops)
{
/* We don't add or remove threads during replay. */
- if (record_btrace_is_replaying (ops))
+ if (record_btrace_is_replaying (ops, minus_one_ptid))
return;
/* Forward the request. */
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
/* We don't add or remove threads during replay. */
- if (record_btrace_is_replaying (ops))
+ if (record_btrace_is_replaying (ops, minus_one_ptid))
return find_thread_ptid (ptid) != NULL;
/* Forward the request. */
record_btrace_set_replay (tp, &it);
}
+/* The to_record_stop_replaying method of target record-btrace. */
+
+static void
+record_btrace_stop_replaying_all (struct target_ops *self)
+{
+ struct thread_info *tp;
+
+ ALL_NON_EXITED_THREADS (tp)
+ record_btrace_stop_replaying (tp);
+}
+
/* The to_execution_direction target method. */
static enum exec_direction_kind
ops->to_call_history_from = record_btrace_call_history_from;
ops->to_call_history_range = record_btrace_call_history_range;
ops->to_record_is_replaying = record_btrace_is_replaying;
+ ops->to_record_will_replay = record_btrace_will_replay;
+ ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
ops->to_xfer_partial = record_btrace_xfer_partial;
ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
END_CATCH
}
-/* Start recording Intel(R) Processor Trace. */
+/* Start recording in Intel Processor Trace format. */
static void
cmd_record_btrace_pt_start (char *args, int from_tty)
add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
_("\
-Start branch trace recording in Intel(R) Processor Trace format.\n\n\
+Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
&record_btrace_cmdlist);
add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);