/* Branch trace support for GDB, the GNU debugger.
- Copyright (C) 2013-2015 Free Software Foundation, Inc.
+ Copyright (C) 2013-2017 Free Software Foundation, Inc.
Contributed by Intel Corp. <markus.t.metzger@intel.com>
#include <inttypes.h>
#include <ctype.h>
+#include <algorithm>
/* Command lists for btrace maintenance commands. */
static struct cmd_list_element *maint_btrace_cmdlist;
/* Control whether to skip PAD packets when computing the packet history. */
static int maint_btrace_pt_skip_pad = 1;
+/* A vector of function segments. */
+typedef struct btrace_function * bfun_s;
+DEF_VEC_P (bfun_s);
+
static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message. Use do ... while (0) to avoid ambiguities
prefix, fun, file, level, ibegin, iend);
}
+/* Return the number of instructions in a given function call segment. */
+
+static unsigned int
+ftrace_call_num_insn (const struct btrace_function *bfun)
+{
+ if (bfun == NULL)
+ return 0;
+
+  /* A gap is always counted as one instruction.  This keeps the trace's
+     instruction numbering contiguous across gaps.  */
+ if (bfun->errcode != 0)
+ return 1;
+
+ return VEC_length (btrace_insn_s, bfun->insn);
+}
+
/* Return non-zero if BFUN does not match MFUN and FUN;
   return zero otherwise.  */
{
struct btrace_function *bfun;
- bfun = xzalloc (sizeof (*bfun));
+ bfun = XCNEW (struct btrace_function);
bfun->msym = mfun;
bfun->sym = fun;
prev->flow.next = bfun;
bfun->number = prev->number + 1;
- bfun->insn_offset = (prev->insn_offset
- + VEC_length (btrace_insn_s, prev->insn));
+ bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
bfun->level = prev->level;
}
bfun->flags = flags;
ftrace_debug (bfun, "set caller");
+ ftrace_debug (caller, "..to");
}
/* Fix up the caller for all segments of a function. */
return bfun;
}
+/* Return the caller of BFUN or NULL if there is none. This function skips
+ tail calls in the call chain. */
+static struct btrace_function *
+ftrace_get_caller (struct btrace_function *bfun)
+{
+ for (; bfun != NULL; bfun = bfun->up)
+ if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
+ return bfun->up;
+
+ return NULL;
+}
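+
+/* For example, in a back trace "main > foo > bar" where "foo" tail-called
+   "bar", ftrace_get_caller applied to "bar" skips "foo" and returns "main",
+   which is where "bar" will eventually return to.  */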
+
/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
symbol information. */
/* There is no call in PREV's back trace. We assume that the
branch trace did not include it. */
- /* Let's find the topmost call function - this skips tail calls. */
+ /* Let's find the topmost function and add a new caller for it.
+ This should handle a series of initial tail calls. */
while (prev->up != NULL)
prev = prev->up;
- /* We maintain levels for a series of returns for which we have
- not seen the calls.
- We start at the preceding function's level in case this has
- already been a return for which we have not seen the call.
- We start at level 0 otherwise, to handle tail calls correctly. */
- bfun->level = min (0, prev->level) - 1;
+ bfun->level = prev->level - 1;
/* Fix up the call stack for PREV. */
ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
else
{
/* There is a call in PREV's back trace to which we should have
- returned. Let's remain at this level. */
- bfun->level = prev->level;
+ returned but didn't. Let's start a new, separate back trace
+ from PREV's level. */
+ bfun->level = prev->level - 1;
+
+      /* We fix up the back trace for PREV but leave the other function
+	 segments at their current level.  This should handle cases like
+	 schedule (), where we're switching contexts.  */
+ prev->up = bfun;
+ prev->flags = BFUN_UP_LINKS_TO_RET;
ftrace_debug (bfun, "new return - unknown caller");
}
{
struct btrace_function *bfun;
- /* This is an unexplained function switch. The call stack will likely
- be wrong at this point. */
+  /* This is an unexplained function switch.  We can't really be sure about
+     the call stack; preserving it is the best we can do.  */
bfun = ftrace_new_function (prev, mfun, fun);
+ bfun->up = prev->up;
+ bfun->flags = prev->flags;
ftrace_debug (bfun, "new switch");
start = get_pc_function_start (pc);
+ /* A jump to the start of a function is (typically) a tail call. */
+ if (start == pc)
+ return ftrace_new_tailcall (bfun, mfun, fun);
+
/* If we can't determine the function for PC, we treat a jump at
- the end of the block as tail call. */
- if (start == 0 || start == pc)
+     the end of the block as a tail call if we're switching functions
+     and as an intra-function branch if we're not.  */
+ if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
return ftrace_new_tailcall (bfun, mfun, fun);
+
+ break;
}
}
}
return iclass;
}
+/* Try to match the back trace at LHS to the back trace at RHS. Returns the
+ number of matching function segments or zero if the back traces do not
+ match. */
+
+static int
+ftrace_match_backtrace (struct btrace_function *lhs,
+ struct btrace_function *rhs)
+{
+ int matches;
+
+ for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
+ {
+ if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
+ return 0;
+
+ lhs = ftrace_get_caller (lhs);
+ rhs = ftrace_get_caller (rhs);
+ }
+
+ return matches;
+}
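+
+/* For example, the back traces "main > foo > bar" and "main > foo > bar"
+   match in three function segments, while "main > foo" and "main > baz"
+   yield zero matches since already the innermost segments name different
+   functions.  */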
+
+/* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
+
+static void
+ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
+{
+ if (adjustment == 0)
+ return;
+
+ DEBUG_FTRACE ("fixup level (%+d)", adjustment);
+ ftrace_debug (bfun, "..bfun");
+
+ for (; bfun != NULL; bfun = bfun->flow.next)
+ bfun->level += adjustment;
+}
+
+/* Recompute the global level offset. Traverse the function trace and compute
+ the global level offset as the negative of the minimal function level. */
+
+static void
+ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
+{
+ struct btrace_function *bfun, *end;
+ int level;
+
+ if (btinfo == NULL)
+ return;
+
+ bfun = btinfo->begin;
+ if (bfun == NULL)
+ return;
+
+  /* The last function segment contains the current instruction, which is not
+     really part of the trace.  If it contains just this one instruction, we
+     stop when we reach it; otherwise, we let the loop below run to the
+     end.  */
+ end = btinfo->end;
+ if (VEC_length (btrace_insn_s, end->insn) > 1)
+ end = NULL;
+
+ level = INT_MAX;
+ for (; bfun != end; bfun = bfun->flow.next)
+ level = std::min (level, bfun->level);
+
+ DEBUG_FTRACE ("setting global level offset: %d", -level);
+ btinfo->level = -level;
+}
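+
+/* The offset is recomputed at the end of btrace_bridge_gaps below, since
+   connecting function segments may have adjusted the levels of entire
+   chains of segments.  */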
+
+/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
+ ftrace_connect_backtrace. */
+
+static void
+ftrace_connect_bfun (struct btrace_function *prev,
+ struct btrace_function *next)
+{
+ DEBUG_FTRACE ("connecting...");
+ ftrace_debug (prev, "..prev");
+ ftrace_debug (next, "..next");
+
+ /* The function segments are not yet connected. */
+ gdb_assert (prev->segment.next == NULL);
+ gdb_assert (next->segment.prev == NULL);
+
+ prev->segment.next = next;
+ next->segment.prev = prev;
+
+ /* We may have moved NEXT to a different function level. */
+ ftrace_fixup_level (next, prev->level - next->level);
+
+ /* If we run out of back trace for one, let's use the other's. */
+ if (prev->up == NULL)
+ {
+ if (next->up != NULL)
+ {
+ DEBUG_FTRACE ("using next's callers");
+ ftrace_fixup_caller (prev, next->up, next->flags);
+ }
+ }
+ else if (next->up == NULL)
+ {
+ if (prev->up != NULL)
+ {
+ DEBUG_FTRACE ("using prev's callers");
+ ftrace_fixup_caller (next, prev->up, prev->flags);
+ }
+ }
+ else
+ {
+      /* PREV may have a tailcall caller, NEXT can't.  If it does, fix up the
+	 up link to add the tail callers to NEXT's back trace.
+
+ This removes NEXT->UP from NEXT's back trace. It will be added back
+ when connecting NEXT and PREV's callers - provided they exist.
+
+ If PREV's back trace consists of a series of tail calls without an
+ actual call, there will be no further connection and NEXT's caller will
+ be removed for good. To catch this case, we handle it here and connect
+ the top of PREV's back trace to NEXT's caller. */
+ if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
+ {
+ struct btrace_function *caller;
+ btrace_function_flags flags;
+
+ /* We checked NEXT->UP above so CALLER can't be NULL. */
+ caller = next->up;
+ flags = next->flags;
+
+ DEBUG_FTRACE ("adding prev's tail calls to next");
+
+ ftrace_fixup_caller (next, prev->up, prev->flags);
+
+ for (prev = prev->up; prev != NULL; prev = prev->up)
+ {
+ /* At the end of PREV's back trace, continue with CALLER. */
+ if (prev->up == NULL)
+ {
+ DEBUG_FTRACE ("fixing up link for tailcall chain");
+ ftrace_debug (prev, "..top");
+ ftrace_debug (caller, "..up");
+
+ ftrace_fixup_caller (prev, caller, flags);
+
+ /* If we skipped any tail calls, this may move CALLER to a
+ different function level.
+
+ Note that changing CALLER's level is only OK because we
+ know that this is the last iteration of the bottom-to-top
+ walk in ftrace_connect_backtrace.
+
+ Otherwise we will fix up CALLER's level when we connect it
+ to PREV's caller in the next iteration. */
+ ftrace_fixup_level (caller, prev->level - caller->level - 1);
+ break;
+ }
+
+ /* There's nothing to do if we find a real call. */
+ if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
+ {
+ DEBUG_FTRACE ("will fix up link in next iteration");
+ break;
+ }
+ }
+ }
+ }
+}
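+
+/* For example, when connecting two "bar" segments where, on the left, "foo"
+   tail-called "bar" and "main" called "foo": the tail caller "foo" is
+   spliced into the right-hand "bar"'s back trace, and the back traces are
+   joined at the next matched pair of "main" segments in the following
+   iteration of ftrace_connect_backtrace.  */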
+
+/* Connect function segments on the same level in the back trace at LHS and RHS.
+ The back traces at LHS and RHS are expected to match according to
+ ftrace_match_backtrace. */
+
+static void
+ftrace_connect_backtrace (struct btrace_function *lhs,
+ struct btrace_function *rhs)
+{
+ while (lhs != NULL && rhs != NULL)
+ {
+ struct btrace_function *prev, *next;
+
+ gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
+
+ /* Connecting LHS and RHS may change the up link. */
+ prev = lhs;
+ next = rhs;
+
+ lhs = ftrace_get_caller (lhs);
+ rhs = ftrace_get_caller (rhs);
+
+ ftrace_connect_bfun (prev, next);
+ }
+}
+
+/* Try to bridge the gap between the function segments LHS and RHS, which lie
+   left and right of a gap, provided their back traces match in at least
+   MIN_MATCHES functions.
+
+ Returns non-zero if the gap could be bridged, zero otherwise. */
+
+static int
+ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
+ int min_matches)
+{
+ struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
+ int best_matches;
+
+ DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
+ rhs->insn_offset - 1, min_matches);
+
+ best_matches = 0;
+ best_l = NULL;
+ best_r = NULL;
+
+  /* We search the back traces of LHS and RHS for valid connections and
+     connect the two function segments that give the longest combined back
+     trace.  */
+
+ for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
+ for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
+ {
+ int matches;
+
+ matches = ftrace_match_backtrace (cand_l, cand_r);
+ if (best_matches < matches)
+ {
+ best_matches = matches;
+ best_l = cand_l;
+ best_r = cand_r;
+ }
+ }
+
+ /* We need at least MIN_MATCHES matches. */
+ gdb_assert (min_matches > 0);
+ if (best_matches < min_matches)
+ return 0;
+
+ DEBUG_FTRACE ("..matches: %d", best_matches);
+
+ /* We will fix up the level of BEST_R and succeeding function segments such
+ that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
+
+     This will ignore the level of RHS and following if BEST_R != RHS, i.e.
+     if BEST_R is a successor of RHS in the back trace of RHS.
+
+ To catch this, we already fix up the level here where we can start at RHS
+ instead of at BEST_R. We will ignore the level fixup when connecting
+ BEST_L to BEST_R as they will already be on the same level. */
+ ftrace_fixup_level (rhs, best_l->level - best_r->level);
+
+ ftrace_connect_backtrace (best_l, best_r);
+
+ return best_matches;
+}
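+
+/* Note that the candidate search above tries every pair of caller segments
+   from the two back traces, keeping the pair with the most matches, so
+   bridging a single gap is quadratic in the depth of the back traces.  */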
+
+/* Try to bridge gaps due to overflow or decode errors by connecting the
+ function segments that are separated by the gap. */
+
+static void
+btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
+{
+ VEC (bfun_s) *remaining;
+ struct cleanup *old_chain;
+ int min_matches;
+
+ DEBUG ("bridge gaps");
+
+ remaining = NULL;
+ old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
+
+  /* We require a minimum number of matches for bridging a gap.  The number
+     of required matches will be lowered with each iteration.
+
+     The more matches, the higher our confidence that the bridging is
+     correct.  For big gaps or small traces, however, it may not be feasible
+     to require a high number of matches.  */
+ for (min_matches = 5; min_matches > 0; --min_matches)
+ {
+      /* Let's try to bridge as many gaps as we can.  In some cases, we need
+	 to skip a gap and revisit it after we have closed later gaps.  */
+ while (!VEC_empty (bfun_s, *gaps))
+ {
+ struct btrace_function *gap;
+ unsigned int idx;
+
+ for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
+ {
+ struct btrace_function *lhs, *rhs;
+ int bridged;
+
+ /* We may have a sequence of gaps if we run from one error into
+ the next as we try to re-sync onto the trace stream. Ignore
+ all but the leftmost gap in such a sequence.
+
+ Also ignore gaps at the beginning of the trace. */
+ lhs = gap->flow.prev;
+ if (lhs == NULL || lhs->errcode != 0)
+ continue;
+
+ /* Skip gaps to the right. */
+ for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
+ if (rhs->errcode == 0)
+ break;
+
+ /* Ignore gaps at the end of the trace. */
+ if (rhs == NULL)
+ continue;
+
+ bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
+
+ /* Keep track of gaps we were not able to bridge and try again.
+ If we just pushed them to the end of GAPS we would risk an
+ infinite loop in case we simply cannot bridge a gap. */
+ if (bridged == 0)
+ VEC_safe_push (bfun_s, remaining, gap);
+ }
+
+ /* Let's see if we made any progress. */
+ if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
+ break;
+
+ VEC_free (bfun_s, *gaps);
+
+ *gaps = remaining;
+ remaining = NULL;
+ }
+
+ /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
+ if (VEC_empty (bfun_s, *gaps))
+ break;
+
+ VEC_free (bfun_s, remaining);
+ }
+
+ do_cleanups (old_chain);
+
+ /* We may omit this in some cases. Not sure it is worth the extra
+ complication, though. */
+ ftrace_compute_global_level_offset (&tp->btrace);
+}
+
/* Compute the function branch trace from BTS trace. */
static void
btrace_compute_ftrace_bts (struct thread_info *tp,
- const struct btrace_data_bts *btrace)
+ const struct btrace_data_bts *btrace,
+ VEC (bfun_s) **gaps)
{
struct btrace_thread_info *btinfo;
struct btrace_function *begin, *end;
struct gdbarch *gdbarch;
- unsigned int blk, ngaps;
+ unsigned int blk;
int level;
gdbarch = target_gdbarch ();
btinfo = &tp->btrace;
begin = btinfo->begin;
end = btinfo->end;
- ngaps = btinfo->ngaps;
level = begin != NULL ? -btinfo->level : INT_MAX;
blk = VEC_length (btrace_block_s, btrace->blocks);
/* We should hit the end of the block. Warn if we went too far. */
if (block->end < pc)
{
-         /* Indicate the gap in the trace - unless we're at the
-            beginning.  */
-         if (begin != NULL)
-           {
-             warning (_("Recorded trace may be corrupted around %s."),
-                      core_addr_to_string_nz (pc));
-             end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
-             ngaps += 1;
-           }
+         /* Indicate the gap in the trace.  */
+         end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
+         if (begin == NULL)
+           begin = end;
+
+         VEC_safe_push (bfun_s, *gaps, end);
+
+         warning (_("Recorded trace may be corrupted at instruction "
+                    "%u (pc = %s)."), end->insn_offset - 1,
+                  core_addr_to_string_nz (pc));
break;
}
/* Maintain the function level offset.
For all but the last block, we do it here. */
if (blk != 0)
- level = min (level, end->level);
+ level = std::min (level, end->level);
size = 0;
TRY
insn.pc = pc;
insn.size = size;
insn.iclass = ftrace_classify_insn (gdbarch, pc);
+ insn.flags = 0;
ftrace_update_insns (end, &insn);
/* We can't continue if we fail to compute the size. */
if (size <= 0)
{
- warning (_("Recorded trace may be incomplete around %s."),
- core_addr_to_string_nz (pc));
-
/* Indicate the gap in the trace. We just added INSN so we're
not at the beginning. */
end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
- ngaps += 1;
+
+ VEC_safe_push (bfun_s, *gaps, end);
+
+ warning (_("Recorded trace may be incomplete at instruction %u "
+ "(pc = %s)."), end->insn_offset - 1,
+ core_addr_to_string_nz (pc));
break;
}
and is not really part of the execution history, it shouldn't
affect the level. */
if (blk == 0)
- level = min (level, end->level);
+ level = std::min (level, end->level);
}
}
btinfo->begin = begin;
btinfo->end = end;
- btinfo->ngaps = ngaps;
/* LEVEL is the minimal function level of all btrace function segments.
Define the global level offset to -LEVEL so all function levels are
}
}
+/* Return the btrace instruction flags for INSN. */
+
+static btrace_insn_flags
+pt_btrace_insn_flags (const struct pt_insn *insn)
+{
+ btrace_insn_flags flags = 0;
+
+ if (insn->speculative)
+ flags |= BTRACE_INSN_FLAG_SPECULATIVE;
+
+ return flags;
+}
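+
+/* The SPECULATIVE flag marks instructions that were executed speculatively,
+   e.g. inside a transaction that may still be aborted; see
+   BTRACE_INSN_FLAG_SPECULATIVE in btrace.h.  */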
+
/* Add function branch trace using DECODER. */
static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
struct btrace_function **pbegin,
struct btrace_function **pend, int *plevel,
- unsigned int *ngaps)
+ VEC (bfun_s) **gaps)
{
struct btrace_function *begin, *end, *upd;
uint64_t offset;
- int errcode, nerrors;
+ int errcode;
begin = *pbegin;
end = *pend;
- nerrors = 0;
for (;;)
{
struct btrace_insn btinsn;
if (errcode < 0)
{
if (errcode != -pte_eos)
- warning (_("Failed to synchronize onto the Intel(R) Processor "
+ warning (_("Failed to synchronize onto the Intel Processor "
"Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
break;
}
flag. The ENABLED instruction flag means that we continued
from some other instruction. Indicate this as a trace gap. */
if (insn.enabled)
-       *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
-     /* Indicate trace overflows.  */
-     if (insn.resynced)
-       *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
+       {
+         *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
+
+         VEC_safe_push (bfun_s, *gaps, end);
+
+         pt_insn_get_offset (decoder, &offset);
+
+         warning (_("Non-contiguous trace at instruction %u (offset "
+                    "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
+                  end->insn_offset - 1, offset, insn.ip);
+       }
+   }
+
+ /* Indicate trace overflows. */
+ if (insn.resynced)
+ {
+ *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
+ if (begin == NULL)
+ *pbegin = begin = end;
+
+ VEC_safe_push (bfun_s, *gaps, end);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
+ ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
+ offset, insn.ip);
}
upd = ftrace_update_function (end, insn.ip);
}
/* Maintain the function level offset. */
- *plevel = min (*plevel, end->level);
+ *plevel = std::min (*plevel, end->level);
btinsn.pc = (CORE_ADDR) insn.ip;
btinsn.size = (gdb_byte) insn.size;
btinsn.iclass = pt_reclassify_insn (insn.iclass);
+ btinsn.flags = pt_btrace_insn_flags (&insn);
ftrace_update_insns (end, &btinsn);
}
if (errcode == -pte_eos)
break;
- /* If the gap is at the very beginning, we ignore it - we will have
- less trace, but we won't have any holes in the trace. */
+ /* Indicate the gap in the trace. */
+ *pend = end = ftrace_new_gap (end, errcode);
if (begin == NULL)
- continue;
+ *pbegin = begin = end;
+
+ VEC_safe_push (bfun_s, *gaps, end);
pt_insn_get_offset (decoder, &offset);
- warning (_("Failed to decode Intel(R) Processor Trace near trace "
- "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
+ warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
+ ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
offset, insn.ip, pt_errstr (pt_errcode (errcode)));
-
- /* Indicate the gap in the trace. */
- *pend = end = ftrace_new_gap (end, errcode);
- *ngaps += 1;
}
-
- if (nerrors > 0)
- warning (_("The recorded execution trace may have gaps."));
}
/* A callback function to allow the trace decoder to read the inferior's
const struct pt_asid *asid, uint64_t pc,
void *context)
{
- int errcode;
+ int result, errcode;
+ result = (int) size;
TRY
{
errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
if (errcode != 0)
- return -pte_nomap;
+ result = -pte_nomap;
}
CATCH (error, RETURN_MASK_ERROR)
{
- return -pte_nomap;
+ result = -pte_nomap;
}
END_CATCH
- return size;
+ return result;
}
/* Translate the vendor from one enum to another. */
btrace_add_pc (tp);
}
-/* Compute the function branch trace from Intel(R) Processor Trace. */
+/* Compute the function branch trace from the Intel Processor Trace
+   format.  */
static void
btrace_compute_ftrace_pt (struct thread_info *tp,
- const struct btrace_data_pt *btrace)
+ const struct btrace_data_pt *btrace,
+ VEC (bfun_s) **gaps)
{
struct btrace_thread_info *btinfo;
struct pt_insn_decoder *decoder;
errcode = pt_cpu_errata (&config.errata, &config.cpu);
if (errcode < 0)
- error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
+ error (_("Failed to configure the Intel Processor Trace decoder: %s."),
pt_errstr (pt_errcode (errcode)));
decoder = pt_insn_alloc_decoder (&config);
if (decoder == NULL)
- error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
+ error (_("Failed to allocate the Intel Processor Trace decoder."));
TRY
{
image = pt_insn_get_image(decoder);
if (image == NULL)
- error (_("Failed to configure the Intel(R) Processor Trace decoder."));
+ error (_("Failed to configure the Intel Processor Trace decoder."));
errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
if (errcode < 0)
- error (_("Failed to configure the Intel(R) Processor Trace decoder: "
+ error (_("Failed to configure the Intel Processor Trace decoder: "
"%s."), pt_errstr (pt_errcode (errcode)));
- ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
- &btinfo->ngaps);
+ ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
}
CATCH (error, RETURN_MASK_ALL)
{
if (error.reason == RETURN_QUIT && btinfo->end != NULL)
{
btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
- btinfo->ngaps++;
+
+ VEC_safe_push (bfun_s, *gaps, btinfo->end);
}
btrace_finalize_ftrace_pt (decoder, tp, level);
static void
btrace_compute_ftrace_pt (struct thread_info *tp,
- const struct btrace_data_pt *btrace)
+ const struct btrace_data_pt *btrace,
+ VEC (bfun_s) **gaps)
{
internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}
a thread given by BTINFO. */
static void
-btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
+btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
+ VEC (bfun_s) **gaps)
{
DEBUG ("compute ftrace");
return;
case BTRACE_FORMAT_BTS:
- btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
+ btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
return;
case BTRACE_FORMAT_PT:
- btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
+ btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
return;
}
internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
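+
+/* Add the gaps in GAPS to the thread's gap count and try to bridge them.  */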
+static void
+btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
+{
+ if (!VEC_empty (bfun_s, *gaps))
+ {
+ tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
+ btrace_bridge_gaps (tp, gaps);
+ }
+}
+
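+/* Compute the function branch trace from the branch trace data BTRACE for
+   thread TP and try to bridge any gaps that were found.  If computing the
+   trace throws, the gaps found so far are finalized before re-throwing.  */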
+static void
+btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
+{
+ VEC (bfun_s) *gaps;
+ struct cleanup *old_chain;
+
+ gaps = NULL;
+ old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
+
+ TRY
+ {
+ btrace_compute_ftrace_1 (tp, btrace, &gaps);
+ }
+ CATCH (error, RETURN_MASK_ALL)
+ {
+ btrace_finalize_ftrace (tp, &gaps);
+
+ throw_exception (error);
+ }
+ END_CATCH
+
+ btrace_finalize_ftrace (tp, &gaps);
+
+ do_cleanups (old_chain);
+}
+
/* Add an entry for the current PC. */
static void
if (tp->btrace.target != NULL)
return;
+#if !defined (HAVE_LIBIPT)
+ if (conf->format == BTRACE_FORMAT_PT)
+ error (_("GDB does not support Intel Processor Trace."));
+#endif /* !defined (HAVE_LIBIPT) */
+
if (!target_supports_btrace (conf->format))
error (_("Target does not support branch tracing."));
- DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("enable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
tp->btrace.target = target_enable_btrace (tp->ptid, conf);
- /* Add an entry for the current PC so we start tracing from where we
- enabled it. */
- if (tp->btrace.target != NULL)
- btrace_add_pc (tp);
+ /* We're done if we failed to enable tracing. */
+ if (tp->btrace.target == NULL)
+ return;
+
+ /* We need to undo the enable in case of errors. */
+ TRY
+ {
+ /* Add an entry for the current PC so we start tracing from where we
+ enabled it.
+
+ If we can't access TP's registers, TP is most likely running. In this
+ case, we can't really say where tracing was enabled so it should be
+ safe to simply skip this step.
+
+ This is not relevant for BTRACE_FORMAT_PT since the trace will already
+ start at the PC at which tracing was enabled. */
+ if (conf->format != BTRACE_FORMAT_PT
+ && can_access_registers_ptid (tp->ptid))
+ btrace_add_pc (tp);
+ }
+ CATCH (exception, RETURN_MASK_ALL)
+ {
+ btrace_disable (tp);
+
+ throw_exception (exception);
+ }
+ END_CATCH
}
/* See btrace.h. */
if (btp->target == NULL)
return;
- DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("disable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
target_disable_btrace (btp->target);
btp->target = NULL;
if (btp->target == NULL)
return;
- DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
target_teardown_btrace (btp->target);
btp->target = NULL;
/* See btrace.h. */
+const char *
+btrace_decode_error (enum btrace_format format, int errcode)
+{
+ switch (format)
+ {
+ case BTRACE_FORMAT_BTS:
+ switch (errcode)
+ {
+ case BDE_BTS_OVERFLOW:
+ return _("instruction overflow");
+
+ case BDE_BTS_INSN_SIZE:
+ return _("unknown instruction");
+
+ default:
+ break;
+ }
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ switch (errcode)
+ {
+ case BDE_PT_USER_QUIT:
+ return _("trace decode cancelled");
+
+ case BDE_PT_DISABLED:
+ return _("disabled");
+
+ case BDE_PT_OVERFLOW:
+ return _("overflow");
+
+ default:
+ if (errcode < 0)
+ return pt_errstr (pt_errcode (errcode));
+ break;
+ }
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+
+ default:
+ break;
+ }
+
+ return _("unknown");
+}
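+
+/* A caller might turn a gap's error code back into text along these lines
+   (a hypothetical sketch; record-btrace.c does something similar when
+   printing the instruction history):
+
+     const char *errstr
+       = btrace_decode_error (btinfo->data.format, bfun->errcode);
+
+     printf_unfiltered (_("[decode error: %s]\n"), errstr);  */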
+
+/* See btrace.h. */
+
void
btrace_fetch (struct thread_info *tp)
{
struct cleanup *cleanup;
int errcode;
- DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
btinfo = &tp->btrace;
tinfo = btinfo->target;
if (btinfo->replay != NULL)
return;
+ /* We should not be called on running or exited threads. */
+ gdb_assert (can_access_registers_ptid (tp->ptid));
+
btrace_data_init (&btrace);
cleanup = make_cleanup_btrace_data (&btrace);
/* Compute the trace, provided we have any. */
if (!btrace_data_empty (&btrace))
{
+ struct btrace_function *bfun;
+
/* Store the raw trace data. The stored data will be cleared in
btrace_clear, so we always append the new trace. */
btrace_data_append (&btinfo->data, &btrace);
btrace_maint_clear (btinfo);
+ VEC_truncate (btrace_fun_p, btinfo->functions, 0);
btrace_clear_history (btinfo);
btrace_compute_ftrace (tp, &btrace);
+
+ for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
+ VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
}
do_cleanups (cleanup);
struct btrace_thread_info *btinfo;
struct btrace_function *it, *trash;
- DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("clear thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
/* Make sure btrace frames that may hold a pointer into the branch
trace data are destroyed. */
btinfo = &tp->btrace;
+ VEC_free (btrace_fun_p, btinfo->functions);
+
it = btinfo->begin;
while (it != NULL)
{
const struct gdb_xml_element *element,
void *user_data, VEC (gdb_xml_value_s) *attributes)
{
- const char *version = xml_find_attribute (attributes, "version")->value;
+ const char *version
+ = (const char *) xml_find_attribute (attributes, "version")->value;
if (strcmp (version, "1.0") != 0)
gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
struct btrace_block *block;
ULONGEST *begin, *end;
- btrace = user_data;
+ btrace = (struct btrace_data *) user_data;
switch (btrace->format)
{
gdb_xml_error (parser, _("Btrace format error."));
}
- begin = xml_find_attribute (attributes, "begin")->value;
- end = xml_find_attribute (attributes, "end")->value;
+ begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
+ end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
block->begin = *begin;
static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
- gdb_byte **pdata, unsigned long *psize)
+ gdb_byte **pdata, size_t *psize)
{
struct cleanup *cleanup;
gdb_byte *data, *bin;
- unsigned long size;
- size_t len;
+ size_t len, size;
len = strlen (body_text);
- size = len / 2;
-
- if ((size_t) size * 2 != len)
+ if (len % 2 != 0)
gdb_xml_error (parser, _("Bad raw data size."));
- bin = data = xmalloc (size);
+ size = len / 2;
+
+ bin = data = (gdb_byte *) xmalloc (size);
cleanup = make_cleanup (xfree, data);
/* We use hex encoding - see common/rsp-low.h. */
const char *vendor;
ULONGEST *family, *model, *stepping;
- vendor = xml_find_attribute (attributes, "vendor")->value;
- family = xml_find_attribute (attributes, "family")->value;
- model = xml_find_attribute (attributes, "model")->value;
- stepping = xml_find_attribute (attributes, "stepping")->value;
+ vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
+ family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
+ model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
+ stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
- btrace = user_data;
+ btrace = (struct btrace_data *) user_data;
if (strcmp (vendor, "GenuineIntel") == 0)
btrace->variant.pt.config.cpu.vendor = CV_INTEL;
{
struct btrace_data *btrace;
- btrace = user_data;
+ btrace = (struct btrace_data *) user_data;
parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
&btrace->variant.pt.size);
}
{
struct btrace_data *btrace;
- btrace = user_data;
+ btrace = (struct btrace_data *) user_data;
btrace->format = BTRACE_FORMAT_PT;
btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
btrace->variant.pt.data = NULL;
struct btrace_config *conf;
struct gdb_xml_value *size;
- conf = user_data;
+ conf = (struct btrace_config *) user_data;
conf->format = BTRACE_FORMAT_BTS;
conf->bts.size = 0;
struct btrace_config *conf;
struct gdb_xml_value *size;
- conf = user_data;
+ conf = (struct btrace_config *) user_data;
conf->format = BTRACE_FORMAT_PT;
conf->pt.size = 0;
/* See btrace.h. */
-unsigned int
-btrace_insn_number (const struct btrace_insn_iterator *it)
+int
+btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
- const struct btrace_function *bfun;
-
- bfun = it->function;
+ return it->function->errcode;
+}
- /* Return zero if the iterator points to a gap in the trace. */
- if (bfun->errcode != 0)
- return 0;
+/* See btrace.h. */
- return bfun->insn_offset + it->index;
+unsigned int
+btrace_insn_number (const struct btrace_insn_iterator *it)
+{
+ return it->function->insn_offset + it->index;
}
/* See btrace.h. */
space = end - index;
/* Advance the iterator as far as possible within this segment. */
- adv = min (space, stride);
+ adv = std::min (space, stride);
stride -= adv;
index += adv;
steps += adv;
}
/* Advance the iterator as far as possible within this segment. */
- adv = min (index, stride);
+ adv = std::min (index, stride);
stride -= adv;
index -= adv;
lnum = btrace_insn_number (lhs);
rnum = btrace_insn_number (rhs);
- /* A gap has an instruction number of zero. Things are getting more
- complicated if gaps are involved.
-
- We take the instruction number offset from the iterator's function.
- This is the number of the first instruction after the gap.
-
- This is OK as long as both lhs and rhs point to gaps. If only one of
- them does, we need to adjust the number based on the other's regular
- instruction number. Otherwise, a gap might compare equal to an
- instruction. */
-
- if (lnum == 0 && rnum == 0)
- {
- lnum = lhs->function->insn_offset;
- rnum = rhs->function->insn_offset;
- }
- else if (lnum == 0)
- {
- lnum = lhs->function->insn_offset;
-
- if (lnum == rnum)
- lnum -= 1;
- }
- else if (rnum == 0)
- {
- rnum = rhs->function->insn_offset;
-
- if (rnum == lnum)
- rnum -= 1;
- }
-
return (int) (lnum - rnum);
}
unsigned int number)
{
const struct btrace_function *bfun;
- unsigned int end, length;
+ unsigned int upper, lower;
- for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
- {
- /* Skip gaps. */
- if (bfun->errcode != 0)
- continue;
+ if (VEC_empty (btrace_fun_p, btinfo->functions))
+ return 0;
- if (bfun->insn_offset <= number)
- break;
- }
+ lower = 0;
+ bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
+ if (number < bfun->insn_offset)
+ return 0;
- if (bfun == NULL)
+ upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
+ bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
+ if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
return 0;
- length = VEC_length (btrace_insn_s, bfun->insn);
- gdb_assert (length > 0);
+  /* We assume that there are no holes in the numbering.  This holds since
+     a gap is counted as one instruction (see ftrace_call_num_insn).  */
+ for (;;)
+ {
+ const unsigned int average = lower + (upper - lower) / 2;
- end = bfun->insn_offset + length;
- if (end <= number)
- return 0;
+ bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
+
+ if (number < bfun->insn_offset)
+ {
+ upper = average - 1;
+ continue;
+ }
+
+ if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
+ {
+ lower = average + 1;
+ continue;
+ }
+
+ break;
+ }
it->function = bfun;
it->index = number - bfun->insn_offset;
-
return 1;
}
const struct btrace_insn_iterator *end)
{
if (btinfo->insn_history == NULL)
- btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
+ btinfo->insn_history = XCNEW (struct btrace_insn_history);
btinfo->insn_history->begin = *begin;
btinfo->insn_history->end = *end;
gdb_assert (begin->btinfo == end->btinfo);
if (btinfo->call_history == NULL)
- btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
+ btinfo->call_history = XCNEW (struct btrace_call_history);
btinfo->call_history->begin = *begin;
btinfo->call_history->end = *end;
static void
do_btrace_data_cleanup (void *arg)
{
- btrace_data_fini (arg);
+ btrace_data_fini ((struct btrace_data *) arg);
}
/* See btrace.h. */
break;
case ppt_pip:
- printf_unfiltered (("pip %" PRIx64 ""), packet->payload.pip.cr3);
+ printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
+ packet->payload.pip.nr ? (" nr") : (""));
break;
case ppt_tsc:
printf_unfiltered (("ovf"));
break;
+ case ppt_stop:
+ printf_unfiltered (("stop"));
+ break;
+
+ case ppt_vmcs:
+ printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
+ break;
+
+ case ppt_tma:
+ printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
+ packet->payload.tma.fc);
+ break;
+
+ case ppt_mtc:
+ printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
+ break;
+
+ case ppt_cyc:
+ printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
+ break;
+
+ case ppt_mnt:
+ printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
+ break;
}
}
}
if (errcode != -pte_eos)
- warning (_("Failed to synchronize onto the Intel(R) Processor Trace "
+ warning (_("Failed to synchronize onto the Intel Processor Trace "
"stream: %s."), pt_errstr (pt_errcode (errcode)));
}
errcode = pt_cpu_errata (&config.errata, &config.cpu);
if (errcode < 0)
- error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
+ error (_("Failed to configure the Intel Processor Trace decoder: %s."),
pt_errstr (pt_errcode (errcode)));
decoder = pt_pkt_alloc_decoder (&config);
if (decoder == NULL)
- error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
+ error (_("Failed to allocate the Intel Processor Trace decoder."));
TRY
{
0, &maintenance_set_cmdlist);
add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
-Set Intel(R) Processor Trace specific variables."),
+Set Intel Processor Trace specific variables."),
&maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
0, &maint_btrace_set_cmdlist);
0, &maintenance_show_cmdlist);
add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
-Show Intel(R) Processor Trace specific variables."),
+Show Intel Processor Trace specific variables."),
&maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
0, &maint_btrace_show_cmdlist);