X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Fbtrace.c;h=95dc7abb559fdae3ae8a573df18d7e6ec14634e4;hb=5430098f1807e084fe4ff5057040d68435f3d8a2;hp=561ee7cb76fdaac52e977a0a658cb660d6ede907;hpb=b20a652466ea6e62e7d056188b79a0677a29f46e;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/btrace.c b/gdb/btrace.c index 561ee7cb76..95dc7abb55 100644 --- a/gdb/btrace.c +++ b/gdb/btrace.c @@ -1,6 +1,6 @@ /* Branch trace support for GDB, the GNU debugger. - Copyright (C) 2013-2015 Free Software Foundation, Inc. + Copyright (C) 2013-2017 Free Software Foundation, Inc. Contributed by Intel Corp. @@ -32,8 +32,26 @@ #include "xml-support.h" #include "regcache.h" #include "rsp-low.h" +#include "gdbcmd.h" +#include "cli/cli-utils.h" #include +#include +#include + +/* Command lists for btrace maintenance commands. */ +static struct cmd_list_element *maint_btrace_cmdlist; +static struct cmd_list_element *maint_btrace_set_cmdlist; +static struct cmd_list_element *maint_btrace_show_cmdlist; +static struct cmd_list_element *maint_btrace_pt_set_cmdlist; +static struct cmd_list_element *maint_btrace_pt_show_cmdlist; + +/* Control whether to skip PAD packets when computing the packet history. */ +static int maint_btrace_pt_skip_pad = 1; + +/* A vector of function segments. */ +typedef struct btrace_function * bfun_s; +DEF_VEC_P (bfun_s); static void btrace_add_pc (struct thread_info *tp); @@ -123,6 +141,21 @@ ftrace_debug (const struct btrace_function *bfun, const char *prefix) prefix, fun, file, level, ibegin, iend); } +/* Return the number of instructions in a given function call segment. */ + +static unsigned int +ftrace_call_num_insn (const struct btrace_function* bfun) +{ + if (bfun == NULL) + return 0; + + /* A gap is always counted as one instruction. */ + if (bfun->errcode != 0) + return 1; + + return VEC_length (btrace_insn_s, bfun->insn); +} + /* Return non-zero if BFUN does not match MFUN and FUN, return zero otherwise. */ @@ -180,7 +213,7 @@ ftrace_new_function (struct btrace_function *prev, { struct btrace_function *bfun; - bfun = xzalloc (sizeof (*bfun)); + bfun = XCNEW (struct btrace_function); bfun->msym = mfun; bfun->sym = fun; @@ -198,8 +231,7 @@ ftrace_new_function (struct btrace_function *prev, prev->flow.next = bfun; bfun->number = prev->number + 1; - bfun->insn_offset = (prev->insn_offset - + VEC_length (btrace_insn_s, prev->insn)); + bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev); bfun->level = prev->level; } @@ -220,6 +252,7 @@ ftrace_update_caller (struct btrace_function *bfun, bfun->flags = flags; ftrace_debug (bfun, "set caller"); + ftrace_debug (caller, "..to"); } /* Fix up the caller for all segments of a function. */ @@ -282,6 +315,18 @@ ftrace_new_tailcall (struct btrace_function *caller, return bfun; } +/* Return the caller of BFUN or NULL if there is none. This function skips + tail calls in the call chain. */ +static struct btrace_function * +ftrace_get_caller (struct btrace_function *bfun) +{ + for (; bfun != NULL; bfun = bfun->up) + if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0) + return bfun->up; + + return NULL; +} + /* Find the innermost caller in the back trace of BFUN with MFUN/FUN symbol information. */ @@ -373,16 +418,12 @@ ftrace_new_return (struct btrace_function *prev, /* There is no call in PREV's back trace. We assume that the branch trace did not include it. */ - /* Let's find the topmost call function - this skips tail calls. */ + /* Let's find the topmost function and add a new caller for it. 
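
The insn_offset bookkeeping introduced above is easier to see in isolation. Below is a minimal, self-contained sketch with simplified stand-in types (struct segment and num_insn are illustrative, not GDB's btrace_function and ftrace_call_num_insn themselves) showing how consecutive function segments are numbered when a decode gap, which carries no instructions but still occupies one instruction slot, sits between them.

#include <stdio.h>

/* Simplified stand-in for GDB's btrace_function.  */
struct segment
{
  int errcode;              /* Non-zero marks a decode gap.  */
  unsigned int ninsns;      /* Instructions in this segment.  */
  unsigned int insn_offset; /* Number of this segment's first instruction.  */
};

/* A gap counts as one instruction, mirroring ftrace_call_num_insn.  */
static unsigned int
num_insn (const struct segment *seg)
{
  return seg->errcode != 0 ? 1 : seg->ninsns;
}

int
main (void)
{
  struct segment trace[3] = {
    { 0, 5, 0 }, { -1 /* gap */, 0, 0 }, { 0, 3, 0 }
  };
  unsigned int i;

  for (i = 1; i < 3; ++i)
    trace[i].insn_offset = trace[i - 1].insn_offset + num_insn (&trace[i - 1]);

  for (i = 0; i < 3; ++i)
    printf ("segment %u starts at instruction %u\n", i, trace[i].insn_offset);
  /* Prints 0, 5, 6: the gap consumed exactly one instruction number.  */
  return 0;
}
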
+ This should handle a series of initial tail calls. */ while (prev->up != NULL) prev = prev->up; - /* We maintain levels for a series of returns for which we have - not seen the calls. - We start at the preceding function's level in case this has - already been a return for which we have not seen the call. - We start at level 0 otherwise, to handle tail calls correctly. */ - bfun->level = min (0, prev->level) - 1; + bfun->level = prev->level - 1; /* Fix up the call stack for PREV. */ ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET); @@ -392,8 +433,16 @@ ftrace_new_return (struct btrace_function *prev, else { /* There is a call in PREV's back trace to which we should have - returned. Let's remain at this level. */ - bfun->level = prev->level; + returned but didn't. Let's start a new, separate back trace + from PREV's level. */ + bfun->level = prev->level - 1; + + /* We fix up the back trace for PREV but leave other function segments + on the same level as they are. + This should handle things like schedule () correctly where we're + switching contexts. */ + prev->up = bfun; + prev->flags = BFUN_UP_LINKS_TO_RET; ftrace_debug (bfun, "new return - unknown caller"); } @@ -413,9 +462,11 @@ ftrace_new_switch (struct btrace_function *prev, { struct btrace_function *bfun; - /* This is an unexplained function switch. The call stack will likely - be wrong at this point. */ + /* This is an unexplained function switch. We can't really be sure about the + call stack, yet the best I can think of right now is to preserve it. */ bfun = ftrace_new_function (prev, mfun, fun); + bfun->up = prev->up; + bfun->flags = prev->flags; ftrace_debug (bfun, "new switch"); @@ -515,10 +566,17 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc) start = get_pc_function_start (pc); + /* A jump to the start of a function is (typically) a tail call. */ + if (start == pc) + return ftrace_new_tailcall (bfun, mfun, fun); + /* If we can't determine the function for PC, we treat a jump at - the end of the block as tail call. */ - if (start == 0 || start == pc) + the end of the block as tail call if we're switching functions + and as an intra-function branch if we don't. */ + if (start == 0 && ftrace_function_switched (bfun, mfun, fun)) return ftrace_new_tailcall (bfun, mfun, fun); + + break; } } } @@ -574,23 +632,359 @@ ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc) return iclass; } +/* Try to match the back trace at LHS to the back trace at RHS. Returns the + number of matching function segments or zero if the back traces do not + match. */ + +static int +ftrace_match_backtrace (struct btrace_function *lhs, + struct btrace_function *rhs) +{ + int matches; + + for (matches = 0; lhs != NULL && rhs != NULL; ++matches) + { + if (ftrace_function_switched (lhs, rhs->msym, rhs->sym)) + return 0; + + lhs = ftrace_get_caller (lhs); + rhs = ftrace_get_caller (rhs); + } + + return matches; +} + +/* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */ + +static void +ftrace_fixup_level (struct btrace_function *bfun, int adjustment) +{ + if (adjustment == 0) + return; + + DEBUG_FTRACE ("fixup level (%+d)", adjustment); + ftrace_debug (bfun, "..bfun"); + + for (; bfun != NULL; bfun = bfun->flow.next) + bfun->level += adjustment; +} + +/* Recompute the global level offset. Traverse the function trace and compute + the global level offset as the negative of the minimal function level. 
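
That computation in miniature, with a plain array of per-segment levels standing in for the segment chain (a hedged sketch, not the GDB function itself):

#include <limits.h>
#include <stdio.h>

/* Negative of the minimal level; negating the minimum makes the outermost
   frame print at level 0.  */
static int
global_level_offset (const int *levels, int n)
{
  int i, min = INT_MAX;

  for (i = 0; i < n; ++i)
    if (levels[i] < min)
      min = levels[i];

  return n > 0 ? -min : 0;
}

int
main (void)
{
  int levels[] = { -2, -1, 0, -3, -2 };

  printf ("offset = %d\n", global_level_offset (levels, 5));  /* offset = 3 */
  return 0;
}
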
*/ + +static void +ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo) +{ + struct btrace_function *bfun, *end; + int level; + + if (btinfo == NULL) + return; + + bfun = btinfo->begin; + if (bfun == NULL) + return; + + /* The last function segment contains the current instruction, which is not + really part of the trace. If it contains just this one instruction, we + stop when we reach it; otherwise, we let the below loop run to the end. */ + end = btinfo->end; + if (VEC_length (btrace_insn_s, end->insn) > 1) + end = NULL; + + level = INT_MAX; + for (; bfun != end; bfun = bfun->flow.next) + level = std::min (level, bfun->level); + + DEBUG_FTRACE ("setting global level offset: %d", -level); + btinfo->level = -level; +} + +/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in + ftrace_connect_backtrace. */ + +static void +ftrace_connect_bfun (struct btrace_function *prev, + struct btrace_function *next) +{ + DEBUG_FTRACE ("connecting..."); + ftrace_debug (prev, "..prev"); + ftrace_debug (next, "..next"); + + /* The function segments are not yet connected. */ + gdb_assert (prev->segment.next == NULL); + gdb_assert (next->segment.prev == NULL); + + prev->segment.next = next; + next->segment.prev = prev; + + /* We may have moved NEXT to a different function level. */ + ftrace_fixup_level (next, prev->level - next->level); + + /* If we run out of back trace for one, let's use the other's. */ + if (prev->up == NULL) + { + if (next->up != NULL) + { + DEBUG_FTRACE ("using next's callers"); + ftrace_fixup_caller (prev, next->up, next->flags); + } + } + else if (next->up == NULL) + { + if (prev->up != NULL) + { + DEBUG_FTRACE ("using prev's callers"); + ftrace_fixup_caller (next, prev->up, prev->flags); + } + } + else + { + /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up + link to add the tail callers to NEXT's back trace. + + This removes NEXT->UP from NEXT's back trace. It will be added back + when connecting NEXT and PREV's callers - provided they exist. + + If PREV's back trace consists of a series of tail calls without an + actual call, there will be no further connection and NEXT's caller will + be removed for good. To catch this case, we handle it here and connect + the top of PREV's back trace to NEXT's caller. */ + if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0) + { + struct btrace_function *caller; + btrace_function_flags flags; + + /* We checked NEXT->UP above so CALLER can't be NULL. */ + caller = next->up; + flags = next->flags; + + DEBUG_FTRACE ("adding prev's tail calls to next"); + + ftrace_fixup_caller (next, prev->up, prev->flags); + + for (prev = prev->up; prev != NULL; prev = prev->up) + { + /* At the end of PREV's back trace, continue with CALLER. */ + if (prev->up == NULL) + { + DEBUG_FTRACE ("fixing up link for tailcall chain"); + ftrace_debug (prev, "..top"); + ftrace_debug (caller, "..up"); + + ftrace_fixup_caller (prev, caller, flags); + + /* If we skipped any tail calls, this may move CALLER to a + different function level. + + Note that changing CALLER's level is only OK because we + know that this is the last iteration of the bottom-to-top + walk in ftrace_connect_backtrace. + + Otherwise we will fix up CALLER's level when we connect it + to PREV's caller in the next iteration. */ + ftrace_fixup_level (caller, prev->level - caller->level - 1); + break; + } + + /* There's nothing to do if we find a real call. 
*/ + if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0) + { + DEBUG_FTRACE ("will fix up link in next iteration"); + break; + } + } + } + } +} + +/* Connect function segments on the same level in the back trace at LHS and RHS. + The back traces at LHS and RHS are expected to match according to + ftrace_match_backtrace. */ + +static void +ftrace_connect_backtrace (struct btrace_function *lhs, + struct btrace_function *rhs) +{ + while (lhs != NULL && rhs != NULL) + { + struct btrace_function *prev, *next; + + gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym)); + + /* Connecting LHS and RHS may change the up link. */ + prev = lhs; + next = rhs; + + lhs = ftrace_get_caller (lhs); + rhs = ftrace_get_caller (rhs); + + ftrace_connect_bfun (prev, next); + } +} + +/* Bridge the gap between two function segments left and right of a gap if their + respective back traces match in at least MIN_MATCHES functions. + + Returns non-zero if the gap could be bridged, zero otherwise. */ + +static int +ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs, + int min_matches) +{ + struct btrace_function *best_l, *best_r, *cand_l, *cand_r; + int best_matches; + + DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)", + rhs->insn_offset - 1, min_matches); + + best_matches = 0; + best_l = NULL; + best_r = NULL; + + /* We search the back traces of LHS and RHS for valid connections and connect + the two functon segments that give the longest combined back trace. */ + + for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l)) + for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r)) + { + int matches; + + matches = ftrace_match_backtrace (cand_l, cand_r); + if (best_matches < matches) + { + best_matches = matches; + best_l = cand_l; + best_r = cand_r; + } + } + + /* We need at least MIN_MATCHES matches. */ + gdb_assert (min_matches > 0); + if (best_matches < min_matches) + return 0; + + DEBUG_FTRACE ("..matches: %d", best_matches); + + /* We will fix up the level of BEST_R and succeeding function segments such + that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R. + + This will ignore the level of RHS and following if BEST_R != RHS. I.e. if + BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3). + + To catch this, we already fix up the level here where we can start at RHS + instead of at BEST_R. We will ignore the level fixup when connecting + BEST_L to BEST_R as they will already be on the same level. */ + ftrace_fixup_level (rhs, best_l->level - best_r->level); + + ftrace_connect_backtrace (best_l, best_r); + + return best_matches; +} + +/* Try to bridge gaps due to overflow or decode errors by connecting the + function segments that are separated by the gap. */ + +static void +btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps) +{ + VEC (bfun_s) *remaining; + struct cleanup *old_chain; + int min_matches; + + DEBUG ("bridge gaps"); + + remaining = NULL; + old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining); + + /* We require a minimum amount of matches for bridging a gap. The number of + required matches will be lowered with each iteration. + + The more matches the higher our confidence that the bridging is correct. + For big gaps or small traces, however, it may not be feasible to require a + high number of matches. */ + for (min_matches = 5; min_matches > 0; --min_matches) + { + /* Let's try to bridge as many gaps as we can. 
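
The candidate search in ftrace_bridge_gap reduces to the following self-contained sketch. struct frame, match and best_pair are illustrative stand-ins: an int id replaces the msym/sym comparison done by ftrace_function_switched, and the up pointer plays the role of ftrace_get_caller.

#include <stddef.h>

/* Hypothetical caller chain; ID stands in for the function symbol.  */
struct frame { int id; struct frame *up; };

/* Number of leading frames on which both chains agree, zero if they
   disagree immediately (the shape of ftrace_match_backtrace).  */
static int
match (const struct frame *l, const struct frame *r)
{
  int n = 0;

  for (; l != NULL && r != NULL; l = l->up, r = r->up)
    {
      if (l->id != r->id)
        return 0;
      ++n;
    }

  return n;
}

/* Try every caller pair left and right of the gap and keep the pair with
   the longest agreeing back trace; reject it below MIN_MATCHES.  */
static int
best_pair (struct frame *lhs, struct frame *rhs, int min_matches,
           struct frame **best_l, struct frame **best_r)
{
  struct frame *l, *r;
  int best = 0;

  for (l = lhs; l != NULL; l = l->up)
    for (r = rhs; r != NULL; r = r->up)
      {
        int m = match (l, r);

        if (m > best)
          {
            best = m;
            *best_l = l;
            *best_r = r;
          }
      }

  return best >= min_matches ? best : 0;
}

int
main (void)
{
  /* lhs -> a -> b left of the gap; rhs -> a -> b right of it.  */
  struct frame b = { 2, NULL }, a = { 1, &b };
  struct frame lhs = { 10, &a }, rhs = { 11, &a };
  struct frame *bl, *br;

  return best_pair (&lhs, &rhs, 1, &bl, &br) == 2 ? 0 : 1;
}
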
In some cases, we need to + skip a gap and revisit it again after we closed later gaps. */ + while (!VEC_empty (bfun_s, *gaps)) + { + struct btrace_function *gap; + unsigned int idx; + + for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx) + { + struct btrace_function *lhs, *rhs; + int bridged; + + /* We may have a sequence of gaps if we run from one error into + the next as we try to re-sync onto the trace stream. Ignore + all but the leftmost gap in such a sequence. + + Also ignore gaps at the beginning of the trace. */ + lhs = gap->flow.prev; + if (lhs == NULL || lhs->errcode != 0) + continue; + + /* Skip gaps to the right. */ + for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next) + if (rhs->errcode == 0) + break; + + /* Ignore gaps at the end of the trace. */ + if (rhs == NULL) + continue; + + bridged = ftrace_bridge_gap (lhs, rhs, min_matches); + + /* Keep track of gaps we were not able to bridge and try again. + If we just pushed them to the end of GAPS we would risk an + infinite loop in case we simply cannot bridge a gap. */ + if (bridged == 0) + VEC_safe_push (bfun_s, remaining, gap); + } + + /* Let's see if we made any progress. */ + if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps)) + break; + + VEC_free (bfun_s, *gaps); + + *gaps = remaining; + remaining = NULL; + } + + /* We get here if either GAPS is empty or if GAPS equals REMAINING. */ + if (VEC_empty (bfun_s, *gaps)) + break; + + VEC_free (bfun_s, remaining); + } + + do_cleanups (old_chain); + + /* We may omit this in some cases. Not sure it is worth the extra + complication, though. */ + ftrace_compute_global_level_offset (&tp->btrace); +} + /* Compute the function branch trace from BTS trace. */ static void btrace_compute_ftrace_bts (struct thread_info *tp, - const struct btrace_data_bts *btrace) + const struct btrace_data_bts *btrace, + VEC (bfun_s) **gaps) { struct btrace_thread_info *btinfo; struct btrace_function *begin, *end; struct gdbarch *gdbarch; - unsigned int blk, ngaps; + unsigned int blk; int level; gdbarch = target_gdbarch (); btinfo = &tp->btrace; begin = btinfo->begin; end = btinfo->end; - ngaps = btinfo->ngaps; level = begin != NULL ? -btinfo->level : INT_MAX; blk = VEC_length (btrace_block_s, btrace->blocks); @@ -612,16 +1006,17 @@ btrace_compute_ftrace_bts (struct thread_info *tp, /* We should hit the end of the block. Warn if we went too far. */ if (block->end < pc) { - /* Indicate the gap in the trace - unless we're at the - beginning. */ - if (begin != NULL) - { - warning (_("Recorded trace may be corrupted around %s."), - core_addr_to_string_nz (pc)); + /* Indicate the gap in the trace. */ + end = ftrace_new_gap (end, BDE_BTS_OVERFLOW); + if (begin == NULL) + begin = end; + + VEC_safe_push (bfun_s, *gaps, end); + + warning (_("Recorded trace may be corrupted at instruction " + "%u (pc = %s)."), end->insn_offset - 1, + core_addr_to_string_nz (pc)); - end = ftrace_new_gap (end, BDE_BTS_OVERFLOW); - ngaps += 1; - } break; } @@ -632,7 +1027,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp, /* Maintain the function level offset. For all but the last block, we do it here. 
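
The surrounding loop structure, reduced to its skeleton: a worklist that keeps unresolved items for the next pass, stops a round as soon as a full pass makes no progress, and then retries with a lower threshold. All names are illustrative; try_solve is a toy criterion chosen so that lowering the threshold eventually succeeds.

#include <stdio.h>

/* Toy criterion: an item succeeds once the threshold drops to its level.  */
static int
try_solve (int item, int threshold)
{
  return item >= threshold;
}

/* Skeleton of btrace_bridge_gaps' retry strategy, over at most 16 items.  */
static void
solve_all (int *unsolved, int *count)
{
  int threshold;

  for (threshold = 5; threshold > 0; --threshold)
    {
      while (*count > 0)
        {
          int remaining[16];
          int i, kept = 0;

          for (i = 0; i < *count; ++i)
            if (!try_solve (unsolved[i], threshold))
              remaining[kept++] = unsolved[i];

          /* A full pass without progress: lower the threshold instead of
             looping forever on unsolvable items.  */
          if (kept == *count)
            break;

          for (i = 0; i < kept; ++i)
            unsolved[i] = remaining[i];
          *count = kept;
        }

      if (*count == 0)
        break;
    }
}

int
main (void)
{
  int items[4] = { 7, 2, 9, 1 };
  int count = 4;

  solve_all (items, &count);
  printf ("unsolved after all rounds: %d\n", count);  /* 0 */
  return 0;
}
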
*/ if (blk != 0) - level = min (level, end->level); + level = std::min (level, end->level); size = 0; TRY @@ -647,6 +1042,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp, insn.pc = pc; insn.size = size; insn.iclass = ftrace_classify_insn (gdbarch, pc); + insn.flags = 0; ftrace_update_insns (end, &insn); @@ -657,13 +1053,15 @@ btrace_compute_ftrace_bts (struct thread_info *tp, /* We can't continue if we fail to compute the size. */ if (size <= 0) { - warning (_("Recorded trace may be incomplete around %s."), - core_addr_to_string_nz (pc)); - /* Indicate the gap in the trace. We just added INSN so we're not at the beginning. */ end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE); - ngaps += 1; + + VEC_safe_push (bfun_s, *gaps, end); + + warning (_("Recorded trace may be incomplete at instruction %u " + "(pc = %s)."), end->insn_offset - 1, + core_addr_to_string_nz (pc)); break; } @@ -677,13 +1075,12 @@ btrace_compute_ftrace_bts (struct thread_info *tp, and is not really part of the execution history, it shouldn't affect the level. */ if (blk == 0) - level = min (level, end->level); + level = std::min (level, end->level); } } btinfo->begin = begin; btinfo->end = end; - btinfo->ngaps = ngaps; /* LEVEL is the minimal function level of all btrace function segments. Define the global level offset to -LEVEL so all function levels are @@ -712,21 +1109,33 @@ pt_reclassify_insn (enum pt_insn_class iclass) } } +/* Return the btrace instruction flags for INSN. */ + +static btrace_insn_flags +pt_btrace_insn_flags (const struct pt_insn *insn) +{ + btrace_insn_flags flags = 0; + + if (insn->speculative) + flags |= BTRACE_INSN_FLAG_SPECULATIVE; + + return flags; +} + /* Add function branch trace using DECODER. */ static void ftrace_add_pt (struct pt_insn_decoder *decoder, struct btrace_function **pbegin, struct btrace_function **pend, int *plevel, - unsigned int *ngaps) + VEC (bfun_s) **gaps) { struct btrace_function *begin, *end, *upd; uint64_t offset; - int errcode, nerrors; + int errcode; begin = *pbegin; end = *pend; - nerrors = 0; for (;;) { struct btrace_insn btinsn; @@ -736,7 +1145,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder, if (errcode < 0) { if (errcode != -pte_eos) - warning (_("Failed to synchronize onto the Intel(R) Processor " + warning (_("Failed to synchronize onto the Intel Processor " "Trace stream: %s."), pt_errstr (pt_errcode (errcode))); break; } @@ -757,11 +1166,33 @@ ftrace_add_pt (struct pt_insn_decoder *decoder, flag. The ENABLED instruction flag means that we continued from some other instruction. Indicate this as a trace gap. */ if (insn.enabled) - *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED); + { + *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED); + + VEC_safe_push (bfun_s, *gaps, end); - /* Indicate trace overflows. */ - if (insn.resynced) - *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW); + pt_insn_get_offset (decoder, &offset); + + warning (_("Non-contiguous trace at instruction %u (offset " + "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."), + end->insn_offset - 1, offset, insn.ip); + } + } + + /* Indicate trace overflows. 
*/ + if (insn.resynced) + { + *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW); + if (begin == NULL) + *pbegin = begin = end; + + VEC_safe_push (bfun_s, *gaps, end); + + pt_insn_get_offset (decoder, &offset); + + warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 + ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1, + offset, insn.ip); } upd = ftrace_update_function (end, insn.ip); @@ -774,11 +1205,12 @@ ftrace_add_pt (struct pt_insn_decoder *decoder, } /* Maintain the function level offset. */ - *plevel = min (*plevel, end->level); + *plevel = std::min (*plevel, end->level); btinsn.pc = (CORE_ADDR) insn.ip; btinsn.size = (gdb_byte) insn.size; btinsn.iclass = pt_reclassify_insn (insn.iclass); + btinsn.flags = pt_btrace_insn_flags (&insn); ftrace_update_insns (end, &btinsn); } @@ -786,24 +1218,19 @@ ftrace_add_pt (struct pt_insn_decoder *decoder, if (errcode == -pte_eos) break; - /* If the gap is at the very beginning, we ignore it - we will have - less trace, but we won't have any holes in the trace. */ + /* Indicate the gap in the trace. */ + *pend = end = ftrace_new_gap (end, errcode); if (begin == NULL) - continue; + *pbegin = begin = end; + + VEC_safe_push (bfun_s, *gaps, end); pt_insn_get_offset (decoder, &offset); - warning (_("Failed to decode Intel(R) Processor Trace near trace " - "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."), + warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64 + ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1, offset, insn.ip, pt_errstr (pt_errcode (errcode))); - - /* Indicate the gap in the trace. */ - *pend = end = ftrace_new_gap (end, errcode); - *ngaps += 1; } - - if (nerrors > 0) - warning (_("The recorded execution trace may have gaps.")); } /* A callback function to allow the trace decoder to read the inferior's @@ -811,24 +1238,25 @@ ftrace_add_pt (struct pt_insn_decoder *decoder, static int btrace_pt_readmem_callback (gdb_byte *buffer, size_t size, - const struct pt_asid *asid, CORE_ADDR pc, + const struct pt_asid *asid, uint64_t pc, void *context) { - int errcode; + int result, errcode; + result = (int) size; TRY { - errcode = target_read_code (pc, buffer, size); + errcode = target_read_code ((CORE_ADDR) pc, buffer, size); if (errcode != 0) - return -pte_nomap; + result = -pte_nomap; } CATCH (error, RETURN_MASK_ERROR) { - return -pte_nomap; + result = -pte_nomap; } END_CATCH - return size; + return result; } /* Translate the vendor from one enum to another. */ @@ -865,11 +1293,13 @@ static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder, btrace_add_pc (tp); } -/* Compute the function branch trace from Intel(R) Processor Trace. */ +/* Compute the function branch trace from Intel Processor Trace + format. 
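
The read-memory callback contract used above, in a standalone sketch: copy up to size bytes at pc into buffer and return the byte count, or a negative error if the address is unmapped. The rewritten btrace_pt_readmem_callback funnels both outcomes through one result variable so that it never returns from inside TRY. struct flat_image and read_flat_image are hypothetical; -1 stands in for -pte_nomap, and the asid parameter is omitted.

#include <stdint.h>
#include <string.h>

/* The "inferior" is just a flat array passed via the context pointer.  */
struct flat_image { uint64_t base; const uint8_t *data; size_t size; };

static int
read_flat_image (uint8_t *buffer, size_t size, uint64_t pc, void *context)
{
  const struct flat_image *img = (const struct flat_image *) context;

  if (pc < img->base || pc - img->base >= img->size)
    return -1;                           /* Unmapped: -pte_nomap's role.  */

  if (size > img->size - (pc - img->base))
    size = img->size - (pc - img->base); /* Clamp to the mapping.  */

  memcpy (buffer, img->data + (pc - img->base), size);
  return (int) size;
}

int
main (void)
{
  const uint8_t code[] = { 0x90, 0xc3 };
  struct flat_image img = { 0x1000, code, sizeof (code) };
  uint8_t byte;

  return read_flat_image (&byte, 1, 0x1001, &img) == 1 && byte == 0xc3 ? 0 : 1;
}
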
*/ static void btrace_compute_ftrace_pt (struct thread_info *tp, - const struct btrace_data_pt *btrace) + const struct btrace_data_pt *btrace, + VEC (bfun_s) **gaps) { struct btrace_thread_info *btinfo; struct pt_insn_decoder *decoder; @@ -893,12 +1323,12 @@ btrace_compute_ftrace_pt (struct thread_info *tp, errcode = pt_cpu_errata (&config.errata, &config.cpu); if (errcode < 0) - error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."), + error (_("Failed to configure the Intel Processor Trace decoder: %s."), pt_errstr (pt_errcode (errcode))); decoder = pt_insn_alloc_decoder (&config); if (decoder == NULL) - error (_("Failed to allocate the Intel(R) Processor Trace decoder.")); + error (_("Failed to allocate the Intel Processor Trace decoder.")); TRY { @@ -906,15 +1336,14 @@ btrace_compute_ftrace_pt (struct thread_info *tp, image = pt_insn_get_image(decoder); if (image == NULL) - error (_("Failed to configure the Intel(R) Processor Trace decoder.")); + error (_("Failed to configure the Intel Processor Trace decoder.")); errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL); if (errcode < 0) - error (_("Failed to configure the Intel(R) Processor Trace decoder: " + error (_("Failed to configure the Intel Processor Trace decoder: " "%s."), pt_errstr (pt_errcode (errcode))); - ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, - &btinfo->ngaps); + ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps); } CATCH (error, RETURN_MASK_ALL) { @@ -922,7 +1351,8 @@ btrace_compute_ftrace_pt (struct thread_info *tp, if (error.reason == RETURN_QUIT && btinfo->end != NULL) { btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT); - btinfo->ngaps++; + + VEC_safe_push (bfun_s, *gaps, btinfo->end); } btrace_finalize_ftrace_pt (decoder, tp, level); @@ -938,7 +1368,8 @@ btrace_compute_ftrace_pt (struct thread_info *tp, static void btrace_compute_ftrace_pt (struct thread_info *tp, - const struct btrace_data_pt *btrace) + const struct btrace_data_pt *btrace, + VEC (bfun_s) **gaps) { internal_error (__FILE__, __LINE__, _("Unexpected branch trace format.")); } @@ -949,7 +1380,8 @@ btrace_compute_ftrace_pt (struct thread_info *tp, a thread given by BTINFO. 
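
For reference, the decoder setup above follows libipt's usual pattern. A hedged sketch of that pattern, assuming libipt is installed (<intel-pt.h>, link with -lipt), with error handling abbreviated; decode_all and readmem are illustrative names, and the raw PT data in BUF must have been collected elsewhere.

#include <string.h>
#include <stdio.h>
#include <inttypes.h>
#include <intel-pt.h>

int
decode_all (uint8_t *buf, size_t size,
            int (*readmem) (uint8_t *, size_t, const struct pt_asid *,
                            uint64_t, void *))
{
  struct pt_config config;
  struct pt_insn_decoder *decoder;
  int errcode;

  memset (&config, 0, sizeof (config));
  config.size = sizeof (config);
  config.begin = buf;
  config.end = buf + size;

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    return -1;

  pt_image_set_callback (pt_insn_get_image (decoder), readmem, NULL);

  for (;;)
    {
      struct pt_insn insn;

      /* Find the next synchronization point in the trace.  */
      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        break;

      /* Decode instructions until the next error or the end of trace.  */
      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof (insn));
          if (errcode < 0)
            break;

          printf ("0x%" PRIx64 "\n", insn.ip);
        }

      if (errcode == -pte_eos)
        break;
    }

  pt_insn_free_decoder (decoder);
  return errcode == -pte_eos ? 0 : errcode;
}
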
*/ static void -btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace) +btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace, + VEC (bfun_s) **gaps) { DEBUG ("compute ftrace"); @@ -959,17 +1391,53 @@ btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace) return; case BTRACE_FORMAT_BTS: - btrace_compute_ftrace_bts (tp, &btrace->variant.bts); + btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps); return; case BTRACE_FORMAT_PT: - btrace_compute_ftrace_pt (tp, &btrace->variant.pt); + btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps); return; } internal_error (__FILE__, __LINE__, _("Unkown branch trace format.")); } +static void +btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps) +{ + if (!VEC_empty (bfun_s, *gaps)) + { + tp->btrace.ngaps += VEC_length (bfun_s, *gaps); + btrace_bridge_gaps (tp, gaps); + } +} + +static void +btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace) +{ + VEC (bfun_s) *gaps; + struct cleanup *old_chain; + + gaps = NULL; + old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps); + + TRY + { + btrace_compute_ftrace_1 (tp, btrace, &gaps); + } + CATCH (error, RETURN_MASK_ALL) + { + btrace_finalize_ftrace (tp, &gaps); + + throw_exception (error); + } + END_CATCH + + btrace_finalize_ftrace (tp, &gaps); + + do_cleanups (old_chain); +} + /* Add an entry for the current PC. */ static void @@ -1007,17 +1475,46 @@ btrace_enable (struct thread_info *tp, const struct btrace_config *conf) if (tp->btrace.target != NULL) return; +#if !defined (HAVE_LIBIPT) + if (conf->format == BTRACE_FORMAT_PT) + error (_("GDB does not support Intel Processor Trace.")); +#endif /* !defined (HAVE_LIBIPT) */ + if (!target_supports_btrace (conf->format)) error (_("Target does not support branch tracing.")); - DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid)); + DEBUG ("enable thread %s (%s)", print_thread_id (tp), + target_pid_to_str (tp->ptid)); tp->btrace.target = target_enable_btrace (tp->ptid, conf); - /* Add an entry for the current PC so we start tracing from where we - enabled it. */ - if (tp->btrace.target != NULL) - btrace_add_pc (tp); + /* We're done if we failed to enable tracing. */ + if (tp->btrace.target == NULL) + return; + + /* We need to undo the enable in case of errors. */ + TRY + { + /* Add an entry for the current PC so we start tracing from where we + enabled it. + + If we can't access TP's registers, TP is most likely running. In this + case, we can't really say where tracing was enabled so it should be + safe to simply skip this step. + + This is not relevant for BTRACE_FORMAT_PT since the trace will already + start at the PC at which tracing was enabled. */ + if (conf->format != BTRACE_FORMAT_PT + && can_access_registers_ptid (tp->ptid)) + btrace_add_pc (tp); + } + CATCH (exception, RETURN_MASK_ALL) + { + btrace_disable (tp); + + throw_exception (exception); + } + END_CATCH } /* See btrace.h. 
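
The enable path now rolls back on failure: if recording the current PC throws, tracing is disabled again before the exception is rethrown. The same discipline in plain C, using error codes instead of TRY/CATCH; every function here is hypothetical.

#include <stdio.h>

static int start_tracing (void)   { return 0; }  /* 0 on success.  */
static int record_start_pc (void) { return -1; } /* Simulated failure.  */
static void stop_tracing (void)   { puts ("rolled back"); }

static int
enable_with_rollback (void)
{
  int errcode;

  errcode = start_tracing ();
  if (errcode != 0)
    return errcode;

  /* Every step that can fail after this point must undo the enable,
     just as btrace_enable disables tracing before rethrowing.  */
  errcode = record_start_pc ();
  if (errcode != 0)
    {
      stop_tracing ();
      return errcode;
    }

  return 0;
}

int
main (void)
{
  /* Exits non-zero because the sketch simulates a failing second step.  */
  return enable_with_rollback () == 0 ? 0 : 1;
}
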
*/ @@ -1042,7 +1539,8 @@ btrace_disable (struct thread_info *tp) if (btp->target == NULL) return; - DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid)); + DEBUG ("disable thread %s (%s)", print_thread_id (tp), + target_pid_to_str (tp->ptid)); target_disable_btrace (btp->target); btp->target = NULL; @@ -1061,7 +1559,8 @@ btrace_teardown (struct thread_info *tp) if (btp->target == NULL) return; - DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid)); + DEBUG ("teardown thread %s (%s)", print_thread_id (tp), + target_pid_to_str (tp->ptid)); target_teardown_btrace (btp->target); btp->target = NULL; @@ -1197,29 +1696,109 @@ btrace_clear_history (struct btrace_thread_info *btinfo) btinfo->replay = NULL; } -/* See btrace.h. */ +/* Clear the branch trace maintenance histories in BTINFO. */ -void -btrace_fetch (struct thread_info *tp) +static void +btrace_maint_clear (struct btrace_thread_info *btinfo) { - struct btrace_thread_info *btinfo; - struct btrace_target_info *tinfo; - struct btrace_data btrace; - struct cleanup *cleanup; - int errcode; + switch (btinfo->data.format) + { + default: + break; - DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid)); + case BTRACE_FORMAT_BTS: + btinfo->maint.variant.bts.packet_history.begin = 0; + btinfo->maint.variant.bts.packet_history.end = 0; + break; - btinfo = &tp->btrace; - tinfo = btinfo->target; - if (tinfo == NULL) - return; +#if defined (HAVE_LIBIPT) + case BTRACE_FORMAT_PT: + xfree (btinfo->maint.variant.pt.packets); - /* There's no way we could get new trace while replaying. - On the other hand, delta trace would return a partial record with the - current PC, which is the replay PC, not the last PC, as expected. */ - if (btinfo->replay != NULL) - return; + btinfo->maint.variant.pt.packets = NULL; + btinfo->maint.variant.pt.packet_history.begin = 0; + btinfo->maint.variant.pt.packet_history.end = 0; + break; +#endif /* defined (HAVE_LIBIPT) */ + } +} + +/* See btrace.h. */ + +const char * +btrace_decode_error (enum btrace_format format, int errcode) +{ + switch (format) + { + case BTRACE_FORMAT_BTS: + switch (errcode) + { + case BDE_BTS_OVERFLOW: + return _("instruction overflow"); + + case BDE_BTS_INSN_SIZE: + return _("unknown instruction"); + + default: + break; + } + break; + +#if defined (HAVE_LIBIPT) + case BTRACE_FORMAT_PT: + switch (errcode) + { + case BDE_PT_USER_QUIT: + return _("trace decode cancelled"); + + case BDE_PT_DISABLED: + return _("disabled"); + + case BDE_PT_OVERFLOW: + return _("overflow"); + + default: + if (errcode < 0) + return pt_errstr (pt_errcode (errcode)); + break; + } + break; +#endif /* defined (HAVE_LIBIPT) */ + + default: + break; + } + + return _("unknown"); +} + +/* See btrace.h. */ + +void +btrace_fetch (struct thread_info *tp) +{ + struct btrace_thread_info *btinfo; + struct btrace_target_info *tinfo; + struct btrace_data btrace; + struct cleanup *cleanup; + int errcode; + + DEBUG ("fetch thread %s (%s)", print_thread_id (tp), + target_pid_to_str (tp->ptid)); + + btinfo = &tp->btrace; + tinfo = btinfo->target; + if (tinfo == NULL) + return; + + /* There's no way we could get new trace while replaying. + On the other hand, delta trace would return a partial record with the + current PC, which is the replay PC, not the last PC, as expected. */ + if (btinfo->replay != NULL) + return; + + /* We should not be called on running or exited threads. 
*/ + gdb_assert (can_access_registers_ptid (tp->ptid)); btrace_data_init (&btrace); cleanup = make_cleanup_btrace_data (&btrace); @@ -1260,8 +1839,19 @@ btrace_fetch (struct thread_info *tp) /* Compute the trace, provided we have any. */ if (!btrace_data_empty (&btrace)) { + struct btrace_function *bfun; + + /* Store the raw trace data. The stored data will be cleared in + btrace_clear, so we always append the new trace. */ + btrace_data_append (&btinfo->data, &btrace); + btrace_maint_clear (btinfo); + + VEC_truncate (btrace_fun_p, btinfo->functions, 0); btrace_clear_history (btinfo); btrace_compute_ftrace (tp, &btrace); + + for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next) + VEC_safe_push (btrace_fun_p, btinfo->functions, bfun); } do_cleanups (cleanup); @@ -1275,7 +1865,8 @@ btrace_clear (struct thread_info *tp) struct btrace_thread_info *btinfo; struct btrace_function *it, *trash; - DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid)); + DEBUG ("clear thread %s (%s)", print_thread_id (tp), + target_pid_to_str (tp->ptid)); /* Make sure btrace frames that may hold a pointer into the branch trace data are destroyed. */ @@ -1283,6 +1874,8 @@ btrace_clear (struct thread_info *tp) btinfo = &tp->btrace; + VEC_free (btrace_fun_p, btinfo->functions); + it = btinfo->begin; while (it != NULL) { @@ -1296,6 +1889,9 @@ btrace_clear (struct thread_info *tp) btinfo->end = NULL; btinfo->ngaps = 0; + /* Must clear the maint data before - it depends on BTINFO->DATA. */ + btrace_maint_clear (btinfo); + btrace_data_clear (&btinfo->data); btrace_clear_history (btinfo); } @@ -1321,7 +1917,8 @@ check_xml_btrace_version (struct gdb_xml_parser *parser, const struct gdb_xml_element *element, void *user_data, VEC (gdb_xml_value_s) *attributes) { - const char *version = xml_find_attribute (attributes, "version")->value; + const char *version + = (const char *) xml_find_attribute (attributes, "version")->value; if (strcmp (version, "1.0") != 0) gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version); @@ -1338,7 +1935,7 @@ parse_xml_btrace_block (struct gdb_xml_parser *parser, struct btrace_block *block; ULONGEST *begin, *end; - btrace = user_data; + btrace = (struct btrace_data *) user_data; switch (btrace->format) { @@ -1354,8 +1951,8 @@ parse_xml_btrace_block (struct gdb_xml_parser *parser, gdb_xml_error (parser, _("Btrace format error.")); } - begin = xml_find_attribute (attributes, "begin")->value; - end = xml_find_attribute (attributes, "end")->value; + begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value; + end = (ULONGEST *) xml_find_attribute (attributes, "end")->value; block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL); block->begin = *begin; @@ -1366,20 +1963,19 @@ parse_xml_btrace_block (struct gdb_xml_parser *parser, static void parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text, - gdb_byte **pdata, unsigned long *psize) + gdb_byte **pdata, size_t *psize) { struct cleanup *cleanup; gdb_byte *data, *bin; - unsigned long size; - size_t len; + size_t len, size; len = strlen (body_text); - size = len / 2; - - if ((size_t) size * 2 != len) + if (len % 2 != 0) gdb_xml_error (parser, _("Bad raw data size.")); - bin = data = xmalloc (size); + size = len / 2; + + bin = data = (gdb_byte *) xmalloc (size); cleanup = make_cleanup (xfree, data); /* We use hex encoding - see common/rsp-low.h. 
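
The decoding step in standalone form, using strtoul in place of GDB's hex2bin from common/rsp-low.h (decode_hex is an illustrative name): a hex string of even length becomes len/2 raw bytes.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Returns a malloc'd buffer and its size via PSIZE, or NULL on bad input.  */
static unsigned char *
decode_hex (const char *text, size_t *psize)
{
  size_t len = strlen (text), i;
  unsigned char *data;

  if (len % 2 != 0)
    return NULL;                /* Bad raw data size.  */

  data = malloc (len / 2);
  if (data == NULL)
    return NULL;

  for (i = 0; i < len; i += 2)
    {
      char byte[3] = { text[i], text[i + 1], 0 };
      char *end;

      data[i / 2] = (unsigned char) strtoul (byte, &end, 16);
      if (end != byte + 2)
        {
          free (data);          /* Non-hex character.  */
          return NULL;
        }
    }

  *psize = len / 2;
  return data;
}

int
main (void)
{
  size_t size;
  unsigned char *data = decode_hex ("02b0ff", &size);

  printf ("%zu bytes, first = 0x%02x\n", size, data[0]);  /* 3 bytes, 0x02 */
  free (data);
  return 0;
}
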
*/ @@ -1415,12 +2011,12 @@ parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser, const char *vendor; ULONGEST *family, *model, *stepping; - vendor = xml_find_attribute (attributes, "vendor")->value; - family = xml_find_attribute (attributes, "family")->value; - model = xml_find_attribute (attributes, "model")->value; - stepping = xml_find_attribute (attributes, "stepping")->value; + vendor = (const char *) xml_find_attribute (attributes, "vendor")->value; + family = (ULONGEST *) xml_find_attribute (attributes, "family")->value; + model = (ULONGEST *) xml_find_attribute (attributes, "model")->value; + stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value; - btrace = user_data; + btrace = (struct btrace_data *) user_data; if (strcmp (vendor, "GenuineIntel") == 0) btrace->variant.pt.config.cpu.vendor = CV_INTEL; @@ -1439,7 +2035,7 @@ parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser, { struct btrace_data *btrace; - btrace = user_data; + btrace = (struct btrace_data *) user_data; parse_xml_raw (parser, body_text, &btrace->variant.pt.data, &btrace->variant.pt.size); } @@ -1453,7 +2049,7 @@ parse_xml_btrace_pt (struct gdb_xml_parser *parser, { struct btrace_data *btrace; - btrace = user_data; + btrace = (struct btrace_data *) user_data; btrace->format = BTRACE_FORMAT_PT; btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN; btrace->variant.pt.data = NULL; @@ -1548,7 +2144,7 @@ parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser, struct btrace_config *conf; struct gdb_xml_value *size; - conf = user_data; + conf = (struct btrace_config *) user_data; conf->format = BTRACE_FORMAT_BTS; conf->bts.size = 0; @@ -1567,7 +2163,7 @@ parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser, struct btrace_config *conf; struct gdb_xml_value *size; - conf = user_data; + conf = (struct btrace_config *) user_data; conf->format = BTRACE_FORMAT_PT; conf->pt.size = 0; @@ -1653,18 +2249,18 @@ btrace_insn_get (const struct btrace_insn_iterator *it) /* See btrace.h. */ -unsigned int -btrace_insn_number (const struct btrace_insn_iterator *it) +int +btrace_insn_get_error (const struct btrace_insn_iterator *it) { - const struct btrace_function *bfun; - - bfun = it->function; + return it->function->errcode; +} - /* Return zero if the iterator points to a gap in the trace. */ - if (bfun->errcode != 0) - return 0; +/* See btrace.h. */ - return bfun->insn_offset + it->index; +unsigned int +btrace_insn_number (const struct btrace_insn_iterator *it) +{ + return it->function->insn_offset + it->index; } /* See btrace.h. */ @@ -1752,7 +2348,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride) space = end - index; /* Advance the iterator as far as possible within this segment. */ - adv = min (space, stride); + adv = std::min (space, stride); stride -= adv; index += adv; steps += adv; @@ -1831,7 +2427,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride) } /* Advance the iterator as far as possible within this segment. */ - adv = min (index, stride); + adv = std::min (index, stride); stride -= adv; index -= adv; @@ -1859,37 +2455,6 @@ btrace_insn_cmp (const struct btrace_insn_iterator *lhs, lnum = btrace_insn_number (lhs); rnum = btrace_insn_number (rhs); - /* A gap has an instruction number of zero. Things are getting more - complicated if gaps are involved. - - We take the instruction number offset from the iterator's function. - This is the number of the first instruction after the gap. - - This is OK as long as both lhs and rhs point to gaps. 
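
The advance logic shared by btrace_insn_next and btrace_insn_prev, extracted into a sketch with stand-in types (struct seg and struct iter are hypothetical, and segments are assumed non-empty): move at most stride instructions, taking min (space, stride) within each segment.

#include <stddef.h>

struct seg  { unsigned int len; struct seg *next; };
struct iter { struct seg *seg; unsigned int index; };

static unsigned int
advance (struct iter *it, unsigned int stride)
{
  unsigned int steps = 0;

  while (stride > 0)
    {
      /* Advance as far as possible within the current segment.  */
      unsigned int space = it->seg->len - it->index;
      unsigned int adv = space < stride ? space : stride;

      it->index += adv;
      stride -= adv;
      steps += adv;

      /* At the end of a segment, move on or stop at the end of the trace.  */
      if (it->index == it->seg->len)
        {
          if (it->seg->next == NULL)
            break;
          it->seg = it->seg->next;
          it->index = 0;
        }
    }

  return steps;  /* Instructions actually moved, as in btrace_insn_next.  */
}

int
main (void)
{
  struct seg s2 = { 3, NULL };
  struct seg s1 = { 5, &s2 };
  struct iter it = { &s1, 4 };

  /* One step finishes s1; two more land at index 2 of s2.  */
  return advance (&it, 3) == 3 && it.index == 2 ? 0 : 1;
}
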
If only one of - them does, we need to adjust the number based on the other's regular - instruction number. Otherwise, a gap might compare equal to an - instruction. */ - - if (lnum == 0 && rnum == 0) - { - lnum = lhs->function->insn_offset; - rnum = rhs->function->insn_offset; - } - else if (lnum == 0) - { - lnum = lhs->function->insn_offset; - - if (lnum == rnum) - lnum -= 1; - } - else if (rnum == 0) - { - rnum = rhs->function->insn_offset; - - if (rnum == lnum) - rnum -= 1; - } - return (int) (lnum - rnum); } @@ -1901,31 +2466,45 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it, unsigned int number) { const struct btrace_function *bfun; - unsigned int end, length; + unsigned int upper, lower; - for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev) - { - /* Skip gaps. */ - if (bfun->errcode != 0) - continue; + if (VEC_empty (btrace_fun_p, btinfo->functions)) + return 0; - if (bfun->insn_offset <= number) - break; - } + lower = 0; + bfun = VEC_index (btrace_fun_p, btinfo->functions, lower); + if (number < bfun->insn_offset) + return 0; - if (bfun == NULL) + upper = VEC_length (btrace_fun_p, btinfo->functions) - 1; + bfun = VEC_index (btrace_fun_p, btinfo->functions, upper); + if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun)) return 0; - length = VEC_length (btrace_insn_s, bfun->insn); - gdb_assert (length > 0); + /* We assume that there are no holes in the numbering. */ + for (;;) + { + const unsigned int average = lower + (upper - lower) / 2; - end = bfun->insn_offset + length; - if (end <= number) - return 0; + bfun = VEC_index (btrace_fun_p, btinfo->functions, average); + + if (number < bfun->insn_offset) + { + upper = average - 1; + continue; + } + + if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun)) + { + lower = average + 1; + continue; + } + + break; + } it->function = bfun; it->index = number - bfun->insn_offset; - return 1; } @@ -2134,7 +2713,7 @@ btrace_set_insn_history (struct btrace_thread_info *btinfo, const struct btrace_insn_iterator *end) { if (btinfo->insn_history == NULL) - btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history)); + btinfo->insn_history = XCNEW (struct btrace_insn_history); btinfo->insn_history->begin = *begin; btinfo->insn_history->end = *end; @@ -2150,7 +2729,7 @@ btrace_set_call_history (struct btrace_thread_info *btinfo, gdb_assert (begin->btinfo == end->btinfo); if (btinfo->call_history == NULL) - btinfo->call_history = xzalloc (sizeof (*btinfo->call_history)); + btinfo->call_history = XCNEW (struct btrace_call_history); btinfo->call_history->begin = *begin; btinfo->call_history->end = *end; @@ -2188,7 +2767,7 @@ btrace_is_empty (struct thread_info *tp) static void do_btrace_data_cleanup (void *arg) { - btrace_data_fini (arg); + btrace_data_fini ((struct btrace_data *) arg); } /* See btrace.h. */ @@ -2198,3 +2777,718 @@ make_cleanup_btrace_data (struct btrace_data *data) { return make_cleanup (do_btrace_data_cleanup, data); } + +#if defined (HAVE_LIBIPT) + +/* Print a single packet. 
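
The new lookup in btrace_find_insn_by_number, restated as a self-contained binary search over parallel offset/length arrays (an illustrative reduction; GDB searches the btinfo->functions vector). Segment I covers [offset[I], offset[I] + len[I]), and the numbering is assumed to have no holes.

#include <stdio.h>

/* Returns the segment index, or -1 if NUMBER is out of range.  */
static int
find_segment (const unsigned int *offset, const unsigned int *len,
              int nsegs, unsigned int number)
{
  int lower = 0, upper = nsegs - 1;

  if (nsegs == 0 || number < offset[0]
      || number >= offset[upper] + len[upper])
    return -1;

  while (lower <= upper)
    {
      int mid = lower + (upper - lower) / 2;

      if (number < offset[mid])
        upper = mid - 1;
      else if (number >= offset[mid] + len[mid])
        lower = mid + 1;
      else
        return mid;
    }

  return -1;  /* Unreachable if the numbering is gap-free.  */
}

int
main (void)
{
  unsigned int offset[] = { 0, 5, 6 }, len[] = { 5, 1, 3 };

  printf ("%d %d %d\n",
          find_segment (offset, len, 3, 4),    /* 0 */
          find_segment (offset, len, 3, 5),    /* 1 (the gap)  */
          find_segment (offset, len, 3, 8));   /* 2 */
  return 0;
}
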
*/ + +static void +pt_print_packet (const struct pt_packet *packet) +{ + switch (packet->type) + { + default: + printf_unfiltered (("[??: %x]"), packet->type); + break; + + case ppt_psb: + printf_unfiltered (("psb")); + break; + + case ppt_psbend: + printf_unfiltered (("psbend")); + break; + + case ppt_pad: + printf_unfiltered (("pad")); + break; + + case ppt_tip: + printf_unfiltered (("tip %u: 0x%" PRIx64 ""), + packet->payload.ip.ipc, + packet->payload.ip.ip); + break; + + case ppt_tip_pge: + printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""), + packet->payload.ip.ipc, + packet->payload.ip.ip); + break; + + case ppt_tip_pgd: + printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""), + packet->payload.ip.ipc, + packet->payload.ip.ip); + break; + + case ppt_fup: + printf_unfiltered (("fup %u: 0x%" PRIx64 ""), + packet->payload.ip.ipc, + packet->payload.ip.ip); + break; + + case ppt_tnt_8: + printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""), + packet->payload.tnt.bit_size, + packet->payload.tnt.payload); + break; + + case ppt_tnt_64: + printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""), + packet->payload.tnt.bit_size, + packet->payload.tnt.payload); + break; + + case ppt_pip: + printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3, + packet->payload.pip.nr ? (" nr") : ("")); + break; + + case ppt_tsc: + printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc); + break; + + case ppt_cbr: + printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio); + break; + + case ppt_mode: + switch (packet->payload.mode.leaf) + { + default: + printf_unfiltered (("mode %u"), packet->payload.mode.leaf); + break; + + case pt_mol_exec: + printf_unfiltered (("mode.exec%s%s"), + packet->payload.mode.bits.exec.csl + ? (" cs.l") : (""), + packet->payload.mode.bits.exec.csd + ? (" cs.d") : ("")); + break; + + case pt_mol_tsx: + printf_unfiltered (("mode.tsx%s%s"), + packet->payload.mode.bits.tsx.intx + ? (" intx") : (""), + packet->payload.mode.bits.tsx.abrt + ? (" abrt") : ("")); + break; + } + break; + + case ppt_ovf: + printf_unfiltered (("ovf")); + break; + + case ppt_stop: + printf_unfiltered (("stop")); + break; + + case ppt_vmcs: + printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base); + break; + + case ppt_tma: + printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc, + packet->payload.tma.fc); + break; + + case ppt_mtc: + printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc); + break; + + case ppt_cyc: + printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value); + break; + + case ppt_mnt: + printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload); + break; + } +} + +/* Decode packets into MAINT using DECODER. 
*/ + +static void +btrace_maint_decode_pt (struct btrace_maint_info *maint, + struct pt_packet_decoder *decoder) +{ + int errcode; + + for (;;) + { + struct btrace_pt_packet packet; + + errcode = pt_pkt_sync_forward (decoder); + if (errcode < 0) + break; + + for (;;) + { + pt_pkt_get_offset (decoder, &packet.offset); + + errcode = pt_pkt_next (decoder, &packet.packet, + sizeof(packet.packet)); + if (errcode < 0) + break; + + if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad) + { + packet.errcode = pt_errcode (errcode); + VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets, + &packet); + } + } + + if (errcode == -pte_eos) + break; + + packet.errcode = pt_errcode (errcode); + VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets, + &packet); + + warning (_("Error at trace offset 0x%" PRIx64 ": %s."), + packet.offset, pt_errstr (packet.errcode)); + } + + if (errcode != -pte_eos) + warning (_("Failed to synchronize onto the Intel Processor Trace " + "stream: %s."), pt_errstr (pt_errcode (errcode))); +} + +/* Update the packet history in BTINFO. */ + +static void +btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo) +{ + volatile struct gdb_exception except; + struct pt_packet_decoder *decoder; + struct btrace_data_pt *pt; + struct pt_config config; + int errcode; + + pt = &btinfo->data.variant.pt; + + /* Nothing to do if there is no trace. */ + if (pt->size == 0) + return; + + memset (&config, 0, sizeof(config)); + + config.size = sizeof (config); + config.begin = pt->data; + config.end = pt->data + pt->size; + + config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor); + config.cpu.family = pt->config.cpu.family; + config.cpu.model = pt->config.cpu.model; + config.cpu.stepping = pt->config.cpu.stepping; + + errcode = pt_cpu_errata (&config.errata, &config.cpu); + if (errcode < 0) + error (_("Failed to configure the Intel Processor Trace decoder: %s."), + pt_errstr (pt_errcode (errcode))); + + decoder = pt_pkt_alloc_decoder (&config); + if (decoder == NULL) + error (_("Failed to allocate the Intel Processor Trace decoder.")); + + TRY + { + btrace_maint_decode_pt (&btinfo->maint, decoder); + } + CATCH (except, RETURN_MASK_ALL) + { + pt_pkt_free_decoder (decoder); + + if (except.reason < 0) + throw_exception (except); + } + END_CATCH + + pt_pkt_free_decoder (decoder); +} + +#endif /* !defined (HAVE_LIBIPT) */ + +/* Update the packet maintenance information for BTINFO and store the + low and high bounds into BEGIN and END, respectively. + Store the current iterator state into FROM and TO. */ + +static void +btrace_maint_update_packets (struct btrace_thread_info *btinfo, + unsigned int *begin, unsigned int *end, + unsigned int *from, unsigned int *to) +{ + switch (btinfo->data.format) + { + default: + *begin = 0; + *end = 0; + *from = 0; + *to = 0; + break; + + case BTRACE_FORMAT_BTS: + /* Nothing to do - we operate directly on BTINFO->DATA. 
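
The same sync-then-iterate shape as a standalone libipt packet walk, assuming libipt is installed (<intel-pt.h>, link with -lipt); dump_packets is an illustrative name and error handling is abbreviated.

#include <string.h>
#include <stdio.h>
#include <inttypes.h>
#include <intel-pt.h>

void
dump_packets (uint8_t *buf, size_t size)
{
  struct pt_config config;
  struct pt_packet_decoder *decoder;
  int errcode;

  memset (&config, 0, sizeof (config));
  config.size = sizeof (config);
  config.begin = buf;
  config.end = buf + size;

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    return;

  for (;;)
    {
      struct pt_packet packet;
      uint64_t offset;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &offset);

          errcode = pt_pkt_next (decoder, &packet, sizeof (packet));
          if (errcode < 0)
            break;

          /* Skip PADs, as "maint btrace pt skip-pad" does by default.  */
          if (packet.type != ppt_pad)
            printf ("0x%" PRIx64 ": type %d\n", offset, (int) packet.type);
        }

      if (errcode == -pte_eos)
        break;
    }

  pt_pkt_free_decoder (decoder);
}
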
*/ + *begin = 0; + *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks); + *from = btinfo->maint.variant.bts.packet_history.begin; + *to = btinfo->maint.variant.bts.packet_history.end; + break; + +#if defined (HAVE_LIBIPT) + case BTRACE_FORMAT_PT: + if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets)) + btrace_maint_update_pt_packets (btinfo); + + *begin = 0; + *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets); + *from = btinfo->maint.variant.pt.packet_history.begin; + *to = btinfo->maint.variant.pt.packet_history.end; + break; +#endif /* defined (HAVE_LIBIPT) */ + } +} + +/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and + update the current iterator position. */ + +static void +btrace_maint_print_packets (struct btrace_thread_info *btinfo, + unsigned int begin, unsigned int end) +{ + switch (btinfo->data.format) + { + default: + break; + + case BTRACE_FORMAT_BTS: + { + VEC (btrace_block_s) *blocks; + unsigned int blk; + + blocks = btinfo->data.variant.bts.blocks; + for (blk = begin; blk < end; ++blk) + { + const btrace_block_s *block; + + block = VEC_index (btrace_block_s, blocks, blk); + + printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk, + core_addr_to_string_nz (block->begin), + core_addr_to_string_nz (block->end)); + } + + btinfo->maint.variant.bts.packet_history.begin = begin; + btinfo->maint.variant.bts.packet_history.end = end; + } + break; + +#if defined (HAVE_LIBIPT) + case BTRACE_FORMAT_PT: + { + VEC (btrace_pt_packet_s) *packets; + unsigned int pkt; + + packets = btinfo->maint.variant.pt.packets; + for (pkt = begin; pkt < end; ++pkt) + { + const struct btrace_pt_packet *packet; + + packet = VEC_index (btrace_pt_packet_s, packets, pkt); + + printf_unfiltered ("%u\t", pkt); + printf_unfiltered ("0x%" PRIx64 "\t", packet->offset); + + if (packet->errcode == pte_ok) + pt_print_packet (&packet->packet); + else + printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode)); + + printf_unfiltered ("\n"); + } + + btinfo->maint.variant.pt.packet_history.begin = begin; + btinfo->maint.variant.pt.packet_history.end = end; + } + break; +#endif /* defined (HAVE_LIBIPT) */ + } +} + +/* Read a number from an argument string. */ + +static unsigned int +get_uint (char **arg) +{ + char *begin, *end, *pos; + unsigned long number; + + begin = *arg; + pos = skip_spaces (begin); + + if (!isdigit (*pos)) + error (_("Expected positive number, got: %s."), pos); + + number = strtoul (pos, &end, 10); + if (number > UINT_MAX) + error (_("Number too big.")); + + *arg += (end - begin); + + return (unsigned int) number; +} + +/* Read a context size from an argument string. */ + +static int +get_context_size (char **arg) +{ + char *pos; + int number; + + pos = skip_spaces (*arg); + + if (!isdigit (*pos)) + error (_("Expected positive number, got: %s."), pos); + + return strtol (pos, arg, 10); +} + +/* Complain about junk at the end of an argument string. */ + +static void +no_chunk (char *arg) +{ + if (*arg != 0) + error (_("Junk after argument: %s."), arg); +} + +/* The "maintenance btrace packet-history" command. 
*/ + +static void +maint_btrace_packet_history_cmd (char *arg, int from_tty) +{ + struct btrace_thread_info *btinfo; + struct thread_info *tp; + unsigned int size, begin, end, from, to; + + tp = find_thread_ptid (inferior_ptid); + if (tp == NULL) + error (_("No thread.")); + + size = 10; + btinfo = &tp->btrace; + + btrace_maint_update_packets (btinfo, &begin, &end, &from, &to); + if (begin == end) + { + printf_unfiltered (_("No trace.\n")); + return; + } + + if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0) + { + from = to; + + if (end - from < size) + size = end - from; + to = from + size; + } + else if (strcmp (arg, "-") == 0) + { + to = from; + + if (to - begin < size) + size = to - begin; + from = to - size; + } + else + { + from = get_uint (&arg); + if (end <= from) + error (_("'%u' is out of range."), from); + + arg = skip_spaces (arg); + if (*arg == ',') + { + arg = skip_spaces (++arg); + + if (*arg == '+') + { + arg += 1; + size = get_context_size (&arg); + + no_chunk (arg); + + if (end - from < size) + size = end - from; + to = from + size; + } + else if (*arg == '-') + { + arg += 1; + size = get_context_size (&arg); + + no_chunk (arg); + + /* Include the packet given as first argument. */ + from += 1; + to = from; + + if (to - begin < size) + size = to - begin; + from = to - size; + } + else + { + to = get_uint (&arg); + + /* Include the packet at the second argument and silently + truncate the range. */ + if (to < end) + to += 1; + else + to = end; + + no_chunk (arg); + } + } + else + { + no_chunk (arg); + + if (end - from < size) + size = end - from; + to = from + size; + } + + dont_repeat (); + } + + btrace_maint_print_packets (btinfo, from, to); +} + +/* The "maintenance btrace clear-packet-history" command. */ + +static void +maint_btrace_clear_packet_history_cmd (char *args, int from_tty) +{ + struct btrace_thread_info *btinfo; + struct thread_info *tp; + + if (args != NULL && *args != 0) + error (_("Invalid argument.")); + + tp = find_thread_ptid (inferior_ptid); + if (tp == NULL) + error (_("No thread.")); + + btinfo = &tp->btrace; + + /* Must clear the maint data before - it depends on BTINFO->DATA. */ + btrace_maint_clear (btinfo); + btrace_data_clear (&btinfo->data); +} + +/* The "maintenance btrace clear" command. */ + +static void +maint_btrace_clear_cmd (char *args, int from_tty) +{ + struct btrace_thread_info *btinfo; + struct thread_info *tp; + + if (args != NULL && *args != 0) + error (_("Invalid argument.")); + + tp = find_thread_ptid (inferior_ptid); + if (tp == NULL) + error (_("No thread.")); + + btrace_clear (tp); +} + +/* The "maintenance btrace" command. */ + +static void +maint_btrace_cmd (char *args, int from_tty) +{ + help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands, + gdb_stdout); +} + +/* The "maintenance set btrace" command. */ + +static void +maint_btrace_set_cmd (char *args, int from_tty) +{ + help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands, + gdb_stdout); +} + +/* The "maintenance show btrace" command. */ + +static void +maint_btrace_show_cmd (char *args, int from_tty) +{ + help_list (maint_btrace_show_cmdlist, "maintenance show btrace ", + all_commands, gdb_stdout); +} + +/* The "maintenance set btrace pt" command. */ + +static void +maint_btrace_pt_set_cmd (char *args, int from_tty) +{ + help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ", + all_commands, gdb_stdout); +} + +/* The "maintenance show btrace pt" command. 
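
The windowing arithmetic of the packet-history command, extracted: page forward or backward by size packets, clamped to the available range [begin, end). next_window and prev_window are illustrative helpers, not GDB functions.

#include <stdio.h>

/* Show the next SIZE packets after the previous print, truncated at END.  */
static void
next_window (unsigned int end, unsigned int *from, unsigned int *to,
             unsigned int size)
{
  *from = *to;                  /* Continue after the last print.  */

  if (end - *from < size)
    size = end - *from;         /* Truncate at the end of the trace.  */
  *to = *from + size;
}

/* Show the SIZE packets before the previous print, truncated at BEGIN.  */
static void
prev_window (unsigned int begin, unsigned int *from, unsigned int *to,
             unsigned int size)
{
  *to = *from;                  /* Continue before the last print.  */

  if (*to - begin < size)
    size = *to - begin;         /* Truncate at the beginning.  */
  *from = *to - size;
}

int
main (void)
{
  unsigned int from = 0, to = 0;

  /* Page forward twice through 15 packets, then back once.  */
  next_window (15, &from, &to, 10);
  printf ("[%u, %u)\n", from, to);   /* [0, 10)  */
  next_window (15, &from, &to, 10);
  printf ("[%u, %u)\n", from, to);   /* [10, 15) */
  prev_window (0, &from, &to, 10);
  printf ("[%u, %u)\n", from, to);   /* [0, 10)  */
  return 0;
}
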
*/ + +static void +maint_btrace_pt_show_cmd (char *args, int from_tty) +{ + help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ", + all_commands, gdb_stdout); +} + +/* The "maintenance info btrace" command. */ + +static void +maint_info_btrace_cmd (char *args, int from_tty) +{ + struct btrace_thread_info *btinfo; + struct thread_info *tp; + const struct btrace_config *conf; + + if (args != NULL && *args != 0) + error (_("Invalid argument.")); + + tp = find_thread_ptid (inferior_ptid); + if (tp == NULL) + error (_("No thread.")); + + btinfo = &tp->btrace; + + conf = btrace_conf (btinfo); + if (conf == NULL) + error (_("No btrace configuration.")); + + printf_unfiltered (_("Format: %s.\n"), + btrace_format_string (conf->format)); + + switch (conf->format) + { + default: + break; + + case BTRACE_FORMAT_BTS: + printf_unfiltered (_("Number of packets: %u.\n"), + VEC_length (btrace_block_s, + btinfo->data.variant.bts.blocks)); + break; + +#if defined (HAVE_LIBIPT) + case BTRACE_FORMAT_PT: + { + struct pt_version version; + + version = pt_library_version (); + printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major, + version.minor, version.build, + version.ext != NULL ? version.ext : ""); + + btrace_maint_update_pt_packets (btinfo); + printf_unfiltered (_("Number of packets: %u.\n"), + VEC_length (btrace_pt_packet_s, + btinfo->maint.variant.pt.packets)); + } + break; +#endif /* defined (HAVE_LIBIPT) */ + } +} + +/* The "maint show btrace pt skip-pad" show value function. */ + +static void +show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty, + struct cmd_list_element *c, + const char *value) +{ + fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value); +} + + +/* Initialize btrace maintenance commands. */ + +void _initialize_btrace (void); +void +_initialize_btrace (void) +{ + add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd, + _("Info about branch tracing data."), &maintenanceinfolist); + + add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd, + _("Branch tracing maintenance commands."), + &maint_btrace_cmdlist, "maintenance btrace ", + 0, &maintenancelist); + + add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\ +Set branch tracing specific variables."), + &maint_btrace_set_cmdlist, "maintenance set btrace ", + 0, &maintenance_set_cmdlist); + + add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\ +Set Intel Processor Trace specific variables."), + &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ", + 0, &maint_btrace_set_cmdlist); + + add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\ +Show branch tracing specific variables."), + &maint_btrace_show_cmdlist, "maintenance show btrace ", + 0, &maintenance_show_cmdlist); + + add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\ +Show Intel Processor Trace specific variables."), + &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ", + 0, &maint_btrace_show_cmdlist); + + add_setshow_boolean_cmd ("skip-pad", class_maintenance, + &maint_btrace_pt_skip_pad, _("\ +Set whether PAD packets should be skipped in the btrace packet history."), _("\ +Show whether PAD packets should be skipped in the btrace packet history."),_("\ +When enabled, PAD packets are ignored in the btrace packet history."), + NULL, show_maint_btrace_pt_skip_pad, + &maint_btrace_pt_set_cmdlist, + &maint_btrace_pt_show_cmdlist); + + add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd, + _("Print the raw 
branch tracing data.\n\
+With no argument, print ten more packets after the previous ten-line print.\n\
+With '-' as argument, print ten packets before the previous ten-line print.\n\
+One argument specifies the starting packet of a ten-line print.\n\
+Two arguments, separated by a comma, specify the starting and ending packets \
+to print.\n\
+When preceded by '+' or '-', the second argument specifies the distance from \
+the first.\n"),
+	   &maint_btrace_cmdlist);
+
+  add_cmd ("clear-packet-history", class_maintenance,
+	   maint_btrace_clear_packet_history_cmd,
+	   _("Clears the branch tracing packet history.\n\
+Discards the raw branch tracing data but not the execution history data.\n\
+"),
+	   &maint_btrace_cmdlist);
+
+  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
+	   _("Clears the branch tracing data.\n\
+Discards the raw branch tracing data and the execution history data.\n\
+The next 'record' command will fetch the branch tracing data anew.\n\
+"),
+	   &maint_btrace_cmdlist);
+
+}