/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "filenames.h"
#include "xml-support.h"
#include "cli/cli-utils.h"
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;
/* A vector of function segments.  */
typedef struct btrace_function *bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
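/* For illustration: without the do/while wrapper, a use such as

     if (record_debug != 0)
       DEBUG ("fetch");
     else
       do_something_else ();

   would attach the `else' to the `if' inside the macro expansion instead
   of the visible one.  The wrapper makes each DEBUG call a single
   statement.  */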
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}
/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}
/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}
/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun),
		 MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
			   + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}
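/* For illustration (hypothetical numbers): if PREV is segment number 3,
   covers 5 instructions, and starts at instruction offset 10, the new
   segment becomes number 4 with insn_offset 15, so global instruction
   numbering continues seamlessly across segments.  */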
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}
/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}
/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}
/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}
/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return bfun->up;

  return NULL;
}
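/* For illustration (hypothetical chain): if BFUN's up chain is
   BFUN -tailcall-> B -call-> A, the loop above skips the tail call link
   and returns A, the caller that an unwinder would report.  */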
/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}
/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (prev, mfun, fun);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}
/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    return ftrace_new_return (bfun, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call if we're switching functions
	       and as an intra-function branch if we don't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}
/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  */

static int
ftrace_match_backtrace (struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);
    }

  return matches;
}
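/* For illustration (hypothetical back traces): matching
   bar <- foo <- main against bar <- foo <- main yields 3; if the symbols
   disagree at any level, the result is 0 regardless of how many levels
   matched before.  */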
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */

static void
ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  for (; bfun != NULL; bfun = bfun->flow.next)
    bfun->level += adjustment;
}
/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  struct btrace_function *bfun, *end;
  int level;

  if (btinfo == NULL)
    return;

  bfun = btinfo->begin;
  if (bfun == NULL)
    return;

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     stop when we reach it; otherwise, we let the below loop run to the end.  */
  end = btinfo->end;
  if (VEC_length (btrace_insn_s, end->insn) > 1)
    end = NULL;

  level = INT_MAX;
  for (; bfun != end; bfun = bfun->flow.next)
    level = std::min (level, bfun->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
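/* For illustration (hypothetical levels): if the function segments sit on
   levels -2, -1, 0, and 1, the minimal level is -2 and the global level
   offset becomes 2, so normalized levels start at zero.  */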
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  */

static void
ftrace_connect_bfun (struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->segment.next == NULL);
  gdb_assert (next->segment.prev == NULL);

  prev->segment.next = next;
  next->segment.prev = prev;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == NULL)
    {
      if (next->up != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (prev, next->up, next->flags);
	}
    }
  else if (next->up == NULL)
    {
      if (prev->up != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (next, prev->up, prev->flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = next->up;
	  flags = next->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  ftrace_fixup_caller (next, prev->up, prev->flags);

	  for (prev = prev->up; prev != NULL; prev = prev->up)
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == NULL)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (prev, caller, flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (caller, prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}
/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  */

static void
ftrace_connect_backtrace (struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);

      ftrace_connect_bfun (prev, next);
    }
}
/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (best_l, best_r);

  return best_matches;
}
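/* For illustration (hypothetical trace): for ... foo > bar <gap> baz ...,
   candidate pairs such as (bar, baz), (bar, baz's caller), and
   (bar's caller, baz) are scored via ftrace_match_backtrace, and the
   highest-scoring pair - provided it reaches MIN_MATCHES - is
   connected.  */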
/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  VEC (bfun_s) *remaining;
  struct cleanup *old_chain;
  int min_matches;

  DEBUG ("bridge gaps");

  remaining = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!VEC_empty (bfun_s, *gaps))
	{
	  struct btrace_function *gap;
	  unsigned int idx;

	  for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
	    {
	      struct btrace_function *lhs, *rhs;
	      int bridged;

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = gap->flow.prev;
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
		if (rhs->errcode == 0)
		  break;

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		VEC_safe_push (bfun_s, remaining, gap);
	    }

	  /* Let's see if we made any progress.  */
	  if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
	    break;

	  VEC_free (bfun_s, *gaps);

	  *gaps = remaining;
	  remaining = NULL;
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (VEC_empty (bfun_s, *gaps))
	break;

      VEC_free (bfun_s, remaining);
      remaining = NULL;
    }

  do_cleanups (old_chain);

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (&tp->btrace);
}
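/* For illustration: the first iteration requires 5 matching back trace
   segments to bridge a gap; if no gap can be bridged at that confidence,
   the requirement drops to 4, 3, 2, and finally 1, trading confidence for
   the chance to connect short back traces.  */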
/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
	      if (begin == NULL)
		begin = end;

	      VEC_safe_push (bfun_s, *gaps, end);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), end->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  end = ftrace_update_function (end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, end->level);

	  size = 0;
	  TRY
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  CATCH (error, RETURN_MASK_ERROR)
	    {
	    }
	  END_CATCH

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (end, &insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);

	      VEC_safe_push (bfun_s, *gaps, end);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), end->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
#if defined (HAVE_LIBIPT)

/* A converter function.  */

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}
/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  btrace_insn_flags flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}
/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
	       struct btrace_function **pbegin,
	       struct btrace_function **pend, int *plevel,
	       VEC (bfun_s) **gaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode;

  begin = *pbegin;
  end = *pend;
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
	{
	  if (errcode != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
	  break;
	}

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
	{
	  errcode = pt_insn_next (decoder, &insn, sizeof(insn));
	  if (errcode < 0)
	    break;

	  /* Look for gaps in the trace - unless we're at the beginning.  */
	  if (begin != NULL)
	    {
	      /* Tracing is disabled and re-enabled each time we enter the
		 kernel.  Most times, we continue from the same instruction we
		 stopped before.  This is indicated via the RESUMED instruction
		 flag.  The ENABLED instruction flag means that we continued
		 from some other instruction.  Indicate this as a trace gap.  */
	      if (insn.enabled)
		{
		  *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

		  VEC_safe_push (bfun_s, *gaps, end);

		  pt_insn_get_offset (decoder, &offset);

		  warning (_("Non-contiguous trace at instruction %u (offset "
			     "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
			   end->insn_offset - 1, offset, insn.ip);
		}
	    }

	  /* Indicate trace overflows.  */
	  if (insn.resynced)
	    {
	      *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
	      if (begin == NULL)
		*pbegin = begin = end;

	      VEC_safe_push (bfun_s, *gaps, end);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
			 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
		       offset, insn.ip);
	    }

	  upd = ftrace_update_function (end, insn.ip);
	  if (upd != end)
	    {
	      *pend = end = upd;

	      if (begin == NULL)
		*pbegin = begin = upd;
	    }

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, end->level);

	  btinsn.pc = (CORE_ADDR) insn.ip;
	  btinsn.size = (gdb_byte) insn.size;
	  btinsn.iclass = pt_reclassify_insn (insn.iclass);
	  btinsn.flags = pt_btrace_insn_flags (&insn);

	  ftrace_update_insns (end, &btinsn);
	}

      if (errcode == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      if (begin == NULL)
	*pbegin = begin = end;

      VEC_safe_push (bfun_s, *gaps, end);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (errcode)));
    }
}
/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}
/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}
/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}
/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
	   pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
	{
	  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);

	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
	}

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}
#else /* defined (HAVE_LIBIPT)  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  VEC (bfun_s) **gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT)  */
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
			 VEC (bfun_s) **gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
static void
btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  if (!VEC_empty (bfun_s, *gaps))
    {
      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
      btrace_bridge_gaps (tp, gaps);
    }
}
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  VEC (bfun_s) *gaps;
  struct cleanup *old_chain;

  gaps = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, &gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, &gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, &gaps);

  do_cleanups (old_chain);
}
/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.

	 If we can't access TP's registers, TP is most likely running.  In this
	 case, we can't really say where tracing was enabled so it should be
	 safe to simply skip this step.

	 This is not relevant for BTRACE_FORMAT_PT since the trace will already
	 start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
	  && can_access_registers_ptid (tp->ptid))
	btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}
/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}
/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
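/* For illustration (hypothetical delta read): if the old trace ends at
   pc 0x1000 and the chronologically first new block is [0, 0x1008), that
   block is adjusted to [0x1000, 0x1008) and the popped instruction at
   0x1000 is re-decoded as part of the new trace.  */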
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}
/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_ptid (tp->ptid));

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}
/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}
/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}
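/* For illustration: the body text "2a08" decodes to the bytes 0x2a 0x08.
   An odd-length body is rejected up front, and a NUL inside a hex pair is
   reported as a bad encoding.  */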
/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}
/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
			 const struct gdb_xml_element *element,
			 void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
		 &btrace->variant.pt.size);
}
/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
		     const struct gdb_xml_element *element,
		     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}
static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
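/* For illustration, a minimal document accepted by the above grammar:

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
     </btrace>

   Intel Processor Trace data arrives as a "pt" element with an optional
   "pt-config" child and a hex-encoded "raw" child instead of "block"
   elements.  */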
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}
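/* For illustration (hypothetical iterator): with insn_offset 10 and
   index 2, the instruction number is 12.  An iterator into a gap segment
   yields 0 instead.  */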
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
/* Decrement the instruction iterator IT by at most STRIDE instructions.
   Return the number of instructions actually stepped.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
/* Compare two instruction iterators.  Return a negative, zero, or positive
   number if LHS points before, at, or after RHS, respectively.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
        lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
        rnum -= 1;
    }

  return (int) (lnum - rnum);
}
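/* Usage sketch (hypothetical caller, assuming BTINFO refers to a thread's
   struct btrace_thread_info): walk the complete instruction history one
   instruction at a time.

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn == NULL)
           ;  // The iterator points to a gap in the trace.

         if (btrace_insn_next (&it, 1) == 0)
           break;  // No progress - we ran off the end of the trace.
       }
*/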
/* Initialize IT to point to the instruction with number NUMBER.
   Return non-zero on success, zero otherwise.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      if (bfun->insn_offset <= number)
        break;
    }

  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}
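/* Usage sketch (hypothetical caller): position an iterator at a known
   instruction number.

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
       error (_("Instruction %u is not in the recorded trace."), number);

   The lookup succeeds exactly if NUMBER falls into one of the non-gap
   function segments.  */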
const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}
/* Return the number of the function call the iterator IT points to.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     of the last function.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
/* Initialize IT to point to the first function call in the trace.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}
/* Initialize IT to point one past the last function call in the trace.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}
/* Advance the call iterator IT by at most STRIDE function calls.
   Return the number of calls actually stepped.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
/* Decrement the call iterator IT by at most STRIDE function calls.
   Return the number of calls actually stepped.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
/* Compare two call iterators.  Return a negative, zero, or positive number
   if LHS points before, at, or after RHS, respectively.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}
/* Initialize IT to point to the function call with number NUMBER.
   Return non-zero on success, zero otherwise.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail out
         earlier.  On the other hand, it is very unlikely that we search for
         a nonexistent function.  */
    }

  return 0;
}
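/* Usage sketch (hypothetical caller): walk the call history and print the
   number of each function segment.

     struct btrace_call_iterator it, end;

     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);

     while (btrace_call_cmp (&it, &end) < 0)
       {
         printf_unfiltered ("%u\n", btrace_call_number (&it));

         if (btrace_call_next (&it, 1) == 0)
           break;
       }
*/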
/* Set the branch trace instruction history from BEGIN (inclusive) to END
   (exclusive).  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* Set the branch trace function call history from BEGIN (inclusive) to END
   (exclusive).  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
/* Return non-zero if TP is currently replaying its execution trace.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* Return non-zero if the recorded trace of TP is empty.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini ((struct btrace_data *) arg);
}

/* Return a cleanup that finalizes DATA when run.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof(packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                             &packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  volatile struct gdb_exception except;
  struct pt_packet_decoder *decoder;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
  config.cpu.family = pt->config.cpu.family;
  config.cpu.model = pt->config.cpu.model;
  config.cpu.stepping = pt->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}
#endif /* defined (HAVE_LIBIPT) */

/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        VEC (btrace_block_s) *blocks;
        unsigned int blk;

        blocks = btinfo->data.variant.bts.blocks;
        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block_s *block;

            block = VEC_index (btrace_block_s, blocks, blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block->begin),
                               core_addr_to_string_nz (block->end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        VEC (btrace_pt_packet_s) *packets;
        unsigned int pkt;

        packets = btinfo->maint.variant.pt.packets;
        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet *packet;

            packet = VEC_index (btrace_pt_packet_s, packets, pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

            if (packet->errcode == pte_ok)
              pt_print_packet (&packet->packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
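/* For illustration only, the output produced above looks roughly like

     0  begin: 0x4004d0, end: 0x4004da
     1  begin: 0x4003e8, end: 0x4003f1

   for the BTS format (one block per line), and like

     0  0x0   psb
     1  0x10  fup 3: 0x4004d0

   for the PT format (index, buffer offset, decoded packet).  The
   addresses and offsets are, of course, made up.  */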
/* Read a number from an argument string.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
/* Read a context size from an argument string.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
        size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
        size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (from >= end)
        error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
        {
          arg = skip_spaces (++arg);

          if (*arg == '+')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              if (end - from < size)
                size = end - from;
              to = from + size;
            }
          else if (*arg == '-')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              /* Include the packet given as first argument.  */
              from += 1;
              to = from;

              if (to - begin < size)
                size = to - begin;
              from = to - size;
            }
          else
            {
              to = get_uint (&arg);

              /* Include the packet at the second argument and silently
                 truncate the range.  */
              if (to < end)
                to += 1;
              else
                to = end;

              no_chunk (arg);
            }
        }
      else
        {
          no_chunk (arg);

          if (end - from < size)
            size = end - from;
          to = from + size;
        }
    }

  btrace_maint_print_packets (btinfo, from, to);
}
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_clear (tp);
}
/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
             all_commands, gdb_stdout);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
                         VEC_length (btrace_block_s,
                                     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %u.\n"),
                           VEC_length (btrace_pt_packet_s,
                                       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c,
                               const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void _initialize_btrace (void);

void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
                  _("Branch tracing maintenance commands."),
                  &maint_btrace_cmdlist, "maintenance btrace ",
                  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
                  &maint_btrace_set_cmdlist, "maintenance set btrace ",
                  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
                  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
                  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
                  &maint_btrace_show_cmdlist, "maintenance show btrace ",
                  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
                  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
                  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
                           &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
                           NULL, show_maint_btrace_pt_skip_pad,
                           &maint_btrace_pt_set_cmdlist,
                           &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance,
           maint_btrace_packet_history_cmd,
           _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
           maint_btrace_clear_packet_history_cmd,
           _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
           _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n"),
           &maint_btrace_cmdlist);
}
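/* For illustration only, the commands registered above are invoked as,
   e.g.:

     (gdb) maint info btrace
     (gdb) maint btrace packet-history 0,+10
     (gdb) maint btrace clear-packet-history
     (gdb) maint btrace clear
     (gdb) maint set btrace pt skip-pad off
     (gdb) maint show btrace pt skip-pad  */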
);