/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

/* A vector of function segments.  */
typedef struct btrace_function * bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)					\
  do								\
    {								\
      if (record_debug != 0)					\
	fprintf_unfiltered (gdb_stdlog,				\
			    "[btrace] " msg "\n", ##args);	\
    }								\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

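/* To illustrate the checks above with a hypothetical example: if BFUN was
   created while executing in foo () and MFUN/FUN now name bar (), the name
   comparison reports a switch.  Two static functions sharing a name in
   different files are told apart by the filename_cmp check, and losing or
   gaining symbol information altogether also counts as a switch.  */
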
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
			   + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return bfun->up;

  return NULL;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    return ftrace_new_return (bfun, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call if we're switching functions
	       and as an intra-function branch if we don't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

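/* A summary of the classification above, for a hypothetical instruction
   sequence: a RETURN at the end of BFUN starts a return segment (or a tail
   call segment for _dl_runtime_resolve), a CALL starts a call segment unless
   it targets the next instruction (PIC), a JUMP to a function start (or out
   of an unknown function) starts a tail call segment, and a symbol change
   without any of the above starts a switch segment.  Anything else stays
   within the current segment BFUN.  */
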
/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  */

static int
ftrace_match_backtrace (struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);
    }

  return matches;
}

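/* Example (hypothetical back traces): for

     lhs: bar <- foo <- main        rhs: bar <- foo <- main

   the walk pairs bar/bar, foo/foo, main/main and returns 3.  Had rhs been
   bar <- baz <- main, the second pair (foo vs. baz) would report a switch
   and the function would return 0.  */
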
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */

static void
ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  for (; bfun != NULL; bfun = bfun->flow.next)
    bfun->level += adjustment;
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  struct btrace_function *bfun, *end;
  int level;

  if (btinfo == NULL)
    return;

  bfun = btinfo->begin;
  if (bfun == NULL)
    return;

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     stop when we reach it; otherwise, we let the below loop run to the end.  */
  end = btinfo->end;
  if (VEC_length (btrace_insn_s, end->insn) > 1)
    end = NULL;

  level = INT_MAX;
  for (; bfun != end; bfun = bfun->flow.next)
    level = std::min (level, bfun->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}

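/* Example: for function segment levels { 0, -1, -2, -1 } the minimal level
   is -2, so BTINFO->LEVEL becomes 2 and the levels shown to the user start
   at zero: { 2, 1, 0, 1 }.  */
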
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  */

static void
ftrace_connect_bfun (struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->segment.next == NULL);
  gdb_assert (next->segment.prev == NULL);

  prev->segment.next = next;
  next->segment.prev = prev;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == NULL)
    {
      if (next->up != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (prev, next->up, next->flags);
	}
    }
  else if (next->up == NULL)
    {
      if (prev->up != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (next, prev->up, prev->flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = next->up;
	  flags = next->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  ftrace_fixup_caller (next, prev->up, prev->flags);

	  for (prev = prev->up; prev != NULL; prev = prev->up)
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == NULL)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (prev, caller, flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (caller, prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  */

static void
ftrace_connect_backtrace (struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);

      ftrace_connect_bfun (prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (best_l, best_r);

  return best_matches;
}

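/* Example (hypothetical trace around a gap): with

     left of the gap:   bar <- foo <- main
     right of the gap:  foo <- main

   candidate pairs are drawn from BAR and its callers on the left and FOO and
   its callers on the right.  The pair (foo, foo) matches in two segments
   (foo and main), so with MIN_MATCHES <= 2 the gap is bridged by connecting
   those two back traces.  */
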
/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  VEC (bfun_s) *remaining;
  struct cleanup *old_chain;
  int min_matches;

  DEBUG ("bridge gaps");

  remaining = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!VEC_empty (bfun_s, *gaps))
	{
	  struct btrace_function *gap;
	  unsigned int idx;

	  for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
	    {
	      struct btrace_function *lhs, *rhs;
	      int bridged;

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = gap->flow.prev;
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
		if (rhs->errcode == 0)
		  break;

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (!bridged)
		VEC_safe_push (bfun_s, remaining, gap);
	    }

	  /* Let's see if we made any progress.  */
	  if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
	    break;

	  VEC_free (bfun_s, *gaps);

	  *gaps = remaining;
	  remaining = NULL;
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (VEC_empty (bfun_s, *gaps))
	break;

      VEC_free (bfun_s, remaining);
      remaining = NULL;
    }

  do_cleanups (old_chain);

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (&tp->btrace);
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
	      if (begin == NULL)
		begin = end;

	      VEC_safe_push (bfun_s, *gaps, end);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), end->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  end = ftrace_update_function (end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, end->level);

	  size = 0;
	  TRY
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  CATCH (error, RETURN_MASK_ERROR)
	    {
	    }
	  END_CATCH

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (end, &insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);

	      VEC_safe_push (bfun_s, *gaps, end);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), end->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

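/* Note on ordering: the BTS block vector starts with the most recent block,
   so the loop above counts BLK down from VEC_length to zero and thus
   processes blocks in chronological order, appending the instructions from
   BLOCK->BEGIN up to and including BLOCK->END for each block.  */
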
#if defined (HAVE_LIBIPT)

/* Translate a libipt instruction classification into a btrace
   instruction class.  */

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  btrace_insn_flags flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
	       struct btrace_function **pbegin,
	       struct btrace_function **pend, int *plevel,
	       VEC (bfun_s) **gaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode;

  begin = *pbegin;
  end = *pend;
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
	{
	  if (errcode != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
	  break;
	}

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
	{
	  errcode = pt_insn_next (decoder, &insn, sizeof(insn));
	  if (errcode < 0)
	    break;

	  /* Look for gaps in the trace - unless we're at the beginning.  */
	  if (begin != NULL)
	    {
	      /* Tracing is disabled and re-enabled each time we enter the
		 kernel.  Most times, we continue from the same instruction we
		 stopped before.  This is indicated via the RESUMED instruction
		 flag.  The ENABLED instruction flag means that we continued
		 from some other instruction.  Indicate this as a trace gap.  */
	      if (insn.enabled)
		{
		  *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

		  VEC_safe_push (bfun_s, *gaps, end);

		  pt_insn_get_offset (decoder, &offset);

		  warning (_("Non-contiguous trace at instruction %u (offset "
			     "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
			   end->insn_offset - 1, offset, insn.ip);
		}
	    }

	  /* Indicate trace overflows.  */
	  if (insn.resynced)
	    {
	      *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
	      if (begin == NULL)
		*pbegin = begin = end;

	      VEC_safe_push (bfun_s, *gaps, end);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
			 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
		       offset, insn.ip);
	    }

	  upd = ftrace_update_function (end, insn.ip);
	  if (upd != end)
	    {
	      *pend = end = upd;

	      if (begin == NULL)
		*pbegin = begin = upd;
	    }

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, end->level);

	  btinsn.pc = (CORE_ADDR) insn.ip;
	  btinsn.size = (gdb_byte) insn.size;
	  btinsn.iclass = pt_reclassify_insn (insn.iclass);
	  btinsn.flags = pt_btrace_insn_flags (&insn);

	  ftrace_update_insns (end, &btinsn);
	}

      if (errcode == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      if (begin == NULL)
	*pbegin = begin = end;

      VEC_safe_push (bfun_s, *gaps, end);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (errcode)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
	   pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
	{
	  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);

	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
	}

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT)  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  VEC (bfun_s) **gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT)  */

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
			 VEC (bfun_s) **gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

static void
btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  if (!VEC_empty (bfun_s, *gaps))
    {
      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
      btrace_bridge_gaps (tp, gaps);
    }
}

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  VEC (bfun_s) *gaps;
  struct cleanup *old_chain;

  gaps = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, &gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, &gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, &gaps);

  do_cleanups (old_chain);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.

	 If we can't access TP's registers, TP is most likely running.  In this
	 case, we can't really say where tracing was enabled so it should be
	 safe to simply skip this step.

	 This is not relevant for BTRACE_FORMAT_PT since the trace will already
	 start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
	  && can_access_registers_ptid (tp->ptid))
	btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

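/* Example (hypothetical delta read): if the old trace ends at pc 0x1000 and
   the delta's chronologically first block is [0; 0x1010], its begin is
   adjusted to 0x1000 and the instruction at 0x1000 is popped from the old
   trace; btrace_compute_ftrace then re-adds it when processing the delta, so
   the trace continues without duplication.  */
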
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_ptid (tp->ptid));

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}

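/* Example: the body text "0b2f" decodes to the two bytes 0x0b and 0x2f; an
   odd-length body such as "0b2" is rejected with "Bad raw data size.".  */
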
/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
			 const struct gdb_xml_element *element,
			 void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
		 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
		     const struct gdb_xml_element *element,
		     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
        lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
        rnum -= 1;
    }

  return (int) (lnum - rnum);
}
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      if (bfun->insn_offset <= number)
        break;
    }

  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}
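/* Usage sketch (illustrative, not part of this file): walking a thread
   TP's recorded instruction history with the iterator functions above.
   TP and the printing are assumptions for the example.

     struct btrace_insn_iterator it, end;
     struct btrace_thread_info *btinfo = &tp->btrace;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         // A NULL instruction marks a gap in the trace.
         if (insn != NULL)
           printf_unfiltered ("%u  %s\n", btrace_insn_number (&it),
                              core_addr_to_string_nz (insn->pc));

         // A zero return means the iterator made no progress.
         if (btrace_insn_next (&it, 1) == 0)
           break;
       }  */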
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     of the last function.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}
/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  gdb_assert (lhs->btinfo == rhs->btinfo);

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail out
         earlier.  On the other hand, it is very unlikely that we search for
         a nonexistent function.  */
    }

  return 0;
}
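/* Usage sketch (illustrative only): printing a thread TP's function call
   history with the call iterator functions above; TP is an assumption,
   and ftrace_print_function_name is the helper defined earlier in this
   file.

     struct btrace_call_iterator it, end;
     struct btrace_thread_info *btinfo = &tp->btrace;

     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);

     while (btrace_call_cmp (&it, &end) < 0)
       {
         const struct btrace_function *bfun = btrace_call_get (&it);

         printf_unfiltered ("%u\t%s\n", btrace_call_number (&it),
                            ftrace_print_function_name (bfun));

         if (btrace_call_next (&it, 1) == 0)
           break;
       }  */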
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini ((struct btrace_data *) arg);
}

/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}
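/* Typical use (a sketch, not lifted from this file): tie a btrace_data
   object to the cleanup chain so it is finalized on error paths as well.

     struct btrace_data btrace;
     struct cleanup *cleanup;

     btrace_data_init (&btrace);
     cleanup = make_cleanup_btrace_data (&btrace);

     ... fetch and process the trace; an error () unwinds through the
     cleanup and calls btrace_data_fini ...

     do_cleanups (cleanup);  */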
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof(packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                             &packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
  config.cpu.family = pt->config.cpu.family;
  config.cpu.model = pt->config.cpu.model;
  config.cpu.stepping = pt->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}

#endif /* defined (HAVE_LIBIPT) */
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        VEC (btrace_block_s) *blocks;
        unsigned int blk;

        blocks = btinfo->data.variant.bts.blocks;
        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block_s *block;

            block = VEC_index (btrace_block_s, blocks, blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block->begin),
                               core_addr_to_string_nz (block->end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        VEC (btrace_pt_packet_s) *packets;
        unsigned int pkt;

        packets = btinfo->maint.variant.pt.packets;
        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet *packet;

            packet = VEC_index (btrace_pt_packet_s, packets, pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

            if (packet->errcode == pte_ok)
              pt_print_packet (&packet->packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Read a number from an argument string.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
/* Read a context size from an argument string.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
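/* Parsing sketch (illustrative): for an argument string such as "37,+10",
   the packet-history command below combines these helpers roughly as:

     unsigned int from, size;

     from = get_uint (&arg);          // from == 37, arg now at ",+10"
     arg = skip_spaces (++arg);       // skip ',', arg now at "+10"
     arg += 1;                        // skip '+'
     size = get_context_size (&arg);  // size == 10
     no_chunk (arg);                  // reject trailing junk  */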
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
        size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
        size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (end <= from)
        error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
        {
          arg = skip_spaces (++arg);

          if (*arg == '+')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              if (end - from < size)
                size = end - from;
              to = from + size;
            }
          else if (*arg == '-')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              /* Include the packet given as first argument.  */
              from += 1;
              to = from;

              if (to - begin < size)
                size = to - begin;
              from = to - size;
            }
          else
            {
              to = get_uint (&arg);

              /* Include the packet at the second argument and silently
                 truncate the range.  */
              if (to < end)
                to += 1;
              else
                to = end;

              no_chunk (arg);
            }
        }
      else
        {
          no_chunk (arg);

          if (end - from < size)
            size = end - from;
          to = from + size;
        }
    }

  btrace_maint_print_packets (btinfo, from, to);
}
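/* Example invocations (illustrative):

     (gdb) maint btrace packet-history          <- next ten packets
     (gdb) maint btrace packet-history -        <- previous ten packets
     (gdb) maint btrace packet-history 37       <- ten packets from 37
     (gdb) maint btrace packet-history 37,46    <- packets 37 to 46
     (gdb) maint btrace packet-history 37,+10   <- ten packets from 37
     (gdb) maint btrace packet-history 46,-10   <- ten packets up to 46  */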
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_clear (tp);
}
/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
             all_commands, gdb_stdout);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
                         VEC_length (btrace_block_s,
                                     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %u.\n"),
                           VEC_length (btrace_pt_packet_s,
                                       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c,
                               const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void _initialize_btrace (void);

void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
                  _("Branch tracing maintenance commands."),
                  &maint_btrace_cmdlist, "maintenance btrace ",
                  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
                  &maint_btrace_set_cmdlist, "maintenance set btrace ",
                  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
                  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
                  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
                  &maint_btrace_show_cmdlist, "maintenance show btrace ",
                  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
                  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
                  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
                           &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
                           NULL, show_maint_btrace_pt_skip_pad,
                           &maint_btrace_pt_set_cmdlist,
                           &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
           _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
           maint_btrace_clear_packet_history_cmd,
           _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
           _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n"),
           &maint_btrace_cmdlist);
}
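/* A short example session with the commands registered above; the output
   shown is illustrative, not captured from a real run:

     (gdb) record btrace
     (gdb) next
     (gdb) maint info btrace
     Format: pt.
     Version: 1.6.1.
     Number of packets: 42.
     (gdb) maint btrace packet-history 0,+4
     0	0x0	psb
     1	0x10	mode.exec cs.l
     2	0x18	fup 3: 0x7ffff7dd9be0
     3	0x20	psbend
     (gdb) maint btrace clear  */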