/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

/* A vector of function segments.  */
typedef struct btrace_function * bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

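/* Illustration (not part of the original source): with "set debug record 1"
   in effect, a call such as

     DEBUG ("enable thread %s", print_thread_id (tp));

   prints a line like "[btrace] enable thread 1.1" to gdb_stdlog, while
   DEBUG_FTRACE adds an "[ftrace] " prefix for function-trace messages.  */
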
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return VEC_length (btrace_insn_s, bfun->insn);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

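/* Illustration (not part of the original source): if BFUN was created while
   we only had the minimal symbol "main" and the current pc resolves to the
   minimal symbol "memcpy", the MSYMBOL_LINKAGE_NAME comparison above differs
   and we report a switch.  Losing or gaining symbol information altogether,
   e.g. when stepping between stripped and non-stripped code, is treated as a
   switch as well.  */
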
/* Allocate and initialize a new branch trace function segment.
   BTINFO is the branch trace information for the current thread.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
      bfun->level = prev->level;
    }

  btinfo->functions.push_back (bfun);
  return bfun;
}

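/* Illustration (not part of the original source): for a trace that starts
   with three instructions in "main" before calling "foo", the segment for
   "main" gets number 1 and insn_offset 1, and the segment for "foo" gets
   number 2 and insn_offset 4 (1 + 3).  A gap segment advances insn_offset by
   exactly one, since ftrace_call_num_insn counts a gap as a single
   instruction.  */
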
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   BTINFO is the branch trace information for the current thread.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
		 struct btrace_function *caller,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (btinfo, caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   BTINFO is the branch trace information for the current thread.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
		     struct btrace_function *caller,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (btinfo, caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return bfun->up;

  return NULL;
}

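/* Illustration (not part of the original source): if "f" tail-calls "g" and
   "f" itself was called from "main", the segment for "g" has up == f with
   BFUN_UP_LINKS_TO_TAILCALL set.  ftrace_get_caller on "g" therefore skips
   "f" and returns the segment for "main", which is what a user-visible back
   trace should show.  */
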
/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   BTINFO is the branch trace information for the current thread.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
		   struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (btinfo, prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   BTINFO is the branch trace information for the current thread.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
		   struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, prev, mfun, fun);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   BTINFO is the branch trace information for the current thread.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo,
		struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (btinfo, prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  BTINFO is the branch
   trace information for the current thread.  This may create new function
   segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo,
			struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (btinfo, bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);

	    return ftrace_new_return (btinfo, bfun, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (btinfo, bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call if we're switching functions
	       and as an intra-function branch if we don't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (btinfo, bfun, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, bfun, mfun, fun);
    }

  return bfun;
}

/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  */

static int
ftrace_match_backtrace (struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);
    }

  return matches;
}

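/* Illustration (not part of the original source): for two back traces that
   both read "g < f < main" (innermost first), the loop above compares g/g,
   f/f and main/main and returns 3.  A single mismatch returns 0, so callers
   can treat any non-zero result as a (partial) match of that length.  */
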
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */

static void
ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  for (; bfun != NULL; bfun = bfun->flow.next)
    bfun->level += adjustment;
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  struct btrace_function *bfun, *end;
  int level;

  if (btinfo == NULL)
    return;

  bfun = btinfo->begin;
  if (bfun == NULL)
    return;

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     stop when we reach it; otherwise, we let the below loop run to the end.  */
  end = btinfo->end;
  if (VEC_length (btrace_insn_s, end->insn) > 1)
    end = NULL;

  level = INT_MAX;
  for (; bfun != end; bfun = bfun->flow.next)
    level = std::min (level, bfun->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  */

static void
ftrace_connect_bfun (struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->segment.next == NULL);
  gdb_assert (next->segment.prev == NULL);

  prev->segment.next = next;
  next->segment.prev = prev;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == NULL)
    {
      if (next->up != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (prev, next->up, next->flags);
	}
    }
  else if (next->up == NULL)
    {
      if (prev->up != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (next, prev->up, prev->flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = next->up;
	  flags = next->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  ftrace_fixup_caller (next, prev->up, prev->flags);

	  for (prev = prev->up; prev != NULL; prev = prev->up)
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == NULL)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (prev, caller, flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (caller, prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  */

static void
ftrace_connect_backtrace (struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);

      ftrace_connect_bfun (prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (best_l, best_r);

  return best_matches;
}

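/* Illustration (not part of the original source): consider the trace
   "... f <gap> f ..." where both sides of the gap were executing "f" called
   from "main".  The nested candidate scan above evaluates (f, f), (f, main),
   (main, f) and (main, main); (f, f) matches in two segments, which is the
   longest combined back trace, so the two "f" segments are connected and
   their call stacks merged.  */
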
/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  VEC (bfun_s) *remaining;
  struct cleanup *old_chain;
  int min_matches;

  DEBUG ("bridge gaps");

  remaining = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!VEC_empty (bfun_s, *gaps))
	{
	  struct btrace_function *gap;
	  unsigned int idx;

	  for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
	    {
	      struct btrace_function *lhs, *rhs;
	      int bridged;

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = gap->flow.prev;
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
		if (rhs->errcode == 0)
		  break;

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		VEC_safe_push (bfun_s, remaining, gap);
	    }

	  /* Let's see if we made any progress.  */
	  if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
	    break;

	  VEC_free (bfun_s, *gaps);

	  *gaps = remaining;
	  remaining = NULL;
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (VEC_empty (bfun_s, *gaps))
	break;

      VEC_free (bfun_s, remaining);
      remaining = NULL;
    }

  do_cleanups (old_chain);

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (&tp->btrace);
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      end = ftrace_new_gap (btinfo, end, BDE_BTS_OVERFLOW);
	      if (begin == NULL)
		begin = end;

	      VEC_safe_push (bfun_s, *gaps, end);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), end->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  end = ftrace_update_function (btinfo, end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, end->level);

	  size = 0;
	  TRY
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  CATCH (error, RETURN_MASK_ERROR)
	    {
	    }
	  END_CATCH

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (end, &insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      end = ftrace_new_gap (btinfo, end, BDE_BTS_INSN_SIZE);

	      VEC_safe_push (bfun_s, *gaps, end);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), end->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

/* Translate an Intel Processor Trace instruction class to a btrace
   instruction class.  */

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
	  pt_reclassify_insn (insn.iclass),
	  pt_btrace_insn_flags (insn)};
}

/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
	       struct pt_insn_decoder *decoder,
	       struct btrace_function **pbegin,
	       struct btrace_function **pend, int *plevel,
	       VEC (bfun_s) **gaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode;

  begin = *pbegin;
  end = *pend;
  for (;;)
    {
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
	{
	  if (errcode != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
	  break;
	}

      for (;;)
	{
	  errcode = pt_insn_next (decoder, &insn, sizeof(insn));
	  if (errcode < 0)
	    break;

	  /* Look for gaps in the trace - unless we're at the beginning.  */
	  if (begin != NULL)
	    {
	      /* Tracing is disabled and re-enabled each time we enter the
		 kernel.  Most times, we continue from the same instruction we
		 stopped before.  This is indicated via the RESUMED instruction
		 flag.  The ENABLED instruction flag means that we continued
		 from some other instruction.  Indicate this as a trace gap.  */
	      if (insn.enabled)
		{
		  *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_DISABLED);

		  VEC_safe_push (bfun_s, *gaps, end);

		  pt_insn_get_offset (decoder, &offset);

		  warning (_("Non-contiguous trace at instruction %u (offset "
			     "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
			   end->insn_offset - 1, offset, insn.ip);
		}
	    }

	  /* Indicate trace overflows.  */
	  if (insn.resynced)
	    {
	      *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_OVERFLOW);
	      if (begin == NULL)
		*pbegin = begin = end;

	      VEC_safe_push (bfun_s, *gaps, end);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
			 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
		       offset, insn.ip);
	    }

	  upd = ftrace_update_function (btinfo, end, insn.ip);
	  if (upd != end)
	    {
	      *pend = end = upd;

	      if (begin == NULL)
		*pbegin = begin = upd;
	    }

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, end->level);

	  btrace_insn btinsn = pt_btrace_insn (insn);
	  ftrace_update_insns (end, &btinsn);
	}

      if (errcode == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (btinfo, end, errcode);
      if (begin == NULL)
	*pbegin = begin = end;

      VEC_safe_push (bfun_s, *gaps, end);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (errcode)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

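/* Illustration (not part of the original source): this callback is hooked
   into the decoder's image in btrace_compute_ftrace_pt below, roughly

     image = pt_insn_get_image (decoder);
     pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);

   so libipt calls back into GDB whenever it needs inferior memory; a
   -pte_nomap result tells the decoder that the address is unmapped.  */
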
/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
	   pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &btinfo->begin, &btinfo->end, &level,
		     gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
	{
	  btinfo->end = ftrace_new_gap (btinfo, btinfo->end, BDE_PT_USER_QUIT);

	  VEC_safe_push (bfun_s, *gaps, btinfo->end);
	}

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* !defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  VEC (bfun_s) **gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
			 VEC (bfun_s) **gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

static void
btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  if (!VEC_empty (bfun_s, *gaps))
    {
      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
      btrace_bridge_gaps (tp, gaps);
    }
}

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  VEC (bfun_s) *gaps;
  struct cleanup *old_chain;

  gaps = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, &gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, &gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, &gaps);

  do_cleanups (old_chain);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.

	 If we can't access TP's registers, TP is most likely running.  In this
	 case, we can't really say where tracing was enabled so it should be
	 safe to simply skip this step.

	 This is not relevant for BTRACE_FORMAT_PT since the trace will already
	 start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
	  && can_access_registers_ptid (tp->ptid))
	btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

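/* Illustration (not part of the original source): assume the old trace ends
   at pc 0x4005d0 and a delta read returns the blocks [0x400600; 0x400610]
   and [0; 0x4005f0], most recent first.  The chronologically first block is
   the last vector entry; its unknown begin address of 0 is rewritten to
   0x4005d0 above, and the instruction at 0x4005d0 is popped so that the
   subsequent recomputation adds it back exactly once.  */
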
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	case BDE_BTS_OVERFLOW:
	  return _("instruction overflow");

	case BDE_BTS_INSN_SIZE:
	  return _("unknown instruction");

	default:
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  return _("trace decode cancelled");

	case BDE_PT_DISABLED:
	  return _("disabled");

	case BDE_PT_OVERFLOW:
	  return _("overflow");

	default:
	  if (errcode < 0)
	    return pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}

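/* Illustration (not part of the original source): a BTS gap created with
   BDE_BTS_OVERFLOW renders as "instruction overflow", while a negative
   Intel PT errcode falls through to libipt's pt_errstr.  Callers can thus
   print gaps uniformly without knowing the trace format.  */
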
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
     can store a gdb.Record object in Python referring to a different thread
     than the current one, temporarily set INFERIOR_PTID.  */
  cleanup = save_inferior_ptid ();
  inferior_ptid = tp->ptid;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_ptid (tp->ptid));

  btrace_data_init (&btrace);
  make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;
  for (auto &bfun : btinfo->functions)
    {
      VEC_free (btrace_insn_s, bfun->insn);
      xfree (bfun);
    }

  btinfo->functions.clear ();
  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}

/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
			 const struct gdb_xml_element *element,
			 void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
		 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
		     const struct gdb_xml_element *element,
		     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

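/* Illustration (not part of the original source): a minimal BTS document
   accepted by these elements looks like

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400400" end="0x4004f0"/>
     </btrace>

   while a PT document carries an optional <pt-config><cpu .../></pt-config>
   record followed by the raw, hex-encoded trace in a <raw> element inside
   <pt>.  */
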
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

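/* Illustration (not part of the original source): a typical configuration
   document matched by these elements is

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   where the optional size attributes report the per-thread trace buffer
   size in bytes.  */
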
/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

2269 btrace_insn_get_error (const struct btrace_insn_iterator
*it
)
2271 return it
->function
->errcode
;
2277 btrace_insn_number (const struct btrace_insn_iterator
*it
)
2279 return it
->function
->insn_offset
+ it
->index
;
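
/* For example, if the iterator's function segment starts at instruction
   number 10 (insn_offset == 10) and the iterator's index is 3, the
   instruction number computed above is 13.  */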

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
  it->index = 0;
}

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->function = bfun;
  it->index = length;
}

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new
             function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We
             count it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  return (int) (lnum - rnum);
}
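
/* A sketch of a typical forward walk over the instruction history using
   the iterators above; error handling is omitted and "process" stands for
   whatever the caller does with each instruction:

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) != 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           process (insn->pc);

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }

   A NULL result from btrace_insn_get indicates a gap in the trace.  */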

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = btinfo->functions[average];

      if (number < bfun->insn_offset)
        {
          upper = average - 1;
          continue;
        }

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
        {
          lower = average + 1;
          continue;
        }

      break;
    }

  it->btinfo = btinfo;
  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}
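
/* The binary search above relies on each function segment covering the
   half-open range [insn_offset, insn_offset + ftrace_call_num_insn).
   For example, with segments covering [0, 5), [5, 6) and [6, 9), looking
   up number 5 probes the middle segment first and stops there with
   index 0.  */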

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     of the last function.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail out
         earlier.  On the other hand, it is very unlikely that we search for
         a nonexistent function.  */
    }

  return 0;
}

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}

/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini ((struct btrace_data *) arg);
}

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}
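
/* A sketch of the intended use, mirroring parse_xml_btrace above: register
   the cleanup before filling in the data and discard it once the data is
   to be kept:

     cleanup = make_cleanup_btrace_data (btrace);
     ... fill in *btrace ...
     discard_cleanups (cleanup);

   If an error is thrown before discard_cleanups, btrace_data_fini is run
   automatically.  */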

#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
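
/* For illustration, a tip packet with ipc 3 and ip 0x7ffff7df1570 would be
   printed by the function above as

     tip 3: 0x7ffff7df1570

   where the values are made up.  */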

/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof(packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                             &packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}

/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  volatile struct gdb_exception except;
  struct pt_packet_decoder *decoder;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
  config.cpu.family = pt->config.cpu.family;
  config.cpu.model = pt->config.cpu.model;
  config.cpu.stepping = pt->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}

#endif /* !defined (HAVE_LIBIPT) */

/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s,
                         btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        VEC (btrace_block_s) *blocks;
        unsigned int blk;

        blocks = btinfo->data.variant.bts.blocks;
        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block_s *block;

            block = VEC_index (btrace_block_s, blocks, blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block->begin),
                               core_addr_to_string_nz (block->end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        VEC (btrace_pt_packet_s) *packets;
        unsigned int pkt;

        packets = btinfo->maint.variant.pt.packets;
        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet *packet;

            packet = VEC_index (btrace_pt_packet_s, packets, pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

            if (packet->errcode == pte_ok)
              pt_print_packet (&packet->packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* Read a number from an argument string.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}

/* Read a context size from an argument string.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}

/* Complain about junk at the end of an argument string.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
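
/* For example, with *arg pointing at "5,+10", get_uint consumes "5" and
   leaves *arg at ",+10"; after the caller skips the comma and the '+',
   get_context_size consumes "10"; no_chunk then verifies that nothing
   follows.  */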

/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
        size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
        size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (from >= end)
        error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
        {
          arg = skip_spaces (++arg);

          if (*arg == '+')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              if (end - from < size)
                size = end - from;
              to = from + size;
            }
          else if (*arg == '-')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              /* Include the packet given as first argument.  */
              from += 1;
              to = from;

              if (to - begin < size)
                size = to - begin;
              from = to - size;
            }
          else
            {
              to = get_uint (&arg);

              /* Include the packet at the second argument and silently
                 truncate the range.  */
              if (to < end)
                to += 1;
              else
                to = end;

              no_chunk (arg);
            }
        }
      else
        {
          no_chunk (arg);

          if (end - from < size)
            size = end - from;
          to = from + size;
        }
    }

  dont_repeat ();

  btrace_maint_print_packets (btinfo, from, to);
}
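
/* Example invocations; this is a sketch, actual packet numbers depend on
   the recorded trace:

     (gdb) maint btrace packet-history          prints the next ten packets
     (gdb) maint btrace packet-history -        prints the previous ten
     (gdb) maint btrace packet-history 42       prints packets 42 to 51
     (gdb) maint btrace packet-history 42,50    prints packets 42 to 50
     (gdb) maint btrace packet-history 42,+5    prints packets 42 to 46  */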

/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
}

/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_clear (tp);
}

/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
                         VEC_length (btrace_block_s,
                                     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %u.\n"),
                           VEC_length (btrace_pt_packet_s,
                                       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c,
                               const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}

/* Initialize btrace maintenance commands.  */

void _initialize_btrace (void);
void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
                  _("Branch tracing maintenance commands."),
                  &maint_btrace_cmdlist, "maintenance btrace ",
                  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
                  &maint_btrace_set_cmdlist, "maintenance set btrace ",
                  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
                  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
                  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
                  &maint_btrace_show_cmdlist, "maintenance show btrace ",
                  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
                  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
                  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
                           &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
                           NULL, show_maint_btrace_pt_skip_pad,
                           &maint_btrace_pt_set_cmdlist,
                           &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
           _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
           maint_btrace_clear_packet_history_cmd,
           _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
           _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n"),
           &maint_btrace_cmdlist);
}