/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "filenames.h"
#include "xml-support.h"
#include "gdbsupport/rsp-low.h"
#include "cli/cli-utils.h"

/* For maintenance commands.  */
#include "record-btrace.h"

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static bool maint_btrace_pt_skip_pad = true;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                     \
  do                                                            \
    {                                                           \
      if (record_debug != 0)                                    \
        fprintf_unfiltered (gdb_stdlog,                         \
                            "[btrace] " msg "\n", ##args);      \
    }                                                           \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
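
/* Usage sketch (illustrative only, not part of the build): with record
   debugging enabled ("set debug record 1"), a call such as

     DEBUG ("enable thread %s", print_thread_id (tp));

   prints "[btrace] enable thread 1.1" to gdb_stdlog, while the
   DEBUG_FTRACE variant adds an extra "[ftrace] " prefix.  */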

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();

  if (msym != NULL)
    return msym->print_name ();

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + bfun->insn.size ();

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return bfun->insn.size ();
}

/* Return the function segment with the given NUMBER or NULL if no such segment
   exists.  BTINFO is the branch trace information for the current thread.  */

static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* A const version of the function above.  */

static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
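
/* Illustrative examples (not part of the build): for a segment in
   "foo" with full symbol information, passing MFUN/FUN for "bar"
   reports a switch, as does passing NULL for both (symbol information
   lost) or the reverse (symbol information gained); passing symbols
   naming the same "foo" in the same file reports no switch.  */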

/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.
   This invalidates all struct btrace_function pointers currently held.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  int level;
  unsigned int number, insn_offset;

  if (btinfo->functions.empty ())
    {
      /* Start counting NUMBER and INSN_OFFSET at one.  */
      level = 0;
      number = 1;
      insn_offset = 1;
    }
  else
    {
      const struct btrace_function *prev = &btinfo->functions.back ();
      level = prev->level;
      number = prev->number + 1;
      insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
    }

  btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
  return &btinfo->functions.back ();
}
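
/* Illustrative example (assumed numbers): a trace that enters main,
   calls foo after ten instructions, and returns might yield

     segment #1  main  insn_offset  1                       level 0
     segment #2  foo   insn_offset 11                       level 1
     segment #3  main  insn_offset 11 + <insns in foo>      level 0

   NUMBER and INSN_OFFSET are one-based; segment #3 continues the same
   function instance as #1.  */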

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
                     struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  unsigned int prev, next;

  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}

/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */
static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
                   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
                    struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
                  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      btrace_insn &last = bfun->insn.back ();

      if (last.iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->next == 0);

      caller->next = bfun->number;
      bfun->prev = caller->number;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost function and add a new caller for it.
             This should handle a series of initial tail calls.  */
          while (prev->up != 0)
            prev = ftrace_find_call_by_number (btinfo, prev->up);

          bfun->level = prev->level - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned but didn't.  Let's start a new, separate back trace
             from PREV's level.  */
          bfun->level = prev->level - 1;

          /* We fix up the back trace for PREV but leave other function segments
             on the same level as they are.
             This should handle things like schedule () correctly where we're
             switching contexts.  */
          prev->up = bfun->number;
          prev->flags = BFUN_UP_LINKS_TO_RET;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev, *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
                std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = &btinfo->functions.back ();
      if (bfun->errcode != 0 || !bfun->insn.empty ())
        bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;
  gaps.push_back (bfun->number);

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            return ftrace_new_return (btinfo, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (btinfo, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* A jump to the start of a function is (typically) a tail call.  */
            if (start == pc)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            /* Some versions of _Unwind_RaiseException use an indirect
               jump to 'return' to the exception handler of the caller
               handling the exception instead of a return.  Let's restrict
               this heuristic to that and related functions.  */
            const char *fname = ftrace_print_function_name (bfun);
            if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
              {
                struct btrace_function *caller
                  = ftrace_find_call_by_number (btinfo, bfun->up);
                caller = ftrace_find_caller (btinfo, caller, mfun, fun);
                if (caller != NULL)
                  return ftrace_new_return (btinfo, mfun, fun);
              }

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call if we're switching functions
               and as an intra-function branch if we don't.  */
            if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
              return ftrace_new_tailcall (btinfo, mfun, fun);

            break;
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}

/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
  bfun->insn.push_back (insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  try
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  catch (const gdb_exception_error &error)
    {
    }

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
                        struct btrace_function *lhs,
                        struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
        return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}
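
/* Illustrative example (assumed back traces): with LHS in bar reached
   via main > foo > bar and RHS in bar also reached via main > foo >
   bar, the walk matches bar/bar, foo/foo, and main/main and returns 3.
   If the caller chains disagree at any level, it returns 0.  */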

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */

static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
                    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size () - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back ();
  if (last->insn.size () != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
                     struct btrace_function *prev,
                     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
        {
          DEBUG_FTRACE ("using next's callers");
          ftrace_fixup_caller (btinfo, prev, next, flags);
        }
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
        {
          DEBUG_FTRACE ("using prev's callers");
          ftrace_fixup_caller (btinfo, next, prev, flags);
        }
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
         link to add the tail callers to NEXT's back trace.

         This removes NEXT->UP from NEXT's back trace.  It will be added back
         when connecting NEXT and PREV's callers - provided they exist.

         If PREV's back trace consists of a series of tail calls without an
         actual call, there will be no further connection and NEXT's caller will
         be removed for good.  To catch this case, we handle it here and connect
         the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        {
          struct btrace_function *caller;
          btrace_function_flags next_flags, prev_flags;

          /* We checked NEXT->UP above so CALLER can't be NULL.  */
          caller = ftrace_find_call_by_number (btinfo, next->up);
          next_flags = next->flags;
          prev_flags = prev->flags;

          DEBUG_FTRACE ("adding prev's tail calls to next");

          prev = ftrace_find_call_by_number (btinfo, prev->up);
          ftrace_fixup_caller (btinfo, next, prev, prev_flags);

          for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
                                                                  prev->up))
            {
              /* At the end of PREV's back trace, continue with CALLER.  */
              if (prev->up == 0)
                {
                  DEBUG_FTRACE ("fixing up link for tailcall chain");
                  ftrace_debug (prev, "..top");
                  ftrace_debug (caller, "..up");

                  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

                  /* If we skipped any tail calls, this may move CALLER to a
                     different function level.

                     Note that changing CALLER's level is only OK because we
                     know that this is the last iteration of the bottom-to-top
                     walk in ftrace_connect_backtrace.

                     Otherwise we will fix up CALLER's level when we connect it
                     to PREV's caller in the next iteration.  */
                  ftrace_fixup_level (btinfo, caller,
                                      prev->level - caller->level - 1);
                  break;
                }

              /* There's nothing to do if we find a real call.  */
              if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
                {
                  DEBUG_FTRACE ("will fix up link in next iteration");
                  break;
                }
            }
        }
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
                          struct btrace_function *lhs,
                          struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
                   struct btrace_function *lhs, struct btrace_function *rhs,
                   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
                rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
         cand_r = ftrace_get_caller (btinfo, cand_r))
      {
        int matches;

        matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
        if (best_matches < matches)
          {
            best_matches = matches;
            best_l = cand_l;
            best_r = cand_r;
          }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}
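
/* Illustrative sketch (assumed trace): with LHS ending ... main > foo
   and RHS starting foo > main > ..., the nested candidate loops try
   (foo, foo), (foo, main), (main, foo), (main, main) and keep the pair
   whose caller chains match on the most segments - here (foo, foo) -
   before levels are aligned and the back traces are connected.  */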

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
         skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
        {
          for (const unsigned int number : gaps)
            {
              struct btrace_function *gap, *lhs, *rhs;
              int bridged;

              gap = ftrace_find_call_by_number (btinfo, number);

              /* We may have a sequence of gaps if we run from one error into
                 the next as we try to re-sync onto the trace stream.  Ignore
                 all but the leftmost gap in such a sequence.

                 Also ignore gaps at the beginning of the trace.  */
              lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
              if (lhs == NULL || lhs->errcode != 0)
                continue;

              /* Skip gaps to the right.  */
              rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
              while (rhs != NULL && rhs->errcode != 0)
                rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);

              /* Ignore gaps at the end of the trace.  */
              if (rhs == NULL)
                continue;

              bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);

              /* Keep track of gaps we were not able to bridge and try again.
                 If we just pushed them to the end of GAPS we would risk an
                 infinite loop in case we simply cannot bridge a gap.  */
              if (bridged == 0)
                remaining.push_back (number);
            }

          /* Let's see if we made any progress.  */
          if (remaining.size () == gaps.size ())
            break;

          gaps.clear ();
          gaps.swap (remaining);
        }

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (gaps.empty ())
        break;

      remaining.clear ();
    }

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (btinfo);
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace,
                           std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  blk = btrace->blocks->size ();

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      CORE_ADDR pc;

      blk -= 1;

      const btrace_block &block = btrace->blocks->at (blk);
      pc = block.begin;

      for (;;)
        {
          struct btrace_function *bfun;
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block.end < pc)
            {
              /* Indicate the gap in the trace.  */
              bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);

              warning (_("Recorded trace may be corrupted at instruction "
                         "%u (pc = %s)."), bfun->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          bfun = ftrace_update_function (btinfo, pc);

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = std::min (level, bfun->level);

          size = 0;
          try
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          catch (const gdb_exception_error &error)
            {
            }

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (bfun, insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block.end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);

              warning (_("Recorded trace may be incomplete at instruction %u "
                         "(pc = %s)."), bfun->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = std::min (level, bfun->level);
        }
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
          pt_reclassify_insn (insn.iclass),
          pt_btrace_insn_flags (insn)};
}
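
/* Illustrative example (assumed values): a pt_insn with ip = 0x401000,
   size = 5, iclass = ptic_call and speculative = 0 converts to the
   btrace_insn { 0x401000, 5, BTRACE_INSN_CALL, 0 }.  */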

/* Handle instruction decode events (libipt-v2).  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
                       struct pt_insn_decoder *decoder,
                       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
        break;

      switch (event.type)
        {
        default:
          break;

        case ptev_enabled:
          if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
            {
              bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

              pt_insn_get_offset (decoder, &offset);

              warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
                         PRIx64 ")."), bfun->insn_offset - 1, offset);
            }
          break;

        case ptev_overflow:
          bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

          pt_insn_get_offset (decoder, &offset);

          warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
                   bfun->insn_offset - 1, offset);
          break;
        }
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}

/* Handle events indicated by flags in INSN (libipt-v1).  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
                            struct pt_insn_decoder *decoder,
                            const struct pt_insn &insn,
                            std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
               insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
                 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}

/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
               struct pt_insn_decoder *decoder,
               int *plevel,
               std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int status;

  for (;;)
    {
      struct pt_insn insn;

      status = pt_insn_sync_forward (decoder);
      if (status < 0)
        {
          if (status != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (status)));
          break;
        }

      for (;;)
        {
          /* Handle events from the previous iteration or synchronization.  */
          status = handle_pt_insn_events (btinfo, decoder, gaps, status);
          if (status < 0)
            break;

          status = pt_insn_next (decoder, &insn, sizeof (insn));
          if (status < 0)
            break;

          /* Handle events indicated by flags in INSN.  */
          handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);

          bfun = ftrace_update_function (btinfo, insn.ip);

          /* Maintain the function level offset.  */
          *plevel = std::min (*plevel, bfun->level);

          ftrace_update_insns (bfun, pt_btrace_insn (insn));
        }

      if (status == -pte_eos)
        break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, status, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
               offset, insn.ip, pt_errstr (pt_errcode (status)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int result, errcode;

  result = (int) size;
  try
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        result = -pte_nomap;
    }
  catch (const gdb_exception_error &error)
    {
      result = -pte_nomap;
    }

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                                       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  /* We treat an unknown vendor as 'no errata'.  */
  if (btrace->config.cpu.vendor != CV_UNKNOWN)
    {
      config.cpu.vendor
        = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
      config.cpu.family = btrace->config.cpu.family;
      config.cpu.model = btrace->config.cpu.model;
      config.cpu.stepping = btrace->config.cpu.stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace "
                 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
                                       NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  catch (const gdb_exception &error)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
        ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw;
    }

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          std::vector<unsigned int> &gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  If CPU is not NULL, overwrite the cpu in the
   branch trace configuration.  This is currently only used for the PT
   format.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp,
                         struct btrace_data *btrace,
                         const struct btrace_cpu *cpu,
                         std::vector<unsigned int> &gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      /* Overwrite the cpu we use for enabling errata workarounds.  */
      if (cpu != nullptr)
        btrace->variant.pt.config.cpu = *cpu;
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

static void
btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  if (!gaps.empty ())
    {
      tp->btrace.ngaps += gaps.size ();
      btrace_bridge_gaps (tp, gaps);
    }
}

/* See btrace.h.  */

void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
                       const struct btrace_cpu *cpu)
{
  std::vector<unsigned int> gaps;

  try
    {
      btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
    }
  catch (const gdb_exception &error)
    {
      btrace_finalize_ftrace (tp, gaps);

      throw;
    }

  btrace_finalize_ftrace (tp, gaps);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct regcache *regcache;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp);
  pc = regcache_read_pc (regcache);

  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = new std::vector<btrace_block>;

  btrace.variant.bts.blocks->emplace_back (pc, pc);

  btrace_compute_ftrace (tp, &btrace, NULL);
}
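
/* Illustrative example (assumed pc): stopped at pc = 0x401234, this
   synthesizes the single BTS block [0x401234; 0x401234], so the
   computed trace gains exactly one entry for the current PC.  */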

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("Intel Processor Trace support was disabled at compile time."));
#endif /* !defined (HAVE_LIBIPT) */

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  try
    {
      /* Add an entry for the current PC so we start tracing from where we
         enabled it.

         If we can't access TP's registers, TP is most likely running.  In this
         case, we can't really say where tracing was enabled so it should be
         safe to simply skip this step.

         This is not relevant for BTRACE_FORMAT_PT since the trace will already
         start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
          && can_access_registers_thread (tp))
        btrace_add_pc (tp);
    }
  catch (const gdb_exception &exception)
    {
      btrace_disable (tp);

      throw;
    }
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  btrace_block *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!btrace->blocks->empty ());

  last_bfun = &btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (last_bfun->insn.empty ())
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = &btrace->blocks->back ();
  const btrace_insn &last_insn = last_bfun->insn.back ();

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
    {
      btrace->blocks->pop_back ();
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn.pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn.pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (&last_insn));

  last_bfun->insn.pop_back ();

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun->number == 1 && last_bfun->insn.empty ())
    btrace_clear (tp);

  return 0;
}
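
/* Illustrative example (assumed addresses): if the old trace ends at
   pc = 0x400500 and the delta trace's chronologically first block is
   [0; 0x400520], stitching rewrites that block to [0x400500; 0x400520]
   and the pruned instruction at 0x400500 is re-added when the delta
   trace is recomputed.  */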

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace->empty ())
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      delete btinfo->maint.variant.pt.packets;

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        case BDE_BTS_OVERFLOW:
          return _("instruction overflow");

        case BDE_BTS_INSN_SIZE:
          return _("unknown instruction");

        default:
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          return _("trace decode cancelled");

        case BDE_PT_DISABLED:
          return _("disabled");

        case BDE_PT_OVERFLOW:
          return _("overflow");

        default:
          if (errcode < 0)
            return pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
     can store a gdb.Record object in Python referring to a different thread
     than the current one, temporarily set INFERIOR_PTID.  */
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = tp->ptid;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_thread (tp));

  /* Let's first try to extend the trace we already have.  */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace.empty ())
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace.empty ())
    {
      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace, cpu);
    }
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  DEBUG ("free objfile");

  for (thread_info *tp : all_non_exited_threads ())
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data,
                          std::vector<gdb_xml_value> &attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value.get ();

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data,
                        std::vector<gdb_xml_value> &attributes)
{
  struct btrace_data *btrace;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = new std::vector<btrace_block>;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value.get ();
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value.get ();

  btrace->variant.bts.blocks->emplace_back (*begin, *end);
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
               gdb_byte **pdata, size_t *psize)
{
  gdb_byte *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  gdb::unique_xmalloc_ptr<gdb_byte> data ((gdb_byte *) xmalloc (size));
  bin = data.get ();

  /* We use hex encoding - see gdbsupport/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
        gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  *pdata = data.release ();
  *psize = size;
}
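
/* Illustrative example: body_text "48656c6c6f" decodes to the five
   bytes 0x48 0x65 0x6c 0x6c 0x6f ("Hello"); an odd-length body such
   as "414" is rejected with "Bad raw data size.".  */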

/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
                                const struct gdb_xml_element *element,
                                void *user_data,
                                std::vector<gdb_xml_value> &attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor =
    (const char *) xml_find_attribute (attributes, "vendor")->value.get ();
  family
    = (ULONGEST *) xml_find_attribute (attributes, "family")->value.get ();
  model
    = (ULONGEST *) xml_find_attribute (attributes, "model")->value.get ();
  stepping
    = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value.get ();

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
                 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
                     const struct gdb_xml_element *element,
                     void *user_data,
                     std::vector<gdb_xml_value> &attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
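
/* Example documents (illustrative) accepted by the tables above:

     <btrace version="1.0">
       <block begin="0x401000" end="0x401035"/>
     </btrace>

   or, for Intel Processor Trace, with a hex-encoded payload in <raw>:

     <btrace version="1.0">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="58" stepping="9"/>
         </pt-config>
         <raw>...hex digits...</raw>
       </pt>
     </btrace>  */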
2201 parse_xml_btrace (struct btrace_data
*btrace
, const char *buffer
)
2203 #if defined (HAVE_LIBEXPAT)
2207 result
.format
= BTRACE_FORMAT_NONE
;
2209 errcode
= gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements
,
2212 error (_("Error parsing branch trace."));
2214 /* Keep parse results. */
2215 *btrace
= std::move (result
);
2217 #else /* !defined (HAVE_LIBEXPAT) */
2219 error (_("Cannot process branch trace. XML support was disabled at "
2222 #endif /* !defined (HAVE_LIBEXPAT) */
#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data,
                           std::vector<gdb_xml_value> &attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value.get ();
}
2247 /* Parse a btrace-conf "pt" xml record. */
2250 parse_xml_btrace_conf_pt (struct gdb_xml_parser
*parser
,
2251 const struct gdb_xml_element
*element
,
2253 std::vector
<gdb_xml_value
> &attributes
)
2255 struct btrace_config
*conf
;
2256 struct gdb_xml_value
*size
;
2258 conf
= (struct btrace_config
*) user_data
;
2259 conf
->format
= BTRACE_FORMAT_PT
;
2262 size
= xml_find_attribute (attributes
, "size");
2264 conf
->pt
.size
= (unsigned int) *(ULONGEST
*) size
->value
.get ();
static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
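
/* For illustration only (the size value is made up): a configuration
   document accepted by the tables above might look like this.  See
   btrace-conf.dtd for the authoritative grammar.

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>  */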
#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
#if defined (HAVE_LIBEXPAT)

  int errcode;
  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process the branch trace configuration.  XML support "
           "was disabled at compile time."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = &it->btinfo->functions[it->call_index];

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = bfun->insn.size ();
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return &bfun->insn[index];
}
/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].errcode;
}
/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = &btinfo->functions.back ();
  length = bfun->insn.size ();

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = bfun->insn.size ();

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= adv;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = bfun->insn.size ();

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
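
/* A sketch (not code from this file) of how the instruction iterator
   functions above are typically combined to walk the recorded trace:

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn == NULL)
           ...handle a gap; see btrace_insn_get_error...
         else
           ...use INSN->PC...

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }  */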
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
        {
          upper = average - 1;
          continue;
        }

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
        {
          lower = average + 1;
          continue;
        }

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;
  return 1;
}
/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return &it->btinfo->functions[it->index];
}
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e. the
     current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}
/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
         instruction (i.e. the current instruction) it is not actually part of
         the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length;
      else
        it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
         If the last function segment contains only one instruction (i.e. the
         current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
        stride = length - it->index - 1;
      else
        stride = length - it->index;

      it->index = length;
    }

  return stride;
}
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length - 2;
      else
        it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
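
/* Note that function call numbers are 1-based while the iterator stores a
   0-based index into BTINFO->FUNCTIONS; hence the NUMBER - 1 above and the
   rejection of NUMBER == 0.  */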
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
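
/* For example (illustrative values), a TIP packet with IP compression 3
   and address 0x401000 would print as "tip 3: 0x401000".  */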
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  if (maint->variant.pt.packets == NULL)
    maint->variant.pt.packets = new std::vector<btrace_pt_packet>;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof(packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              maint->variant.pt.packets->push_back (packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      maint->variant.pt.packets->push_back (packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  const struct btrace_cpu *cpu;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  cpu = record_btrace_get_cpu ();
  if (cpu == nullptr)
    cpu = &pt->config.cpu;

  /* We treat an unknown vendor as 'no errata'.  */
  if (cpu->vendor != CV_UNKNOWN)
    {
      config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
      config.cpu.family = cpu->family;
      config.cpu.model = cpu->model;
      config.cpu.stepping = cpu->stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace "
                 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  catch (const gdb_exception &except)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw;
    }

  pt_pkt_free_decoder (decoder);
}

#endif /* defined (HAVE_LIBIPT) */
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = btinfo->data.variant.bts.blocks->size ();
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (btinfo->maint.variant.pt.packets == nullptr)
        btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;

      if (btinfo->maint.variant.pt.packets->empty ())
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = btinfo->maint.variant.pt.packets->size ();
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        const std::vector<btrace_block> &blocks
          = *btinfo->data.variant.bts.blocks;
        unsigned int blk;

        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block &block = blocks.at (blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block.begin),
                               core_addr_to_string_nz (block.end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        const std::vector<btrace_pt_packet> &packets
          = *btinfo->maint.variant.pt.packets;
        unsigned int pkt;

        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet &packet = packets.at (pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet.offset);

            if (packet.errcode == pte_ok)
              pt_print_packet (&packet.packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet.errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Read a number from an argument string.  */

static unsigned int
get_uint (const char **arg)
{
  const char *begin, *pos;
  char *end;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
/* Read a context size from an argument string.  */

static int
get_context_size (const char **arg)
{
  const char *pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  char *end;
  long result = strtol (pos, &end, 10);
  *arg = end;
  return result;
}
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (const char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
3223 /* The "maintenance btrace packet-history" command. */
3226 maint_btrace_packet_history_cmd (const char *arg
, int from_tty
)
3228 struct btrace_thread_info
*btinfo
;
3229 unsigned int size
, begin
, end
, from
, to
;
3231 thread_info
*tp
= find_thread_ptid (inferior_ptid
);
3233 error (_("No thread."));
3236 btinfo
= &tp
->btrace
;
3238 btrace_maint_update_packets (btinfo
, &begin
, &end
, &from
, &to
);
3241 printf_unfiltered (_("No trace.\n"));
3245 if (arg
== NULL
|| *arg
== 0 || strcmp (arg
, "+") == 0)
3249 if (end
- from
< size
)
3253 else if (strcmp (arg
, "-") == 0)
3257 if (to
- begin
< size
)
3263 from
= get_uint (&arg
);
3265 error (_("'%u' is out of range."), from
);
3267 arg
= skip_spaces (arg
);
3270 arg
= skip_spaces (++arg
);
3275 size
= get_context_size (&arg
);
3279 if (end
- from
< size
)
3283 else if (*arg
== '-')
3286 size
= get_context_size (&arg
);
3290 /* Include the packet given as first argument. */
3294 if (to
- begin
< size
)
3300 to
= get_uint (&arg
);
3302 /* Include the packet at the second argument and silently
3303 truncate the range. */
3316 if (end
- from
< size
)
3324 btrace_maint_print_packets (btinfo
, from
, to
);
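
/* For illustration, some accepted argument forms (the default context size
   is ten packets; ranges are silently truncated to the available packets):

     maint btrace packet-history         -- the next ten packets
     maint btrace packet-history -       -- the previous ten packets
     maint btrace packet-history 5       -- ten packets starting at 5
     maint btrace packet-history 5,8     -- packets 5 up to and including 8
     maint btrace packet-history 5,+3    -- three packets starting at 5
     maint btrace packet-history 5,-3    -- three packets ending at 5  */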
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_thread_info *btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_clear (tp);
}
/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
             all_commands, gdb_stdout);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (const char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %zu.\n"),
                         btinfo->data.variant.bts.blocks->size ());
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %zu.\n"),
                           ((btinfo->maint.variant.pt.packets == nullptr)
                            ? 0 : btinfo->maint.variant.pt.packets->size ()));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c,
                               const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
                  _("Branch tracing maintenance commands."),
                  &maint_btrace_cmdlist, "maintenance btrace ",
                  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
                  &maint_btrace_set_cmdlist, "maintenance set btrace ",
                  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
                  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
                  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
                  &maint_btrace_show_cmdlist, "maintenance show btrace ",
                  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
                  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
                  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
                           &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
                           NULL, show_maint_btrace_pt_skip_pad,
                           &maint_btrace_pt_set_cmdlist,
                           &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
           _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first."),
           &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
           maint_btrace_clear_packet_history_cmd,
           _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data."),
           &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
           _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew."),
           &maint_btrace_cmdlist);
}