1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
36 #include "cli/cli-utils.h"
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
/* Control whether to skip PAD packets when computing the packet history.
   Non-zero (the default) means PAD packets are skipped.  */
static int maint_btrace_pt_skip_pad = 1;
/* Forward declaration: add a trace entry for TP's current PC.  */
static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

/* Print a debug message prefixed with "[ftrace]" on top of DEBUG.  */

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
68 /* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
72 ftrace_print_function_name (const struct btrace_function
*bfun
)
74 struct minimal_symbol
*msym
;
81 return SYMBOL_PRINT_NAME (sym
);
84 return MSYMBOL_PRINT_NAME (msym
);
89 /* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
93 ftrace_print_filename (const struct btrace_function
*bfun
)
101 filename
= symtab_to_filename_for_display (symbol_symtab (sym
));
103 filename
= "<unknown>";
108 /* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
112 ftrace_print_insn_addr (const struct btrace_insn
*insn
)
117 return core_addr_to_string_nz (insn
->pc
);
120 /* Print an ftrace debug status message. */
123 ftrace_debug (const struct btrace_function
*bfun
, const char *prefix
)
125 const char *fun
, *file
;
126 unsigned int ibegin
, iend
;
129 fun
= ftrace_print_function_name (bfun
);
130 file
= ftrace_print_filename (bfun
);
133 ibegin
= bfun
->insn_offset
;
134 iend
= ibegin
+ VEC_length (btrace_insn_s
, bfun
->insn
);
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix
, fun
, file
, level
, ibegin
, iend
);
140 /* Return non-zero if BFUN does not match MFUN and FUN,
141 return zero otherwise. */
144 ftrace_function_switched (const struct btrace_function
*bfun
,
145 const struct minimal_symbol
*mfun
,
146 const struct symbol
*fun
)
148 struct minimal_symbol
*msym
;
154 /* If the minimal symbol changed, we certainly switched functions. */
155 if (mfun
!= NULL
&& msym
!= NULL
156 && strcmp (MSYMBOL_LINKAGE_NAME (mfun
), MSYMBOL_LINKAGE_NAME (msym
)) != 0)
159 /* If the symbol changed, we certainly switched functions. */
160 if (fun
!= NULL
&& sym
!= NULL
)
162 const char *bfname
, *fname
;
164 /* Check the function name. */
165 if (strcmp (SYMBOL_LINKAGE_NAME (fun
), SYMBOL_LINKAGE_NAME (sym
)) != 0)
168 /* Check the location of those functions, as well. */
169 bfname
= symtab_to_fullname (symbol_symtab (sym
));
170 fname
= symtab_to_fullname (symbol_symtab (fun
));
171 if (filename_cmp (fname
, bfname
) != 0)
175 /* If we lost symbol information, we switched functions. */
176 if (!(msym
== NULL
&& sym
== NULL
) && mfun
== NULL
&& fun
== NULL
)
179 /* If we gained symbol information, we switched functions. */
180 if (msym
== NULL
&& sym
== NULL
&& !(mfun
== NULL
&& fun
== NULL
))
186 /* Allocate and initialize a new branch trace function segment.
187 PREV is the chronologically preceding function segment.
188 MFUN and FUN are the symbol information we have for this function. */
190 static struct btrace_function
*
191 ftrace_new_function (struct btrace_function
*prev
,
192 struct minimal_symbol
*mfun
,
195 struct btrace_function
*bfun
;
197 bfun
= XCNEW (struct btrace_function
);
201 bfun
->flow
.prev
= prev
;
205 /* Start counting at one. */
207 bfun
->insn_offset
= 1;
211 gdb_assert (prev
->flow
.next
== NULL
);
212 prev
->flow
.next
= bfun
;
214 bfun
->number
= prev
->number
+ 1;
215 bfun
->insn_offset
= (prev
->insn_offset
216 + VEC_length (btrace_insn_s
, prev
->insn
));
217 bfun
->level
= prev
->level
;
223 /* Update the UP field of a function segment. */
226 ftrace_update_caller (struct btrace_function
*bfun
,
227 struct btrace_function
*caller
,
228 enum btrace_function_flag flags
)
230 if (bfun
->up
!= NULL
)
231 ftrace_debug (bfun
, "updating caller");
236 ftrace_debug (bfun
, "set caller");
239 /* Fix up the caller for all segments of a function. */
242 ftrace_fixup_caller (struct btrace_function
*bfun
,
243 struct btrace_function
*caller
,
244 enum btrace_function_flag flags
)
246 struct btrace_function
*prev
, *next
;
248 ftrace_update_caller (bfun
, caller
, flags
);
250 /* Update all function segments belonging to the same function. */
251 for (prev
= bfun
->segment
.prev
; prev
!= NULL
; prev
= prev
->segment
.prev
)
252 ftrace_update_caller (prev
, caller
, flags
);
254 for (next
= bfun
->segment
.next
; next
!= NULL
; next
= next
->segment
.next
)
255 ftrace_update_caller (next
, caller
, flags
);
258 /* Add a new function segment for a call.
259 CALLER is the chronologically preceding function segment.
260 MFUN and FUN are the symbol information we have for this function. */
262 static struct btrace_function
*
263 ftrace_new_call (struct btrace_function
*caller
,
264 struct minimal_symbol
*mfun
,
267 struct btrace_function
*bfun
;
269 bfun
= ftrace_new_function (caller
, mfun
, fun
);
273 ftrace_debug (bfun
, "new call");
278 /* Add a new function segment for a tail call.
279 CALLER is the chronologically preceding function segment.
280 MFUN and FUN are the symbol information we have for this function. */
282 static struct btrace_function
*
283 ftrace_new_tailcall (struct btrace_function
*caller
,
284 struct minimal_symbol
*mfun
,
287 struct btrace_function
*bfun
;
289 bfun
= ftrace_new_function (caller
, mfun
, fun
);
292 bfun
->flags
|= BFUN_UP_LINKS_TO_TAILCALL
;
294 ftrace_debug (bfun
, "new tail call");
299 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
300 symbol information. */
302 static struct btrace_function
*
303 ftrace_find_caller (struct btrace_function
*bfun
,
304 struct minimal_symbol
*mfun
,
307 for (; bfun
!= NULL
; bfun
= bfun
->up
)
309 /* Skip functions with incompatible symbol information. */
310 if (ftrace_function_switched (bfun
, mfun
, fun
))
313 /* This is the function segment we're looking for. */
320 /* Find the innermost caller in the back trace of BFUN, skipping all
321 function segments that do not end with a call instruction (e.g.
322 tail calls ending with a jump). */
324 static struct btrace_function
*
325 ftrace_find_call (struct btrace_function
*bfun
)
327 for (; bfun
!= NULL
; bfun
= bfun
->up
)
329 struct btrace_insn
*last
;
332 if (bfun
->errcode
!= 0)
335 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
337 if (last
->iclass
== BTRACE_INSN_CALL
)
344 /* Add a continuation segment for a function into which we return.
345 PREV is the chronologically preceding function segment.
346 MFUN and FUN are the symbol information we have for this function. */
348 static struct btrace_function
*
349 ftrace_new_return (struct btrace_function
*prev
,
350 struct minimal_symbol
*mfun
,
353 struct btrace_function
*bfun
, *caller
;
355 bfun
= ftrace_new_function (prev
, mfun
, fun
);
357 /* It is important to start at PREV's caller. Otherwise, we might find
358 PREV itself, if PREV is a recursive function. */
359 caller
= ftrace_find_caller (prev
->up
, mfun
, fun
);
362 /* The caller of PREV is the preceding btrace function segment in this
363 function instance. */
364 gdb_assert (caller
->segment
.next
== NULL
);
366 caller
->segment
.next
= bfun
;
367 bfun
->segment
.prev
= caller
;
369 /* Maintain the function level. */
370 bfun
->level
= caller
->level
;
372 /* Maintain the call stack. */
373 bfun
->up
= caller
->up
;
374 bfun
->flags
= caller
->flags
;
376 ftrace_debug (bfun
, "new return");
380 /* We did not find a caller. This could mean that something went
381 wrong or that the call is simply not included in the trace. */
383 /* Let's search for some actual call. */
384 caller
= ftrace_find_call (prev
->up
);
387 /* There is no call in PREV's back trace. We assume that the
388 branch trace did not include it. */
390 /* Let's find the topmost call function - this skips tail calls. */
391 while (prev
->up
!= NULL
)
394 /* We maintain levels for a series of returns for which we have
396 We start at the preceding function's level in case this has
397 already been a return for which we have not seen the call.
398 We start at level 0 otherwise, to handle tail calls correctly. */
399 bfun
->level
= std::min (0, prev
->level
) - 1;
401 /* Fix up the call stack for PREV. */
402 ftrace_fixup_caller (prev
, bfun
, BFUN_UP_LINKS_TO_RET
);
404 ftrace_debug (bfun
, "new return - no caller");
408 /* There is a call in PREV's back trace to which we should have
409 returned. Let's remain at this level. */
410 bfun
->level
= prev
->level
;
412 ftrace_debug (bfun
, "new return - unknown caller");
419 /* Add a new function segment for a function switch.
420 PREV is the chronologically preceding function segment.
421 MFUN and FUN are the symbol information we have for this function. */
423 static struct btrace_function
*
424 ftrace_new_switch (struct btrace_function
*prev
,
425 struct minimal_symbol
*mfun
,
428 struct btrace_function
*bfun
;
430 /* This is an unexplained function switch. The call stack will likely
431 be wrong at this point. */
432 bfun
= ftrace_new_function (prev
, mfun
, fun
);
434 ftrace_debug (bfun
, "new switch");
439 /* Add a new function segment for a gap in the trace due to a decode error.
440 PREV is the chronologically preceding function segment.
441 ERRCODE is the format-specific error code. */
443 static struct btrace_function
*
444 ftrace_new_gap (struct btrace_function
*prev
, int errcode
)
446 struct btrace_function
*bfun
;
448 /* We hijack prev if it was empty. */
449 if (prev
!= NULL
&& prev
->errcode
== 0
450 && VEC_empty (btrace_insn_s
, prev
->insn
))
453 bfun
= ftrace_new_function (prev
, NULL
, NULL
);
455 bfun
->errcode
= errcode
;
457 ftrace_debug (bfun
, "new gap");
462 /* Update BFUN with respect to the instruction at PC. This may create new
464 Return the chronologically latest function segment, never NULL. */
466 static struct btrace_function
*
467 ftrace_update_function (struct btrace_function
*bfun
, CORE_ADDR pc
)
469 struct bound_minimal_symbol bmfun
;
470 struct minimal_symbol
*mfun
;
472 struct btrace_insn
*last
;
474 /* Try to determine the function we're in. We use both types of symbols
475 to avoid surprises when we sometimes get a full symbol and sometimes
476 only a minimal symbol. */
477 fun
= find_pc_function (pc
);
478 bmfun
= lookup_minimal_symbol_by_pc (pc
);
481 if (fun
== NULL
&& mfun
== NULL
)
482 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc
));
484 /* If we didn't have a function or if we had a gap before, we create one. */
485 if (bfun
== NULL
|| bfun
->errcode
!= 0)
486 return ftrace_new_function (bfun
, mfun
, fun
);
488 /* Check the last instruction, if we have one.
489 We do this check first, since it allows us to fill in the call stack
490 links in addition to the normal flow links. */
492 if (!VEC_empty (btrace_insn_s
, bfun
->insn
))
493 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
497 switch (last
->iclass
)
499 case BTRACE_INSN_RETURN
:
503 /* On some systems, _dl_runtime_resolve returns to the resolved
504 function instead of jumping to it. From our perspective,
505 however, this is a tailcall.
506 If we treated it as return, we wouldn't be able to find the
507 resolved function in our stack back trace. Hence, we would
508 lose the current stack back trace and start anew with an empty
509 back trace. When the resolved function returns, we would then
510 create a stack back trace with the same function names but
511 different frame id's. This will confuse stepping. */
512 fname
= ftrace_print_function_name (bfun
);
513 if (strcmp (fname
, "_dl_runtime_resolve") == 0)
514 return ftrace_new_tailcall (bfun
, mfun
, fun
);
516 return ftrace_new_return (bfun
, mfun
, fun
);
519 case BTRACE_INSN_CALL
:
520 /* Ignore calls to the next instruction. They are used for PIC. */
521 if (last
->pc
+ last
->size
== pc
)
524 return ftrace_new_call (bfun
, mfun
, fun
);
526 case BTRACE_INSN_JUMP
:
530 start
= get_pc_function_start (pc
);
532 /* If we can't determine the function for PC, we treat a jump at
533 the end of the block as tail call. */
534 if (start
== 0 || start
== pc
)
535 return ftrace_new_tailcall (bfun
, mfun
, fun
);
540 /* Check if we're switching functions for some other reason. */
541 if (ftrace_function_switched (bfun
, mfun
, fun
))
543 DEBUG_FTRACE ("switching from %s in %s at %s",
544 ftrace_print_insn_addr (last
),
545 ftrace_print_function_name (bfun
),
546 ftrace_print_filename (bfun
));
548 return ftrace_new_switch (bfun
, mfun
, fun
);
554 /* Add the instruction at PC to BFUN's instructions. */
557 ftrace_update_insns (struct btrace_function
*bfun
,
558 const struct btrace_insn
*insn
)
560 VEC_safe_push (btrace_insn_s
, bfun
->insn
, insn
);
562 if (record_debug
> 1)
563 ftrace_debug (bfun
, "update insn");
566 /* Classify the instruction at PC. */
568 static enum btrace_insn_class
569 ftrace_classify_insn (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
571 enum btrace_insn_class iclass
;
573 iclass
= BTRACE_INSN_OTHER
;
576 if (gdbarch_insn_is_call (gdbarch
, pc
))
577 iclass
= BTRACE_INSN_CALL
;
578 else if (gdbarch_insn_is_ret (gdbarch
, pc
))
579 iclass
= BTRACE_INSN_RETURN
;
580 else if (gdbarch_insn_is_jump (gdbarch
, pc
))
581 iclass
= BTRACE_INSN_JUMP
;
583 CATCH (error
, RETURN_MASK_ERROR
)
591 /* Compute the function branch trace from BTS trace. */
594 btrace_compute_ftrace_bts (struct thread_info
*tp
,
595 const struct btrace_data_bts
*btrace
)
597 struct btrace_thread_info
*btinfo
;
598 struct btrace_function
*begin
, *end
;
599 struct gdbarch
*gdbarch
;
600 unsigned int blk
, ngaps
;
603 gdbarch
= target_gdbarch ();
604 btinfo
= &tp
->btrace
;
605 begin
= btinfo
->begin
;
607 ngaps
= btinfo
->ngaps
;
608 level
= begin
!= NULL
? -btinfo
->level
: INT_MAX
;
609 blk
= VEC_length (btrace_block_s
, btrace
->blocks
);
613 btrace_block_s
*block
;
618 block
= VEC_index (btrace_block_s
, btrace
->blocks
, blk
);
623 struct btrace_insn insn
;
626 /* We should hit the end of the block. Warn if we went too far. */
629 /* Indicate the gap in the trace - unless we're at the
633 warning (_("Recorded trace may be corrupted around %s."),
634 core_addr_to_string_nz (pc
));
636 end
= ftrace_new_gap (end
, BDE_BTS_OVERFLOW
);
642 end
= ftrace_update_function (end
, pc
);
646 /* Maintain the function level offset.
647 For all but the last block, we do it here. */
649 level
= std::min (level
, end
->level
);
654 size
= gdb_insn_length (gdbarch
, pc
);
656 CATCH (error
, RETURN_MASK_ERROR
)
663 insn
.iclass
= ftrace_classify_insn (gdbarch
, pc
);
666 ftrace_update_insns (end
, &insn
);
668 /* We're done once we pushed the instruction at the end. */
669 if (block
->end
== pc
)
672 /* We can't continue if we fail to compute the size. */
675 warning (_("Recorded trace may be incomplete around %s."),
676 core_addr_to_string_nz (pc
));
678 /* Indicate the gap in the trace. We just added INSN so we're
679 not at the beginning. */
680 end
= ftrace_new_gap (end
, BDE_BTS_INSN_SIZE
);
688 /* Maintain the function level offset.
689 For the last block, we do it here to not consider the last
691 Since the last instruction corresponds to the current instruction
692 and is not really part of the execution history, it shouldn't
695 level
= std::min (level
, end
->level
);
699 btinfo
->begin
= begin
;
701 btinfo
->ngaps
= ngaps
;
703 /* LEVEL is the minimal function level of all btrace function segments.
704 Define the global level offset to -LEVEL so all function levels are
705 normalized to start at zero. */
706 btinfo
->level
= -level
;
709 #if defined (HAVE_LIBIPT)
711 static enum btrace_insn_class
712 pt_reclassify_insn (enum pt_insn_class iclass
)
717 return BTRACE_INSN_CALL
;
720 return BTRACE_INSN_RETURN
;
723 return BTRACE_INSN_JUMP
;
726 return BTRACE_INSN_OTHER
;
730 /* Return the btrace instruction flags for INSN. */
732 static btrace_insn_flags
733 pt_btrace_insn_flags (const struct pt_insn
*insn
)
735 btrace_insn_flags flags
= 0;
737 if (insn
->speculative
)
738 flags
|= BTRACE_INSN_FLAG_SPECULATIVE
;
743 /* Add function branch trace using DECODER. */
746 ftrace_add_pt (struct pt_insn_decoder
*decoder
,
747 struct btrace_function
**pbegin
,
748 struct btrace_function
**pend
, int *plevel
,
751 struct btrace_function
*begin
, *end
, *upd
;
753 int errcode
, nerrors
;
760 struct btrace_insn btinsn
;
763 errcode
= pt_insn_sync_forward (decoder
);
766 if (errcode
!= -pte_eos
)
767 warning (_("Failed to synchronize onto the Intel Processor "
768 "Trace stream: %s."), pt_errstr (pt_errcode (errcode
)));
772 memset (&btinsn
, 0, sizeof (btinsn
));
775 errcode
= pt_insn_next (decoder
, &insn
, sizeof(insn
));
779 /* Look for gaps in the trace - unless we're at the beginning. */
782 /* Tracing is disabled and re-enabled each time we enter the
783 kernel. Most times, we continue from the same instruction we
784 stopped before. This is indicated via the RESUMED instruction
785 flag. The ENABLED instruction flag means that we continued
786 from some other instruction. Indicate this as a trace gap. */
788 *pend
= end
= ftrace_new_gap (end
, BDE_PT_DISABLED
);
790 /* Indicate trace overflows. */
792 *pend
= end
= ftrace_new_gap (end
, BDE_PT_OVERFLOW
);
795 upd
= ftrace_update_function (end
, insn
.ip
);
801 *pbegin
= begin
= upd
;
804 /* Maintain the function level offset. */
805 *plevel
= std::min (*plevel
, end
->level
);
807 btinsn
.pc
= (CORE_ADDR
) insn
.ip
;
808 btinsn
.size
= (gdb_byte
) insn
.size
;
809 btinsn
.iclass
= pt_reclassify_insn (insn
.iclass
);
810 btinsn
.flags
= pt_btrace_insn_flags (&insn
);
812 ftrace_update_insns (end
, &btinsn
);
815 if (errcode
== -pte_eos
)
818 /* If the gap is at the very beginning, we ignore it - we will have
819 less trace, but we won't have any holes in the trace. */
823 pt_insn_get_offset (decoder
, &offset
);
825 warning (_("Failed to decode Intel Processor Trace near trace "
826 "offset 0x%" PRIx64
" near recorded PC 0x%" PRIx64
": %s."),
827 offset
, insn
.ip
, pt_errstr (pt_errcode (errcode
)));
829 /* Indicate the gap in the trace. */
830 *pend
= end
= ftrace_new_gap (end
, errcode
);
835 warning (_("The recorded execution trace may have gaps."));
838 /* A callback function to allow the trace decoder to read the inferior's
842 btrace_pt_readmem_callback (gdb_byte
*buffer
, size_t size
,
843 const struct pt_asid
*asid
, uint64_t pc
,
851 errcode
= target_read_code ((CORE_ADDR
) pc
, buffer
, size
);
855 CATCH (error
, RETURN_MASK_ERROR
)
864 /* Translate the vendor from one enum to another. */
866 static enum pt_cpu_vendor
867 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor
)
879 /* Finalize the function branch trace after decode. */
881 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder
*decoder
,
882 struct thread_info
*tp
, int level
)
884 pt_insn_free_decoder (decoder
);
886 /* LEVEL is the minimal function level of all btrace function segments.
887 Define the global level offset to -LEVEL so all function levels are
888 normalized to start at zero. */
889 tp
->btrace
.level
= -level
;
891 /* Add a single last instruction entry for the current PC.
892 This allows us to compute the backtrace at the current PC using both
893 standard unwind and btrace unwind.
894 This extra entry is ignored by all record commands. */
898 /* Compute the function branch trace from Intel Processor Trace
902 btrace_compute_ftrace_pt (struct thread_info
*tp
,
903 const struct btrace_data_pt
*btrace
)
905 struct btrace_thread_info
*btinfo
;
906 struct pt_insn_decoder
*decoder
;
907 struct pt_config config
;
910 if (btrace
->size
== 0)
913 btinfo
= &tp
->btrace
;
914 level
= btinfo
->begin
!= NULL
? -btinfo
->level
: INT_MAX
;
916 pt_config_init(&config
);
917 config
.begin
= btrace
->data
;
918 config
.end
= btrace
->data
+ btrace
->size
;
920 config
.cpu
.vendor
= pt_translate_cpu_vendor (btrace
->config
.cpu
.vendor
);
921 config
.cpu
.family
= btrace
->config
.cpu
.family
;
922 config
.cpu
.model
= btrace
->config
.cpu
.model
;
923 config
.cpu
.stepping
= btrace
->config
.cpu
.stepping
;
925 errcode
= pt_cpu_errata (&config
.errata
, &config
.cpu
);
927 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
928 pt_errstr (pt_errcode (errcode
)));
930 decoder
= pt_insn_alloc_decoder (&config
);
932 error (_("Failed to allocate the Intel Processor Trace decoder."));
936 struct pt_image
*image
;
938 image
= pt_insn_get_image(decoder
);
940 error (_("Failed to configure the Intel Processor Trace decoder."));
942 errcode
= pt_image_set_callback(image
, btrace_pt_readmem_callback
, NULL
);
944 error (_("Failed to configure the Intel Processor Trace decoder: "
945 "%s."), pt_errstr (pt_errcode (errcode
)));
947 ftrace_add_pt (decoder
, &btinfo
->begin
, &btinfo
->end
, &level
,
950 CATCH (error
, RETURN_MASK_ALL
)
952 /* Indicate a gap in the trace if we quit trace processing. */
953 if (error
.reason
== RETURN_QUIT
&& btinfo
->end
!= NULL
)
955 btinfo
->end
= ftrace_new_gap (btinfo
->end
, BDE_PT_USER_QUIT
);
959 btrace_finalize_ftrace_pt (decoder
, tp
, level
);
961 throw_exception (error
);
965 btrace_finalize_ftrace_pt (decoder
, tp
, level
);
968 #else /* defined (HAVE_LIBIPT) */
971 btrace_compute_ftrace_pt (struct thread_info
*tp
,
972 const struct btrace_data_pt
*btrace
)
974 internal_error (__FILE__
, __LINE__
, _("Unexpected branch trace format."));
977 #endif /* defined (HAVE_LIBIPT) */
979 /* Compute the function branch trace from a block branch trace BTRACE for
980 a thread given by BTINFO. */
983 btrace_compute_ftrace (struct thread_info
*tp
, struct btrace_data
*btrace
)
985 DEBUG ("compute ftrace");
987 switch (btrace
->format
)
989 case BTRACE_FORMAT_NONE
:
992 case BTRACE_FORMAT_BTS
:
993 btrace_compute_ftrace_bts (tp
, &btrace
->variant
.bts
);
996 case BTRACE_FORMAT_PT
:
997 btrace_compute_ftrace_pt (tp
, &btrace
->variant
.pt
);
1001 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1004 /* Add an entry for the current PC. */
1007 btrace_add_pc (struct thread_info
*tp
)
1009 struct btrace_data btrace
;
1010 struct btrace_block
*block
;
1011 struct regcache
*regcache
;
1012 struct cleanup
*cleanup
;
1015 regcache
= get_thread_regcache (tp
->ptid
);
1016 pc
= regcache_read_pc (regcache
);
1018 btrace_data_init (&btrace
);
1019 btrace
.format
= BTRACE_FORMAT_BTS
;
1020 btrace
.variant
.bts
.blocks
= NULL
;
1022 cleanup
= make_cleanup_btrace_data (&btrace
);
1024 block
= VEC_safe_push (btrace_block_s
, btrace
.variant
.bts
.blocks
, NULL
);
1028 btrace_compute_ftrace (tp
, &btrace
);
1030 do_cleanups (cleanup
);
1036 btrace_enable (struct thread_info
*tp
, const struct btrace_config
*conf
)
1038 if (tp
->btrace
.target
!= NULL
)
1041 #if !defined (HAVE_LIBIPT)
1042 if (conf
->format
== BTRACE_FORMAT_PT
)
1043 error (_("GDB does not support Intel Processor Trace."));
1044 #endif /* !defined (HAVE_LIBIPT) */
1046 if (!target_supports_btrace (conf
->format
))
1047 error (_("Target does not support branch tracing."));
1049 DEBUG ("enable thread %s (%s)", print_thread_id (tp
),
1050 target_pid_to_str (tp
->ptid
));
1052 tp
->btrace
.target
= target_enable_btrace (tp
->ptid
, conf
);
1054 /* Add an entry for the current PC so we start tracing from where we
1056 if (tp
->btrace
.target
!= NULL
)
1062 const struct btrace_config
*
1063 btrace_conf (const struct btrace_thread_info
*btinfo
)
1065 if (btinfo
->target
== NULL
)
1068 return target_btrace_conf (btinfo
->target
);
1074 btrace_disable (struct thread_info
*tp
)
1076 struct btrace_thread_info
*btp
= &tp
->btrace
;
1079 if (btp
->target
== NULL
)
1082 DEBUG ("disable thread %s (%s)", print_thread_id (tp
),
1083 target_pid_to_str (tp
->ptid
));
1085 target_disable_btrace (btp
->target
);
1094 btrace_teardown (struct thread_info
*tp
)
1096 struct btrace_thread_info
*btp
= &tp
->btrace
;
1099 if (btp
->target
== NULL
)
1102 DEBUG ("teardown thread %s (%s)", print_thread_id (tp
),
1103 target_pid_to_str (tp
->ptid
));
1105 target_teardown_btrace (btp
->target
);
1111 /* Stitch branch trace in BTS format. */
1114 btrace_stitch_bts (struct btrace_data_bts
*btrace
, struct thread_info
*tp
)
1116 struct btrace_thread_info
*btinfo
;
1117 struct btrace_function
*last_bfun
;
1118 struct btrace_insn
*last_insn
;
1119 btrace_block_s
*first_new_block
;
1121 btinfo
= &tp
->btrace
;
1122 last_bfun
= btinfo
->end
;
1123 gdb_assert (last_bfun
!= NULL
);
1124 gdb_assert (!VEC_empty (btrace_block_s
, btrace
->blocks
));
1126 /* If the existing trace ends with a gap, we just glue the traces
1127 together. We need to drop the last (i.e. chronologically first) block
1128 of the new trace, though, since we can't fill in the start address.*/
1129 if (VEC_empty (btrace_insn_s
, last_bfun
->insn
))
1131 VEC_pop (btrace_block_s
, btrace
->blocks
);
1135 /* Beware that block trace starts with the most recent block, so the
1136 chronologically first block in the new trace is the last block in
1137 the new trace's block vector. */
1138 first_new_block
= VEC_last (btrace_block_s
, btrace
->blocks
);
1139 last_insn
= VEC_last (btrace_insn_s
, last_bfun
->insn
);
1141 /* If the current PC at the end of the block is the same as in our current
1142 trace, there are two explanations:
1143 1. we executed the instruction and some branch brought us back.
1144 2. we have not made any progress.
1145 In the first case, the delta trace vector should contain at least two
1147 In the second case, the delta trace vector should contain exactly one
1148 entry for the partial block containing the current PC. Remove it. */
1149 if (first_new_block
->end
== last_insn
->pc
1150 && VEC_length (btrace_block_s
, btrace
->blocks
) == 1)
1152 VEC_pop (btrace_block_s
, btrace
->blocks
);
1156 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn
),
1157 core_addr_to_string_nz (first_new_block
->end
));
1159 /* Do a simple sanity check to make sure we don't accidentally end up
1160 with a bad block. This should not occur in practice. */
1161 if (first_new_block
->end
< last_insn
->pc
)
1163 warning (_("Error while trying to read delta trace. Falling back to "
1168 /* We adjust the last block to start at the end of our current trace. */
1169 gdb_assert (first_new_block
->begin
== 0);
1170 first_new_block
->begin
= last_insn
->pc
;
1172 /* We simply pop the last insn so we can insert it again as part of
1173 the normal branch trace computation.
1174 Since instruction iterators are based on indices in the instructions
1175 vector, we don't leave any pointers dangling. */
1176 DEBUG ("pruning insn at %s for stitching",
1177 ftrace_print_insn_addr (last_insn
));
1179 VEC_pop (btrace_insn_s
, last_bfun
->insn
);
1181 /* The instructions vector may become empty temporarily if this has
1182 been the only instruction in this function segment.
1183 This violates the invariant but will be remedied shortly by
1184 btrace_compute_ftrace when we add the new trace. */
1186 /* The only case where this would hurt is if the entire trace consisted
1187 of just that one instruction. If we remove it, we might turn the now
1188 empty btrace function segment into a gap. But we don't want gaps at
1189 the beginning. To avoid this, we remove the entire old trace. */
1190 if (last_bfun
== btinfo
->begin
&& VEC_empty (btrace_insn_s
, last_bfun
->insn
))
1196 /* Adjust the block trace in order to stitch old and new trace together.
1197 BTRACE is the new delta trace between the last and the current stop.
1198 TP is the traced thread.
1199 May modifx BTRACE as well as the existing trace in TP.
1200 Return 0 on success, -1 otherwise. */
1203 btrace_stitch_trace (struct btrace_data
*btrace
, struct thread_info
*tp
)
1205 /* If we don't have trace, there's nothing to do. */
1206 if (btrace_data_empty (btrace
))
1209 switch (btrace
->format
)
1211 case BTRACE_FORMAT_NONE
:
1214 case BTRACE_FORMAT_BTS
:
1215 return btrace_stitch_bts (&btrace
->variant
.bts
, tp
);
1217 case BTRACE_FORMAT_PT
:
1218 /* Delta reads are not supported. */
1222 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1225 /* Clear the branch trace histories in BTINFO. */
1228 btrace_clear_history (struct btrace_thread_info
*btinfo
)
1230 xfree (btinfo
->insn_history
);
1231 xfree (btinfo
->call_history
);
1232 xfree (btinfo
->replay
);
1234 btinfo
->insn_history
= NULL
;
1235 btinfo
->call_history
= NULL
;
1236 btinfo
->replay
= NULL
;
1239 /* Clear the branch trace maintenance histories in BTINFO. */
1242 btrace_maint_clear (struct btrace_thread_info
*btinfo
)
1244 switch (btinfo
->data
.format
)
1249 case BTRACE_FORMAT_BTS
:
1250 btinfo
->maint
.variant
.bts
.packet_history
.begin
= 0;
1251 btinfo
->maint
.variant
.bts
.packet_history
.end
= 0;
1254 #if defined (HAVE_LIBIPT)
1255 case BTRACE_FORMAT_PT
:
1256 xfree (btinfo
->maint
.variant
.pt
.packets
);
1258 btinfo
->maint
.variant
.pt
.packets
= NULL
;
1259 btinfo
->maint
.variant
.pt
.packet_history
.begin
= 0;
1260 btinfo
->maint
.variant
.pt
.packet_history
.end
= 0;
1262 #endif /* defined (HAVE_LIBIPT) */
1269 btrace_fetch (struct thread_info
*tp
)
1271 struct btrace_thread_info
*btinfo
;
1272 struct btrace_target_info
*tinfo
;
1273 struct btrace_data btrace
;
1274 struct cleanup
*cleanup
;
1277 DEBUG ("fetch thread %s (%s)", print_thread_id (tp
),
1278 target_pid_to_str (tp
->ptid
));
1280 btinfo
= &tp
->btrace
;
1281 tinfo
= btinfo
->target
;
1285 /* There's no way we could get new trace while replaying.
1286 On the other hand, delta trace would return a partial record with the
1287 current PC, which is the replay PC, not the last PC, as expected. */
1288 if (btinfo
->replay
!= NULL
)
1291 btrace_data_init (&btrace
);
1292 cleanup
= make_cleanup_btrace_data (&btrace
);
1294 /* Let's first try to extend the trace we already have. */
1295 if (btinfo
->end
!= NULL
)
1297 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_DELTA
);
1300 /* Success. Let's try to stitch the traces together. */
1301 errcode
= btrace_stitch_trace (&btrace
, tp
);
1305 /* We failed to read delta trace. Let's try to read new trace. */
1306 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_NEW
);
1308 /* If we got any new trace, discard what we have. */
1309 if (errcode
== 0 && !btrace_data_empty (&btrace
))
1313 /* If we were not able to read the trace, we start over. */
1317 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
1321 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
1323 /* If we were not able to read the branch trace, signal an error. */
1325 error (_("Failed to read branch trace."));
1327 /* Compute the trace, provided we have any. */
1328 if (!btrace_data_empty (&btrace
))
1330 /* Store the raw trace data. The stored data will be cleared in
1331 btrace_clear, so we always append the new trace. */
1332 btrace_data_append (&btinfo
->data
, &btrace
);
1333 btrace_maint_clear (btinfo
);
1335 btrace_clear_history (btinfo
);
1336 btrace_compute_ftrace (tp
, &btrace
);
1339 do_cleanups (cleanup
);
1345 btrace_clear (struct thread_info
*tp
)
1347 struct btrace_thread_info
*btinfo
;
1348 struct btrace_function
*it
, *trash
;
1350 DEBUG ("clear thread %s (%s)", print_thread_id (tp
),
1351 target_pid_to_str (tp
->ptid
));
1353 /* Make sure btrace frames that may hold a pointer into the branch
1354 trace data are destroyed. */
1355 reinit_frame_cache ();
1357 btinfo
= &tp
->btrace
;
1368 btinfo
->begin
= NULL
;
1372 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1373 btrace_maint_clear (btinfo
);
1374 btrace_data_clear (&btinfo
->data
);
1375 btrace_clear_history (btinfo
);
1381 btrace_free_objfile (struct objfile
*objfile
)
1383 struct thread_info
*tp
;
1385 DEBUG ("free objfile");
1387 ALL_NON_EXITED_THREADS (tp
)
1391 #if defined (HAVE_LIBEXPAT)
1393 /* Check the btrace document version. */
1396 check_xml_btrace_version (struct gdb_xml_parser
*parser
,
1397 const struct gdb_xml_element
*element
,
1398 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1401 = (const char *) xml_find_attribute (attributes
, "version")->value
;
1403 if (strcmp (version
, "1.0") != 0)
1404 gdb_xml_error (parser
, _("Unsupported btrace version: \"%s\""), version
);
1407 /* Parse a btrace "block" xml record. */
1410 parse_xml_btrace_block (struct gdb_xml_parser
*parser
,
1411 const struct gdb_xml_element
*element
,
1412 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1414 struct btrace_data
*btrace
;
1415 struct btrace_block
*block
;
1416 ULONGEST
*begin
, *end
;
1418 btrace
= (struct btrace_data
*) user_data
;
1420 switch (btrace
->format
)
1422 case BTRACE_FORMAT_BTS
:
1425 case BTRACE_FORMAT_NONE
:
1426 btrace
->format
= BTRACE_FORMAT_BTS
;
1427 btrace
->variant
.bts
.blocks
= NULL
;
1431 gdb_xml_error (parser
, _("Btrace format error."));
1434 begin
= (ULONGEST
*) xml_find_attribute (attributes
, "begin")->value
;
1435 end
= (ULONGEST
*) xml_find_attribute (attributes
, "end")->value
;
1437 block
= VEC_safe_push (btrace_block_s
, btrace
->variant
.bts
.blocks
, NULL
);
1438 block
->begin
= *begin
;
1442 /* Parse a "raw" xml record. */
1445 parse_xml_raw (struct gdb_xml_parser
*parser
, const char *body_text
,
1446 gdb_byte
**pdata
, size_t *psize
)
1448 struct cleanup
*cleanup
;
1449 gdb_byte
*data
, *bin
;
1452 len
= strlen (body_text
);
1454 gdb_xml_error (parser
, _("Bad raw data size."));
1458 bin
= data
= (gdb_byte
*) xmalloc (size
);
1459 cleanup
= make_cleanup (xfree
, data
);
1461 /* We use hex encoding - see common/rsp-low.h. */
1469 if (hi
== 0 || lo
== 0)
1470 gdb_xml_error (parser
, _("Bad hex encoding."));
1472 *bin
++ = fromhex (hi
) * 16 + fromhex (lo
);
1476 discard_cleanups (cleanup
);
1482 /* Parse a btrace pt-config "cpu" xml record. */
1485 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser
*parser
,
1486 const struct gdb_xml_element
*element
,
1488 VEC (gdb_xml_value_s
) *attributes
)
1490 struct btrace_data
*btrace
;
1492 ULONGEST
*family
, *model
, *stepping
;
1494 vendor
= (const char *) xml_find_attribute (attributes
, "vendor")->value
;
1495 family
= (ULONGEST
*) xml_find_attribute (attributes
, "family")->value
;
1496 model
= (ULONGEST
*) xml_find_attribute (attributes
, "model")->value
;
1497 stepping
= (ULONGEST
*) xml_find_attribute (attributes
, "stepping")->value
;
1499 btrace
= (struct btrace_data
*) user_data
;
1501 if (strcmp (vendor
, "GenuineIntel") == 0)
1502 btrace
->variant
.pt
.config
.cpu
.vendor
= CV_INTEL
;
1504 btrace
->variant
.pt
.config
.cpu
.family
= *family
;
1505 btrace
->variant
.pt
.config
.cpu
.model
= *model
;
1506 btrace
->variant
.pt
.config
.cpu
.stepping
= *stepping
;
1509 /* Parse a btrace pt "raw" xml record. */
1512 parse_xml_btrace_pt_raw (struct gdb_xml_parser
*parser
,
1513 const struct gdb_xml_element
*element
,
1514 void *user_data
, const char *body_text
)
1516 struct btrace_data
*btrace
;
1518 btrace
= (struct btrace_data
*) user_data
;
1519 parse_xml_raw (parser
, body_text
, &btrace
->variant
.pt
.data
,
1520 &btrace
->variant
.pt
.size
);
1523 /* Parse a btrace "pt" xml record. */
1526 parse_xml_btrace_pt (struct gdb_xml_parser
*parser
,
1527 const struct gdb_xml_element
*element
,
1528 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1530 struct btrace_data
*btrace
;
1532 btrace
= (struct btrace_data
*) user_data
;
1533 btrace
->format
= BTRACE_FORMAT_PT
;
1534 btrace
->variant
.pt
.config
.cpu
.vendor
= CV_UNKNOWN
;
1535 btrace
->variant
.pt
.data
= NULL
;
1536 btrace
->variant
.pt
.size
= 0;
1539 static const struct gdb_xml_attribute block_attributes
[] = {
1540 { "begin", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1541 { "end", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1542 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1545 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes
[] = {
1546 { "vendor", GDB_XML_AF_NONE
, NULL
, NULL
},
1547 { "family", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1548 { "model", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1549 { "stepping", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1550 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1553 static const struct gdb_xml_element btrace_pt_config_children
[] = {
1554 { "cpu", btrace_pt_config_cpu_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1555 parse_xml_btrace_pt_config_cpu
, NULL
},
1556 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1559 static const struct gdb_xml_element btrace_pt_children
[] = {
1560 { "pt-config", NULL
, btrace_pt_config_children
, GDB_XML_EF_OPTIONAL
, NULL
,
1562 { "raw", NULL
, NULL
, GDB_XML_EF_OPTIONAL
, NULL
, parse_xml_btrace_pt_raw
},
1563 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1566 static const struct gdb_xml_attribute btrace_attributes
[] = {
1567 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1568 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1571 static const struct gdb_xml_element btrace_children
[] = {
1572 { "block", block_attributes
, NULL
,
1573 GDB_XML_EF_REPEATABLE
| GDB_XML_EF_OPTIONAL
, parse_xml_btrace_block
, NULL
},
1574 { "pt", NULL
, btrace_pt_children
, GDB_XML_EF_OPTIONAL
, parse_xml_btrace_pt
,
1576 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1579 static const struct gdb_xml_element btrace_elements
[] = {
1580 { "btrace", btrace_attributes
, btrace_children
, GDB_XML_EF_NONE
,
1581 check_xml_btrace_version
, NULL
},
1582 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1585 #endif /* defined (HAVE_LIBEXPAT) */
1590 parse_xml_btrace (struct btrace_data
*btrace
, const char *buffer
)
1592 struct cleanup
*cleanup
;
1595 #if defined (HAVE_LIBEXPAT)
1597 btrace
->format
= BTRACE_FORMAT_NONE
;
1599 cleanup
= make_cleanup_btrace_data (btrace
);
1600 errcode
= gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements
,
1603 error (_("Error parsing branch trace."));
1605 /* Keep parse results. */
1606 discard_cleanups (cleanup
);
1608 #else /* !defined (HAVE_LIBEXPAT) */
1610 error (_("Cannot process branch trace. XML parsing is not supported."));
1612 #endif /* !defined (HAVE_LIBEXPAT) */
1615 #if defined (HAVE_LIBEXPAT)
1617 /* Parse a btrace-conf "bts" xml record. */
1620 parse_xml_btrace_conf_bts (struct gdb_xml_parser
*parser
,
1621 const struct gdb_xml_element
*element
,
1622 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1624 struct btrace_config
*conf
;
1625 struct gdb_xml_value
*size
;
1627 conf
= (struct btrace_config
*) user_data
;
1628 conf
->format
= BTRACE_FORMAT_BTS
;
1631 size
= xml_find_attribute (attributes
, "size");
1633 conf
->bts
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
1636 /* Parse a btrace-conf "pt" xml record. */
1639 parse_xml_btrace_conf_pt (struct gdb_xml_parser
*parser
,
1640 const struct gdb_xml_element
*element
,
1641 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1643 struct btrace_config
*conf
;
1644 struct gdb_xml_value
*size
;
1646 conf
= (struct btrace_config
*) user_data
;
1647 conf
->format
= BTRACE_FORMAT_PT
;
1650 size
= xml_find_attribute (attributes
, "size");
1652 conf
->pt
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
1655 static const struct gdb_xml_attribute btrace_conf_pt_attributes
[] = {
1656 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
1657 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1660 static const struct gdb_xml_attribute btrace_conf_bts_attributes
[] = {
1661 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
1662 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1665 static const struct gdb_xml_element btrace_conf_children
[] = {
1666 { "bts", btrace_conf_bts_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1667 parse_xml_btrace_conf_bts
, NULL
},
1668 { "pt", btrace_conf_pt_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1669 parse_xml_btrace_conf_pt
, NULL
},
1670 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1673 static const struct gdb_xml_attribute btrace_conf_attributes
[] = {
1674 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1675 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1678 static const struct gdb_xml_element btrace_conf_elements
[] = {
1679 { "btrace-conf", btrace_conf_attributes
, btrace_conf_children
,
1680 GDB_XML_EF_NONE
, NULL
, NULL
},
1681 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1684 #endif /* defined (HAVE_LIBEXPAT) */
1689 parse_xml_btrace_conf (struct btrace_config
*conf
, const char *xml
)
1693 #if defined (HAVE_LIBEXPAT)
1695 errcode
= gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1696 btrace_conf_elements
, xml
, conf
);
1698 error (_("Error parsing branch trace configuration."));
1700 #else /* !defined (HAVE_LIBEXPAT) */
1702 error (_("XML parsing is not supported."));
1704 #endif /* !defined (HAVE_LIBEXPAT) */
1709 const struct btrace_insn
*
1710 btrace_insn_get (const struct btrace_insn_iterator
*it
)
1712 const struct btrace_function
*bfun
;
1713 unsigned int index
, end
;
1716 bfun
= it
->function
;
1718 /* Check if the iterator points to a gap in the trace. */
1719 if (bfun
->errcode
!= 0)
1722 /* The index is within the bounds of this function's instruction vector. */
1723 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1724 gdb_assert (0 < end
);
1725 gdb_assert (index
< end
);
1727 return VEC_index (btrace_insn_s
, bfun
->insn
, index
);
1733 btrace_insn_number (const struct btrace_insn_iterator
*it
)
1735 const struct btrace_function
*bfun
;
1737 bfun
= it
->function
;
1739 /* Return zero if the iterator points to a gap in the trace. */
1740 if (bfun
->errcode
!= 0)
1743 return bfun
->insn_offset
+ it
->index
;
1749 btrace_insn_begin (struct btrace_insn_iterator
*it
,
1750 const struct btrace_thread_info
*btinfo
)
1752 const struct btrace_function
*bfun
;
1754 bfun
= btinfo
->begin
;
1756 error (_("No trace."));
1758 it
->function
= bfun
;
1765 btrace_insn_end (struct btrace_insn_iterator
*it
,
1766 const struct btrace_thread_info
*btinfo
)
1768 const struct btrace_function
*bfun
;
1769 unsigned int length
;
1773 error (_("No trace."));
1775 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1777 /* The last function may either be a gap or it contains the current
1778 instruction, which is one past the end of the execution trace; ignore
1783 it
->function
= bfun
;
1790 btrace_insn_next (struct btrace_insn_iterator
*it
, unsigned int stride
)
1792 const struct btrace_function
*bfun
;
1793 unsigned int index
, steps
;
1795 bfun
= it
->function
;
1801 unsigned int end
, space
, adv
;
1803 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1805 /* An empty function segment represents a gap in the trace. We count
1806 it as one instruction. */
1809 const struct btrace_function
*next
;
1811 next
= bfun
->flow
.next
;
1824 gdb_assert (0 < end
);
1825 gdb_assert (index
< end
);
1827 /* Compute the number of instructions remaining in this segment. */
1828 space
= end
- index
;
1830 /* Advance the iterator as far as possible within this segment. */
1831 adv
= std::min (space
, stride
);
1836 /* Move to the next function if we're at the end of this one. */
1839 const struct btrace_function
*next
;
1841 next
= bfun
->flow
.next
;
1844 /* We stepped past the last function.
1846 Let's adjust the index to point to the last instruction in
1847 the previous function. */
1853 /* We now point to the first instruction in the new function. */
1858 /* We did make progress. */
1859 gdb_assert (adv
> 0);
1862 /* Update the iterator. */
1863 it
->function
= bfun
;
1872 btrace_insn_prev (struct btrace_insn_iterator
*it
, unsigned int stride
)
1874 const struct btrace_function
*bfun
;
1875 unsigned int index
, steps
;
1877 bfun
= it
->function
;
1885 /* Move to the previous function if we're at the start of this one. */
1888 const struct btrace_function
*prev
;
1890 prev
= bfun
->flow
.prev
;
1894 /* We point to one after the last instruction in the new function. */
1896 index
= VEC_length (btrace_insn_s
, bfun
->insn
);
1898 /* An empty function segment represents a gap in the trace. We count
1899 it as one instruction. */
1909 /* Advance the iterator as far as possible within this segment. */
1910 adv
= std::min (index
, stride
);
1916 /* We did make progress. */
1917 gdb_assert (adv
> 0);
1920 /* Update the iterator. */
1921 it
->function
= bfun
;
1930 btrace_insn_cmp (const struct btrace_insn_iterator
*lhs
,
1931 const struct btrace_insn_iterator
*rhs
)
1933 unsigned int lnum
, rnum
;
1935 lnum
= btrace_insn_number (lhs
);
1936 rnum
= btrace_insn_number (rhs
);
1938 /* A gap has an instruction number of zero. Things are getting more
1939 complicated if gaps are involved.
1941 We take the instruction number offset from the iterator's function.
1942 This is the number of the first instruction after the gap.
1944 This is OK as long as both lhs and rhs point to gaps. If only one of
1945 them does, we need to adjust the number based on the other's regular
1946 instruction number. Otherwise, a gap might compare equal to an
1949 if (lnum
== 0 && rnum
== 0)
1951 lnum
= lhs
->function
->insn_offset
;
1952 rnum
= rhs
->function
->insn_offset
;
1956 lnum
= lhs
->function
->insn_offset
;
1963 rnum
= rhs
->function
->insn_offset
;
1969 return (int) (lnum
- rnum
);
1975 btrace_find_insn_by_number (struct btrace_insn_iterator
*it
,
1976 const struct btrace_thread_info
*btinfo
,
1977 unsigned int number
)
1979 const struct btrace_function
*bfun
;
1980 unsigned int end
, length
;
1982 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1985 if (bfun
->errcode
!= 0)
1988 if (bfun
->insn_offset
<= number
)
1995 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1996 gdb_assert (length
> 0);
1998 end
= bfun
->insn_offset
+ length
;
2002 it
->function
= bfun
;
2003 it
->index
= number
- bfun
->insn_offset
;
2010 const struct btrace_function
*
2011 btrace_call_get (const struct btrace_call_iterator
*it
)
2013 return it
->function
;
2019 btrace_call_number (const struct btrace_call_iterator
*it
)
2021 const struct btrace_thread_info
*btinfo
;
2022 const struct btrace_function
*bfun
;
2025 btinfo
= it
->btinfo
;
2026 bfun
= it
->function
;
2028 return bfun
->number
;
2030 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2031 number of the last function. */
2033 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
2035 /* If the function contains only a single instruction (i.e. the current
2036 instruction), it will be skipped and its number is already the number
2039 return bfun
->number
;
2041 /* Otherwise, return one more than the number of the last function. */
2042 return bfun
->number
+ 1;
2048 btrace_call_begin (struct btrace_call_iterator
*it
,
2049 const struct btrace_thread_info
*btinfo
)
2051 const struct btrace_function
*bfun
;
2053 bfun
= btinfo
->begin
;
2055 error (_("No trace."));
2057 it
->btinfo
= btinfo
;
2058 it
->function
= bfun
;
2064 btrace_call_end (struct btrace_call_iterator
*it
,
2065 const struct btrace_thread_info
*btinfo
)
2067 const struct btrace_function
*bfun
;
2071 error (_("No trace."));
2073 it
->btinfo
= btinfo
;
2074 it
->function
= NULL
;
2080 btrace_call_next (struct btrace_call_iterator
*it
, unsigned int stride
)
2082 const struct btrace_function
*bfun
;
2085 bfun
= it
->function
;
2087 while (bfun
!= NULL
)
2089 const struct btrace_function
*next
;
2092 next
= bfun
->flow
.next
;
2095 /* Ignore the last function if it only contains a single
2096 (i.e. the current) instruction. */
2097 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
2102 if (stride
== steps
)
2109 it
->function
= bfun
;
2116 btrace_call_prev (struct btrace_call_iterator
*it
, unsigned int stride
)
2118 const struct btrace_thread_info
*btinfo
;
2119 const struct btrace_function
*bfun
;
2122 bfun
= it
->function
;
2129 btinfo
= it
->btinfo
;
2134 /* Ignore the last function if it only contains a single
2135 (i.e. the current) instruction. */
2136 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
2138 bfun
= bfun
->flow
.prev
;
2146 while (steps
< stride
)
2148 const struct btrace_function
*prev
;
2150 prev
= bfun
->flow
.prev
;
2158 it
->function
= bfun
;
2165 btrace_call_cmp (const struct btrace_call_iterator
*lhs
,
2166 const struct btrace_call_iterator
*rhs
)
2168 unsigned int lnum
, rnum
;
2170 lnum
= btrace_call_number (lhs
);
2171 rnum
= btrace_call_number (rhs
);
2173 return (int) (lnum
- rnum
);
2179 btrace_find_call_by_number (struct btrace_call_iterator
*it
,
2180 const struct btrace_thread_info
*btinfo
,
2181 unsigned int number
)
2183 const struct btrace_function
*bfun
;
2185 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
2189 bnum
= bfun
->number
;
2192 it
->btinfo
= btinfo
;
2193 it
->function
= bfun
;
2197 /* Functions are ordered and numbered consecutively. We could bail out
2198 earlier. On the other hand, it is very unlikely that we search for
2199 a nonexistent function. */
2208 btrace_set_insn_history (struct btrace_thread_info
*btinfo
,
2209 const struct btrace_insn_iterator
*begin
,
2210 const struct btrace_insn_iterator
*end
)
2212 if (btinfo
->insn_history
== NULL
)
2213 btinfo
->insn_history
= XCNEW (struct btrace_insn_history
);
2215 btinfo
->insn_history
->begin
= *begin
;
2216 btinfo
->insn_history
->end
= *end
;
2222 btrace_set_call_history (struct btrace_thread_info
*btinfo
,
2223 const struct btrace_call_iterator
*begin
,
2224 const struct btrace_call_iterator
*end
)
2226 gdb_assert (begin
->btinfo
== end
->btinfo
);
2228 if (btinfo
->call_history
== NULL
)
2229 btinfo
->call_history
= XCNEW (struct btrace_call_history
);
2231 btinfo
->call_history
->begin
= *begin
;
2232 btinfo
->call_history
->end
= *end
;
2238 btrace_is_replaying (struct thread_info
*tp
)
2240 return tp
->btrace
.replay
!= NULL
;
2246 btrace_is_empty (struct thread_info
*tp
)
2248 struct btrace_insn_iterator begin
, end
;
2249 struct btrace_thread_info
*btinfo
;
2251 btinfo
= &tp
->btrace
;
2253 if (btinfo
->begin
== NULL
)
2256 btrace_insn_begin (&begin
, btinfo
);
2257 btrace_insn_end (&end
, btinfo
);
2259 return btrace_insn_cmp (&begin
, &end
) == 0;
2262 /* Forward the cleanup request. */
2265 do_btrace_data_cleanup (void *arg
)
2267 btrace_data_fini ((struct btrace_data
*) arg
);
2273 make_cleanup_btrace_data (struct btrace_data
*data
)
2275 return make_cleanup (do_btrace_data_cleanup
, data
);
2278 #if defined (HAVE_LIBIPT)
2280 /* Print a single packet. */
2283 pt_print_packet (const struct pt_packet
*packet
)
2285 switch (packet
->type
)
2288 printf_unfiltered (("[??: %x]"), packet
->type
);
2292 printf_unfiltered (("psb"));
2296 printf_unfiltered (("psbend"));
2300 printf_unfiltered (("pad"));
2304 printf_unfiltered (("tip %u: 0x%" PRIx64
""),
2305 packet
->payload
.ip
.ipc
,
2306 packet
->payload
.ip
.ip
);
2310 printf_unfiltered (("tip.pge %u: 0x%" PRIx64
""),
2311 packet
->payload
.ip
.ipc
,
2312 packet
->payload
.ip
.ip
);
2316 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64
""),
2317 packet
->payload
.ip
.ipc
,
2318 packet
->payload
.ip
.ip
);
2322 printf_unfiltered (("fup %u: 0x%" PRIx64
""),
2323 packet
->payload
.ip
.ipc
,
2324 packet
->payload
.ip
.ip
);
2328 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64
""),
2329 packet
->payload
.tnt
.bit_size
,
2330 packet
->payload
.tnt
.payload
);
2334 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64
""),
2335 packet
->payload
.tnt
.bit_size
,
2336 packet
->payload
.tnt
.payload
);
2340 printf_unfiltered (("pip %" PRIx64
"%s"), packet
->payload
.pip
.cr3
,
2341 packet
->payload
.pip
.nr
? (" nr") : (""));
2345 printf_unfiltered (("tsc %" PRIx64
""), packet
->payload
.tsc
.tsc
);
2349 printf_unfiltered (("cbr %u"), packet
->payload
.cbr
.ratio
);
2353 switch (packet
->payload
.mode
.leaf
)
2356 printf_unfiltered (("mode %u"), packet
->payload
.mode
.leaf
);
2360 printf_unfiltered (("mode.exec%s%s"),
2361 packet
->payload
.mode
.bits
.exec
.csl
2363 packet
->payload
.mode
.bits
.exec
.csd
2364 ? (" cs.d") : (""));
2368 printf_unfiltered (("mode.tsx%s%s"),
2369 packet
->payload
.mode
.bits
.tsx
.intx
2371 packet
->payload
.mode
.bits
.tsx
.abrt
2372 ? (" abrt") : (""));
2378 printf_unfiltered (("ovf"));
2382 printf_unfiltered (("stop"));
2386 printf_unfiltered (("vmcs %" PRIx64
""), packet
->payload
.vmcs
.base
);
2390 printf_unfiltered (("tma %x %x"), packet
->payload
.tma
.ctc
,
2391 packet
->payload
.tma
.fc
);
2395 printf_unfiltered (("mtc %x"), packet
->payload
.mtc
.ctc
);
2399 printf_unfiltered (("cyc %" PRIx64
""), packet
->payload
.cyc
.value
);
2403 printf_unfiltered (("mnt %" PRIx64
""), packet
->payload
.mnt
.payload
);
2408 /* Decode packets into MAINT using DECODER. */
2411 btrace_maint_decode_pt (struct btrace_maint_info
*maint
,
2412 struct pt_packet_decoder
*decoder
)
2418 struct btrace_pt_packet packet
;
2420 errcode
= pt_pkt_sync_forward (decoder
);
2426 pt_pkt_get_offset (decoder
, &packet
.offset
);
2428 errcode
= pt_pkt_next (decoder
, &packet
.packet
,
2429 sizeof(packet
.packet
));
2433 if (maint_btrace_pt_skip_pad
== 0 || packet
.packet
.type
!= ppt_pad
)
2435 packet
.errcode
= pt_errcode (errcode
);
2436 VEC_safe_push (btrace_pt_packet_s
, maint
->variant
.pt
.packets
,
2441 if (errcode
== -pte_eos
)
2444 packet
.errcode
= pt_errcode (errcode
);
2445 VEC_safe_push (btrace_pt_packet_s
, maint
->variant
.pt
.packets
,
2448 warning (_("Error at trace offset 0x%" PRIx64
": %s."),
2449 packet
.offset
, pt_errstr (packet
.errcode
));
2452 if (errcode
!= -pte_eos
)
2453 warning (_("Failed to synchronize onto the Intel Processor Trace "
2454 "stream: %s."), pt_errstr (pt_errcode (errcode
)));
2457 /* Update the packet history in BTINFO. */
2460 btrace_maint_update_pt_packets (struct btrace_thread_info
*btinfo
)
2462 volatile struct gdb_exception except
;
2463 struct pt_packet_decoder
*decoder
;
2464 struct btrace_data_pt
*pt
;
2465 struct pt_config config
;
2468 pt
= &btinfo
->data
.variant
.pt
;
2470 /* Nothing to do if there is no trace. */
2474 memset (&config
, 0, sizeof(config
));
2476 config
.size
= sizeof (config
);
2477 config
.begin
= pt
->data
;
2478 config
.end
= pt
->data
+ pt
->size
;
2480 config
.cpu
.vendor
= pt_translate_cpu_vendor (pt
->config
.cpu
.vendor
);
2481 config
.cpu
.family
= pt
->config
.cpu
.family
;
2482 config
.cpu
.model
= pt
->config
.cpu
.model
;
2483 config
.cpu
.stepping
= pt
->config
.cpu
.stepping
;
2485 errcode
= pt_cpu_errata (&config
.errata
, &config
.cpu
);
2487 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2488 pt_errstr (pt_errcode (errcode
)));
2490 decoder
= pt_pkt_alloc_decoder (&config
);
2491 if (decoder
== NULL
)
2492 error (_("Failed to allocate the Intel Processor Trace decoder."));
2496 btrace_maint_decode_pt (&btinfo
->maint
, decoder
);
2498 CATCH (except
, RETURN_MASK_ALL
)
2500 pt_pkt_free_decoder (decoder
);
2502 if (except
.reason
< 0)
2503 throw_exception (except
);
2507 pt_pkt_free_decoder (decoder
);
2510 #endif /* !defined (HAVE_LIBIPT) */
2512 /* Update the packet maintenance information for BTINFO and store the
2513 low and high bounds into BEGIN and END, respectively.
2514 Store the current iterator state into FROM and TO. */
2517 btrace_maint_update_packets (struct btrace_thread_info
*btinfo
,
2518 unsigned int *begin
, unsigned int *end
,
2519 unsigned int *from
, unsigned int *to
)
2521 switch (btinfo
->data
.format
)
2530 case BTRACE_FORMAT_BTS
:
2531 /* Nothing to do - we operate directly on BTINFO->DATA. */
2533 *end
= VEC_length (btrace_block_s
, btinfo
->data
.variant
.bts
.blocks
);
2534 *from
= btinfo
->maint
.variant
.bts
.packet_history
.begin
;
2535 *to
= btinfo
->maint
.variant
.bts
.packet_history
.end
;
2538 #if defined (HAVE_LIBIPT)
2539 case BTRACE_FORMAT_PT
:
2540 if (VEC_empty (btrace_pt_packet_s
, btinfo
->maint
.variant
.pt
.packets
))
2541 btrace_maint_update_pt_packets (btinfo
);
2544 *end
= VEC_length (btrace_pt_packet_s
, btinfo
->maint
.variant
.pt
.packets
);
2545 *from
= btinfo
->maint
.variant
.pt
.packet_history
.begin
;
2546 *to
= btinfo
->maint
.variant
.pt
.packet_history
.end
;
2548 #endif /* defined (HAVE_LIBIPT) */
2552 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2553 update the current iterator position. */
2556 btrace_maint_print_packets (struct btrace_thread_info
*btinfo
,
2557 unsigned int begin
, unsigned int end
)
2559 switch (btinfo
->data
.format
)
2564 case BTRACE_FORMAT_BTS
:
2566 VEC (btrace_block_s
) *blocks
;
2569 blocks
= btinfo
->data
.variant
.bts
.blocks
;
2570 for (blk
= begin
; blk
< end
; ++blk
)
2572 const btrace_block_s
*block
;
2574 block
= VEC_index (btrace_block_s
, blocks
, blk
);
2576 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk
,
2577 core_addr_to_string_nz (block
->begin
),
2578 core_addr_to_string_nz (block
->end
));
2581 btinfo
->maint
.variant
.bts
.packet_history
.begin
= begin
;
2582 btinfo
->maint
.variant
.bts
.packet_history
.end
= end
;
2586 #if defined (HAVE_LIBIPT)
2587 case BTRACE_FORMAT_PT
:
2589 VEC (btrace_pt_packet_s
) *packets
;
2592 packets
= btinfo
->maint
.variant
.pt
.packets
;
2593 for (pkt
= begin
; pkt
< end
; ++pkt
)
2595 const struct btrace_pt_packet
*packet
;
2597 packet
= VEC_index (btrace_pt_packet_s
, packets
, pkt
);
2599 printf_unfiltered ("%u\t", pkt
);
2600 printf_unfiltered ("0x%" PRIx64
"\t", packet
->offset
);
2602 if (packet
->errcode
== pte_ok
)
2603 pt_print_packet (&packet
->packet
);
2605 printf_unfiltered ("[error: %s]", pt_errstr (packet
->errcode
));
2607 printf_unfiltered ("\n");
2610 btinfo
->maint
.variant
.pt
.packet_history
.begin
= begin
;
2611 btinfo
->maint
.variant
.pt
.packet_history
.end
= end
;
2614 #endif /* defined (HAVE_LIBIPT) */
2618 /* Read a number from an argument string. */
2621 get_uint (char **arg
)
2623 char *begin
, *end
, *pos
;
2624 unsigned long number
;
2627 pos
= skip_spaces (begin
);
2629 if (!isdigit (*pos
))
2630 error (_("Expected positive number, got: %s."), pos
);
2632 number
= strtoul (pos
, &end
, 10);
2633 if (number
> UINT_MAX
)
2634 error (_("Number too big."));
2636 *arg
+= (end
- begin
);
2638 return (unsigned int) number
;
2641 /* Read a context size from an argument string. */
2644 get_context_size (char **arg
)
2649 pos
= skip_spaces (*arg
);
2651 if (!isdigit (*pos
))
2652 error (_("Expected positive number, got: %s."), pos
);
2654 return strtol (pos
, arg
, 10);
2657 /* Complain about junk at the end of an argument string. */
2660 no_chunk (char *arg
)
2663 error (_("Junk after argument: %s."), arg
);
2666 /* The "maintenance btrace packet-history" command. */
2669 maint_btrace_packet_history_cmd (char *arg
, int from_tty
)
2671 struct btrace_thread_info
*btinfo
;
2672 struct thread_info
*tp
;
2673 unsigned int size
, begin
, end
, from
, to
;
2675 tp
= find_thread_ptid (inferior_ptid
);
2677 error (_("No thread."));
2680 btinfo
= &tp
->btrace
;
2682 btrace_maint_update_packets (btinfo
, &begin
, &end
, &from
, &to
);
2685 printf_unfiltered (_("No trace.\n"));
2689 if (arg
== NULL
|| *arg
== 0 || strcmp (arg
, "+") == 0)
2693 if (end
- from
< size
)
2697 else if (strcmp (arg
, "-") == 0)
2701 if (to
- begin
< size
)
2707 from
= get_uint (&arg
);
2709 error (_("'%u' is out of range."), from
);
2711 arg
= skip_spaces (arg
);
2714 arg
= skip_spaces (++arg
);
2719 size
= get_context_size (&arg
);
2723 if (end
- from
< size
)
2727 else if (*arg
== '-')
2730 size
= get_context_size (&arg
);
2734 /* Include the packet given as first argument. */
2738 if (to
- begin
< size
)
2744 to
= get_uint (&arg
);
2746 /* Include the packet at the second argument and silently
2747 truncate the range. */
2760 if (end
- from
< size
)
2768 btrace_maint_print_packets (btinfo
, from
, to
);
2771 /* The "maintenance btrace clear-packet-history" command. */
2774 maint_btrace_clear_packet_history_cmd (char *args
, int from_tty
)
2776 struct btrace_thread_info
*btinfo
;
2777 struct thread_info
*tp
;
2779 if (args
!= NULL
&& *args
!= 0)
2780 error (_("Invalid argument."));
2782 tp
= find_thread_ptid (inferior_ptid
);
2784 error (_("No thread."));
2786 btinfo
= &tp
->btrace
;
2788 /* Must clear the maint data before - it depends on BTINFO->DATA. */
2789 btrace_maint_clear (btinfo
);
2790 btrace_data_clear (&btinfo
->data
);
2793 /* The "maintenance btrace clear" command. */
2796 maint_btrace_clear_cmd (char *args
, int from_tty
)
2798 struct btrace_thread_info
*btinfo
;
2799 struct thread_info
*tp
;
2801 if (args
!= NULL
&& *args
!= 0)
2802 error (_("Invalid argument."));
2804 tp
= find_thread_ptid (inferior_ptid
);
2806 error (_("No thread."));
2811 /* The "maintenance btrace" command. */
2814 maint_btrace_cmd (char *args
, int from_tty
)
2816 help_list (maint_btrace_cmdlist
, "maintenance btrace ", all_commands
,
2820 /* The "maintenance set btrace" command. */
2823 maint_btrace_set_cmd (char *args
, int from_tty
)
2825 help_list (maint_btrace_set_cmdlist
, "maintenance set btrace ", all_commands
,
2829 /* The "maintenance show btrace" command. */
2832 maint_btrace_show_cmd (char *args
, int from_tty
)
2834 help_list (maint_btrace_show_cmdlist
, "maintenance show btrace ",
2835 all_commands
, gdb_stdout
);
2838 /* The "maintenance set btrace pt" command. */
2841 maint_btrace_pt_set_cmd (char *args
, int from_tty
)
2843 help_list (maint_btrace_pt_set_cmdlist
, "maintenance set btrace pt ",
2844 all_commands
, gdb_stdout
);
2847 /* The "maintenance show btrace pt" command. */
2850 maint_btrace_pt_show_cmd (char *args
, int from_tty
)
2852 help_list (maint_btrace_pt_show_cmdlist
, "maintenance show btrace pt ",
2853 all_commands
, gdb_stdout
);
2856 /* The "maintenance info btrace" command. */
2859 maint_info_btrace_cmd (char *args
, int from_tty
)
2861 struct btrace_thread_info
*btinfo
;
2862 struct thread_info
*tp
;
2863 const struct btrace_config
*conf
;
2865 if (args
!= NULL
&& *args
!= 0)
2866 error (_("Invalid argument."));
2868 tp
= find_thread_ptid (inferior_ptid
);
2870 error (_("No thread."));
2872 btinfo
= &tp
->btrace
;
2874 conf
= btrace_conf (btinfo
);
2876 error (_("No btrace configuration."));
2878 printf_unfiltered (_("Format: %s.\n"),
2879 btrace_format_string (conf
->format
));
2881 switch (conf
->format
)
2886 case BTRACE_FORMAT_BTS
:
2887 printf_unfiltered (_("Number of packets: %u.\n"),
2888 VEC_length (btrace_block_s
,
2889 btinfo
->data
.variant
.bts
.blocks
));
2892 #if defined (HAVE_LIBIPT)
2893 case BTRACE_FORMAT_PT
:
2895 struct pt_version version
;
2897 version
= pt_library_version ();
2898 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version
.major
,
2899 version
.minor
, version
.build
,
2900 version
.ext
!= NULL
? version
.ext
: "");
2902 btrace_maint_update_pt_packets (btinfo
);
2903 printf_unfiltered (_("Number of packets: %u.\n"),
2904 VEC_length (btrace_pt_packet_s
,
2905 btinfo
->maint
.variant
.pt
.packets
));
2908 #endif /* defined (HAVE_LIBIPT) */
/* The "maint show btrace pt skip-pad" show value function.  Prints the
   current VALUE of the skip-pad setting to FILE.  FROM_TTY and C are
   unused.  */

static void
show_maint_btrace_pt_skip_pad  (struct ui_file *file, int from_tty,
				struct cmd_list_element *c,
				const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
2923 /* Initialize btrace maintenance commands. */
2925 void _initialize_btrace (void);
2927 _initialize_btrace (void)
2929 add_cmd ("btrace", class_maintenance
, maint_info_btrace_cmd
,
2930 _("Info about branch tracing data."), &maintenanceinfolist
);
2932 add_prefix_cmd ("btrace", class_maintenance
, maint_btrace_cmd
,
2933 _("Branch tracing maintenance commands."),
2934 &maint_btrace_cmdlist
, "maintenance btrace ",
2935 0, &maintenancelist
);
2937 add_prefix_cmd ("btrace", class_maintenance
, maint_btrace_set_cmd
, _("\
2938 Set branch tracing specific variables."),
2939 &maint_btrace_set_cmdlist
, "maintenance set btrace ",
2940 0, &maintenance_set_cmdlist
);
2942 add_prefix_cmd ("pt", class_maintenance
, maint_btrace_pt_set_cmd
, _("\
2943 Set Intel Processor Trace specific variables."),
2944 &maint_btrace_pt_set_cmdlist
, "maintenance set btrace pt ",
2945 0, &maint_btrace_set_cmdlist
);
2947 add_prefix_cmd ("btrace", class_maintenance
, maint_btrace_show_cmd
, _("\
2948 Show branch tracing specific variables."),
2949 &maint_btrace_show_cmdlist
, "maintenance show btrace ",
2950 0, &maintenance_show_cmdlist
);
2952 add_prefix_cmd ("pt", class_maintenance
, maint_btrace_pt_show_cmd
, _("\
2953 Show Intel Processor Trace specific variables."),
2954 &maint_btrace_pt_show_cmdlist
, "maintenance show btrace pt ",
2955 0, &maint_btrace_show_cmdlist
);
2957 add_setshow_boolean_cmd ("skip-pad", class_maintenance
,
2958 &maint_btrace_pt_skip_pad
, _("\
2959 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2960 Show whether PAD packets should be skipped in the btrace packet history."),_("\
2961 When enabled, PAD packets are ignored in the btrace packet history."),
2962 NULL
, show_maint_btrace_pt_skip_pad
,
2963 &maint_btrace_pt_set_cmdlist
,
2964 &maint_btrace_pt_show_cmdlist
);
2966 add_cmd ("packet-history", class_maintenance
, maint_btrace_packet_history_cmd
,
2967 _("Print the raw branch tracing data.\n\
2968 With no argument, print ten more packets after the previous ten-line print.\n\
2969 With '-' as argument print ten packets before a previous ten-line print.\n\
2970 One argument specifies the starting packet of a ten-line print.\n\
2971 Two arguments with comma between specify starting and ending packets to \
2973 Preceded with '+'/'-' the second argument specifies the distance from the \
2975 &maint_btrace_cmdlist
);
2977 add_cmd ("clear-packet-history", class_maintenance
,
2978 maint_btrace_clear_packet_history_cmd
,
2979 _("Clears the branch tracing packet history.\n\
2980 Discards the raw branch tracing data but not the execution history data.\n\
2982 &maint_btrace_cmdlist
);
2984 add_cmd ("clear", class_maintenance
, maint_btrace_clear_cmd
,
2985 _("Clears the branch tracing data.\n\
2986 Discards the raw branch tracing data and the execution history data.\n\
2987 The next 'record' command will fetch the branch tracing data anew.\n\
2989 &maint_btrace_cmdlist
);