1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
38 static void btrace_add_pc (struct thread_info
*tp
);
40 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
41 when used in if statements. */
43 #define DEBUG(msg, args...) \
46 if (record_debug != 0) \
47 fprintf_unfiltered (gdb_stdlog, \
48 "[btrace] " msg "\n", ##args); \
52 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
54 /* Return the function name of a recorded function segment for printing.
55 This function never returns NULL. */
58 ftrace_print_function_name (const struct btrace_function
*bfun
)
60 struct minimal_symbol
*msym
;
67 return SYMBOL_PRINT_NAME (sym
);
70 return MSYMBOL_PRINT_NAME (msym
);
75 /* Return the file name of a recorded function segment for printing.
76 This function never returns NULL. */
79 ftrace_print_filename (const struct btrace_function
*bfun
)
87 filename
= symtab_to_filename_for_display (symbol_symtab (sym
));
89 filename
= "<unknown>";
94 /* Return a string representation of the address of an instruction.
95 This function never returns NULL. */
98 ftrace_print_insn_addr (const struct btrace_insn
*insn
)
103 return core_addr_to_string_nz (insn
->pc
);
106 /* Print an ftrace debug status message. */
109 ftrace_debug (const struct btrace_function
*bfun
, const char *prefix
)
111 const char *fun
, *file
;
112 unsigned int ibegin
, iend
;
115 fun
= ftrace_print_function_name (bfun
);
116 file
= ftrace_print_filename (bfun
);
119 ibegin
= bfun
->insn_offset
;
120 iend
= ibegin
+ VEC_length (btrace_insn_s
, bfun
->insn
);
122 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
123 prefix
, fun
, file
, level
, ibegin
, iend
);
126 /* Return non-zero if BFUN does not match MFUN and FUN,
127 return zero otherwise. */
130 ftrace_function_switched (const struct btrace_function
*bfun
,
131 const struct minimal_symbol
*mfun
,
132 const struct symbol
*fun
)
134 struct minimal_symbol
*msym
;
140 /* If the minimal symbol changed, we certainly switched functions. */
141 if (mfun
!= NULL
&& msym
!= NULL
142 && strcmp (MSYMBOL_LINKAGE_NAME (mfun
), MSYMBOL_LINKAGE_NAME (msym
)) != 0)
145 /* If the symbol changed, we certainly switched functions. */
146 if (fun
!= NULL
&& sym
!= NULL
)
148 const char *bfname
, *fname
;
150 /* Check the function name. */
151 if (strcmp (SYMBOL_LINKAGE_NAME (fun
), SYMBOL_LINKAGE_NAME (sym
)) != 0)
154 /* Check the location of those functions, as well. */
155 bfname
= symtab_to_fullname (symbol_symtab (sym
));
156 fname
= symtab_to_fullname (symbol_symtab (fun
));
157 if (filename_cmp (fname
, bfname
) != 0)
161 /* If we lost symbol information, we switched functions. */
162 if (!(msym
== NULL
&& sym
== NULL
) && mfun
== NULL
&& fun
== NULL
)
165 /* If we gained symbol information, we switched functions. */
166 if (msym
== NULL
&& sym
== NULL
&& !(mfun
== NULL
&& fun
== NULL
))
172 /* Allocate and initialize a new branch trace function segment.
173 PREV is the chronologically preceding function segment.
174 MFUN and FUN are the symbol information we have for this function. */
176 static struct btrace_function
*
177 ftrace_new_function (struct btrace_function
*prev
,
178 struct minimal_symbol
*mfun
,
181 struct btrace_function
*bfun
;
183 bfun
= xzalloc (sizeof (*bfun
));
187 bfun
->flow
.prev
= prev
;
191 /* Start counting at one. */
193 bfun
->insn_offset
= 1;
197 gdb_assert (prev
->flow
.next
== NULL
);
198 prev
->flow
.next
= bfun
;
200 bfun
->number
= prev
->number
+ 1;
201 bfun
->insn_offset
= (prev
->insn_offset
202 + VEC_length (btrace_insn_s
, prev
->insn
));
203 bfun
->level
= prev
->level
;
209 /* Update the UP field of a function segment. */
212 ftrace_update_caller (struct btrace_function
*bfun
,
213 struct btrace_function
*caller
,
214 enum btrace_function_flag flags
)
216 if (bfun
->up
!= NULL
)
217 ftrace_debug (bfun
, "updating caller");
222 ftrace_debug (bfun
, "set caller");
225 /* Fix up the caller for all segments of a function. */
228 ftrace_fixup_caller (struct btrace_function
*bfun
,
229 struct btrace_function
*caller
,
230 enum btrace_function_flag flags
)
232 struct btrace_function
*prev
, *next
;
234 ftrace_update_caller (bfun
, caller
, flags
);
236 /* Update all function segments belonging to the same function. */
237 for (prev
= bfun
->segment
.prev
; prev
!= NULL
; prev
= prev
->segment
.prev
)
238 ftrace_update_caller (prev
, caller
, flags
);
240 for (next
= bfun
->segment
.next
; next
!= NULL
; next
= next
->segment
.next
)
241 ftrace_update_caller (next
, caller
, flags
);
244 /* Add a new function segment for a call.
245 CALLER is the chronologically preceding function segment.
246 MFUN and FUN are the symbol information we have for this function. */
248 static struct btrace_function
*
249 ftrace_new_call (struct btrace_function
*caller
,
250 struct minimal_symbol
*mfun
,
253 struct btrace_function
*bfun
;
255 bfun
= ftrace_new_function (caller
, mfun
, fun
);
259 ftrace_debug (bfun
, "new call");
264 /* Add a new function segment for a tail call.
265 CALLER is the chronologically preceding function segment.
266 MFUN and FUN are the symbol information we have for this function. */
268 static struct btrace_function
*
269 ftrace_new_tailcall (struct btrace_function
*caller
,
270 struct minimal_symbol
*mfun
,
273 struct btrace_function
*bfun
;
275 bfun
= ftrace_new_function (caller
, mfun
, fun
);
278 bfun
->flags
|= BFUN_UP_LINKS_TO_TAILCALL
;
280 ftrace_debug (bfun
, "new tail call");
285 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
286 symbol information. */
288 static struct btrace_function
*
289 ftrace_find_caller (struct btrace_function
*bfun
,
290 struct minimal_symbol
*mfun
,
293 for (; bfun
!= NULL
; bfun
= bfun
->up
)
295 /* Skip functions with incompatible symbol information. */
296 if (ftrace_function_switched (bfun
, mfun
, fun
))
299 /* This is the function segment we're looking for. */
306 /* Find the innermost caller in the back trace of BFUN, skipping all
307 function segments that do not end with a call instruction (e.g.
308 tail calls ending with a jump). */
310 static struct btrace_function
*
311 ftrace_find_call (struct btrace_function
*bfun
)
313 for (; bfun
!= NULL
; bfun
= bfun
->up
)
315 struct btrace_insn
*last
;
318 if (bfun
->errcode
!= 0)
321 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
323 if (last
->iclass
== BTRACE_INSN_CALL
)
330 /* Add a continuation segment for a function into which we return.
331 PREV is the chronologically preceding function segment.
332 MFUN and FUN are the symbol information we have for this function. */
334 static struct btrace_function
*
335 ftrace_new_return (struct btrace_function
*prev
,
336 struct minimal_symbol
*mfun
,
339 struct btrace_function
*bfun
, *caller
;
341 bfun
= ftrace_new_function (prev
, mfun
, fun
);
343 /* It is important to start at PREV's caller. Otherwise, we might find
344 PREV itself, if PREV is a recursive function. */
345 caller
= ftrace_find_caller (prev
->up
, mfun
, fun
);
348 /* The caller of PREV is the preceding btrace function segment in this
349 function instance. */
350 gdb_assert (caller
->segment
.next
== NULL
);
352 caller
->segment
.next
= bfun
;
353 bfun
->segment
.prev
= caller
;
355 /* Maintain the function level. */
356 bfun
->level
= caller
->level
;
358 /* Maintain the call stack. */
359 bfun
->up
= caller
->up
;
360 bfun
->flags
= caller
->flags
;
362 ftrace_debug (bfun
, "new return");
366 /* We did not find a caller. This could mean that something went
367 wrong or that the call is simply not included in the trace. */
369 /* Let's search for some actual call. */
370 caller
= ftrace_find_call (prev
->up
);
373 /* There is no call in PREV's back trace. We assume that the
374 branch trace did not include it. */
376 /* Let's find the topmost call function - this skips tail calls. */
377 while (prev
->up
!= NULL
)
380 /* We maintain levels for a series of returns for which we have
382 We start at the preceding function's level in case this has
383 already been a return for which we have not seen the call.
384 We start at level 0 otherwise, to handle tail calls correctly. */
385 bfun
->level
= min (0, prev
->level
) - 1;
387 /* Fix up the call stack for PREV. */
388 ftrace_fixup_caller (prev
, bfun
, BFUN_UP_LINKS_TO_RET
);
390 ftrace_debug (bfun
, "new return - no caller");
394 /* There is a call in PREV's back trace to which we should have
395 returned. Let's remain at this level. */
396 bfun
->level
= prev
->level
;
398 ftrace_debug (bfun
, "new return - unknown caller");
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}
425 /* Add a new function segment for a gap in the trace due to a decode error.
426 PREV is the chronologically preceding function segment.
427 ERRCODE is the format-specific error code. */
429 static struct btrace_function
*
430 ftrace_new_gap (struct btrace_function
*prev
, int errcode
)
432 struct btrace_function
*bfun
;
434 /* We hijack prev if it was empty. */
435 if (prev
!= NULL
&& prev
->errcode
== 0
436 && VEC_empty (btrace_insn_s
, prev
->insn
))
439 bfun
= ftrace_new_function (prev
, NULL
, NULL
);
441 bfun
->errcode
= errcode
;
443 ftrace_debug (bfun
, "new gap");
448 /* Update BFUN with respect to the instruction at PC. This may create new
450 Return the chronologically latest function segment, never NULL. */
452 static struct btrace_function
*
453 ftrace_update_function (struct btrace_function
*bfun
, CORE_ADDR pc
)
455 struct bound_minimal_symbol bmfun
;
456 struct minimal_symbol
*mfun
;
458 struct btrace_insn
*last
;
460 /* Try to determine the function we're in. We use both types of symbols
461 to avoid surprises when we sometimes get a full symbol and sometimes
462 only a minimal symbol. */
463 fun
= find_pc_function (pc
);
464 bmfun
= lookup_minimal_symbol_by_pc (pc
);
467 if (fun
== NULL
&& mfun
== NULL
)
468 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc
));
470 /* If we didn't have a function or if we had a gap before, we create one. */
471 if (bfun
== NULL
|| bfun
->errcode
!= 0)
472 return ftrace_new_function (bfun
, mfun
, fun
);
474 /* Check the last instruction, if we have one.
475 We do this check first, since it allows us to fill in the call stack
476 links in addition to the normal flow links. */
478 if (!VEC_empty (btrace_insn_s
, bfun
->insn
))
479 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
483 switch (last
->iclass
)
485 case BTRACE_INSN_RETURN
:
489 /* On some systems, _dl_runtime_resolve returns to the resolved
490 function instead of jumping to it. From our perspective,
491 however, this is a tailcall.
492 If we treated it as return, we wouldn't be able to find the
493 resolved function in our stack back trace. Hence, we would
494 lose the current stack back trace and start anew with an empty
495 back trace. When the resolved function returns, we would then
496 create a stack back trace with the same function names but
497 different frame id's. This will confuse stepping. */
498 fname
= ftrace_print_function_name (bfun
);
499 if (strcmp (fname
, "_dl_runtime_resolve") == 0)
500 return ftrace_new_tailcall (bfun
, mfun
, fun
);
502 return ftrace_new_return (bfun
, mfun
, fun
);
505 case BTRACE_INSN_CALL
:
506 /* Ignore calls to the next instruction. They are used for PIC. */
507 if (last
->pc
+ last
->size
== pc
)
510 return ftrace_new_call (bfun
, mfun
, fun
);
512 case BTRACE_INSN_JUMP
:
516 start
= get_pc_function_start (pc
);
518 /* If we can't determine the function for PC, we treat a jump at
519 the end of the block as tail call. */
520 if (start
== 0 || start
== pc
)
521 return ftrace_new_tailcall (bfun
, mfun
, fun
);
526 /* Check if we're switching functions for some other reason. */
527 if (ftrace_function_switched (bfun
, mfun
, fun
))
529 DEBUG_FTRACE ("switching from %s in %s at %s",
530 ftrace_print_insn_addr (last
),
531 ftrace_print_function_name (bfun
),
532 ftrace_print_filename (bfun
));
534 return ftrace_new_switch (bfun
, mfun
, fun
);
540 /* Add the instruction at PC to BFUN's instructions. */
543 ftrace_update_insns (struct btrace_function
*bfun
,
544 const struct btrace_insn
*insn
)
546 VEC_safe_push (btrace_insn_s
, bfun
->insn
, insn
);
548 if (record_debug
> 1)
549 ftrace_debug (bfun
, "update insn");
552 /* Classify the instruction at PC. */
554 static enum btrace_insn_class
555 ftrace_classify_insn (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
557 enum btrace_insn_class iclass
;
559 iclass
= BTRACE_INSN_OTHER
;
562 if (gdbarch_insn_is_call (gdbarch
, pc
))
563 iclass
= BTRACE_INSN_CALL
;
564 else if (gdbarch_insn_is_ret (gdbarch
, pc
))
565 iclass
= BTRACE_INSN_RETURN
;
566 else if (gdbarch_insn_is_jump (gdbarch
, pc
))
567 iclass
= BTRACE_INSN_JUMP
;
569 CATCH (error
, RETURN_MASK_ERROR
)
577 /* Compute the function branch trace from BTS trace. */
580 btrace_compute_ftrace_bts (struct thread_info
*tp
,
581 const struct btrace_data_bts
*btrace
)
583 struct btrace_thread_info
*btinfo
;
584 struct btrace_function
*begin
, *end
;
585 struct gdbarch
*gdbarch
;
586 unsigned int blk
, ngaps
;
589 gdbarch
= target_gdbarch ();
590 btinfo
= &tp
->btrace
;
591 begin
= btinfo
->begin
;
593 ngaps
= btinfo
->ngaps
;
594 level
= begin
!= NULL
? -btinfo
->level
: INT_MAX
;
595 blk
= VEC_length (btrace_block_s
, btrace
->blocks
);
599 btrace_block_s
*block
;
604 block
= VEC_index (btrace_block_s
, btrace
->blocks
, blk
);
609 struct btrace_insn insn
;
612 /* We should hit the end of the block. Warn if we went too far. */
615 /* Indicate the gap in the trace - unless we're at the
619 warning (_("Recorded trace may be corrupted around %s."),
620 core_addr_to_string_nz (pc
));
622 end
= ftrace_new_gap (end
, BDE_BTS_OVERFLOW
);
628 end
= ftrace_update_function (end
, pc
);
632 /* Maintain the function level offset.
633 For all but the last block, we do it here. */
635 level
= min (level
, end
->level
);
640 size
= gdb_insn_length (gdbarch
, pc
);
642 CATCH (error
, RETURN_MASK_ERROR
)
649 insn
.iclass
= ftrace_classify_insn (gdbarch
, pc
);
651 ftrace_update_insns (end
, &insn
);
653 /* We're done once we pushed the instruction at the end. */
654 if (block
->end
== pc
)
657 /* We can't continue if we fail to compute the size. */
660 warning (_("Recorded trace may be incomplete around %s."),
661 core_addr_to_string_nz (pc
));
663 /* Indicate the gap in the trace. We just added INSN so we're
664 not at the beginning. */
665 end
= ftrace_new_gap (end
, BDE_BTS_INSN_SIZE
);
673 /* Maintain the function level offset.
674 For the last block, we do it here to not consider the last
676 Since the last instruction corresponds to the current instruction
677 and is not really part of the execution history, it shouldn't
680 level
= min (level
, end
->level
);
684 btinfo
->begin
= begin
;
686 btinfo
->ngaps
= ngaps
;
688 /* LEVEL is the minimal function level of all btrace function segments.
689 Define the global level offset to -LEVEL so all function levels are
690 normalized to start at zero. */
691 btinfo
->level
= -level
;
694 #if defined (HAVE_LIBIPT)
696 static enum btrace_insn_class
697 pt_reclassify_insn (enum pt_insn_class iclass
)
702 return BTRACE_INSN_CALL
;
705 return BTRACE_INSN_RETURN
;
708 return BTRACE_INSN_JUMP
;
711 return BTRACE_INSN_OTHER
;
715 /* Add function branch trace using DECODER. */
718 ftrace_add_pt (struct pt_insn_decoder
*decoder
,
719 struct btrace_function
**pbegin
,
720 struct btrace_function
**pend
, int *plevel
,
723 struct btrace_function
*begin
, *end
, *upd
;
725 int errcode
, nerrors
;
732 struct btrace_insn btinsn
;
735 errcode
= pt_insn_sync_forward (decoder
);
738 if (errcode
!= -pte_eos
)
739 warning (_("Failed to synchronize onto the Intel(R) Processor "
740 "Trace stream: %s."), pt_errstr (pt_errcode (errcode
)));
744 memset (&btinsn
, 0, sizeof (btinsn
));
747 errcode
= pt_insn_next (decoder
, &insn
, sizeof(insn
));
751 /* Look for gaps in the trace - unless we're at the beginning. */
754 /* Tracing is disabled and re-enabled each time we enter the
755 kernel. Most times, we continue from the same instruction we
756 stopped before. This is indicated via the RESUMED instruction
757 flag. The ENABLED instruction flag means that we continued
758 from some other instruction. Indicate this as a trace gap. */
760 *pend
= end
= ftrace_new_gap (end
, BDE_PT_DISABLED
);
762 /* Indicate trace overflows. */
764 *pend
= end
= ftrace_new_gap (end
, BDE_PT_OVERFLOW
);
767 upd
= ftrace_update_function (end
, insn
.ip
);
773 *pbegin
= begin
= upd
;
776 /* Maintain the function level offset. */
777 *plevel
= min (*plevel
, end
->level
);
779 btinsn
.pc
= (CORE_ADDR
) insn
.ip
;
780 btinsn
.size
= (gdb_byte
) insn
.size
;
781 btinsn
.iclass
= pt_reclassify_insn (insn
.iclass
);
783 ftrace_update_insns (end
, &btinsn
);
786 if (errcode
== -pte_eos
)
789 /* If the gap is at the very beginning, we ignore it - we will have
790 less trace, but we won't have any holes in the trace. */
794 pt_insn_get_offset (decoder
, &offset
);
796 warning (_("Failed to decode Intel(R) Processor Trace near trace "
797 "offset 0x%" PRIx64
" near recorded PC 0x%" PRIx64
": %s."),
798 offset
, insn
.ip
, pt_errstr (pt_errcode (errcode
)));
800 /* Indicate the gap in the trace. */
801 *pend
= end
= ftrace_new_gap (end
, errcode
);
806 warning (_("The recorded execution trace may have gaps."));
809 /* A callback function to allow the trace decoder to read the inferior's
813 btrace_pt_readmem_callback (gdb_byte
*buffer
, size_t size
,
814 const struct pt_asid
*asid
, CORE_ADDR pc
,
821 errcode
= target_read_code (pc
, buffer
, size
);
825 CATCH (error
, RETURN_MASK_ERROR
)
834 /* Translate the vendor from one enum to another. */
836 static enum pt_cpu_vendor
837 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor
)
849 /* Finalize the function branch trace after decode. */
851 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder
*decoder
,
852 struct thread_info
*tp
, int level
)
854 pt_insn_free_decoder (decoder
);
856 /* LEVEL is the minimal function level of all btrace function segments.
857 Define the global level offset to -LEVEL so all function levels are
858 normalized to start at zero. */
859 tp
->btrace
.level
= -level
;
861 /* Add a single last instruction entry for the current PC.
862 This allows us to compute the backtrace at the current PC using both
863 standard unwind and btrace unwind.
864 This extra entry is ignored by all record commands. */
868 /* Compute the function branch trace from Intel(R) Processor Trace. */
871 btrace_compute_ftrace_pt (struct thread_info
*tp
,
872 const struct btrace_data_pt
*btrace
)
874 struct btrace_thread_info
*btinfo
;
875 struct pt_insn_decoder
*decoder
;
876 struct pt_config config
;
879 if (btrace
->size
== 0)
882 btinfo
= &tp
->btrace
;
883 level
= btinfo
->begin
!= NULL
? -btinfo
->level
: INT_MAX
;
885 pt_config_init(&config
);
886 config
.begin
= btrace
->data
;
887 config
.end
= btrace
->data
+ btrace
->size
;
889 config
.cpu
.vendor
= pt_translate_cpu_vendor (btrace
->config
.cpu
.vendor
);
890 config
.cpu
.family
= btrace
->config
.cpu
.family
;
891 config
.cpu
.model
= btrace
->config
.cpu
.model
;
892 config
.cpu
.stepping
= btrace
->config
.cpu
.stepping
;
894 errcode
= pt_cpu_errata (&config
.errata
, &config
.cpu
);
896 error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
897 pt_errstr (pt_errcode (errcode
)));
899 decoder
= pt_insn_alloc_decoder (&config
);
901 error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
905 struct pt_image
*image
;
907 image
= pt_insn_get_image(decoder
);
909 error (_("Failed to configure the Intel(R) Processor Trace decoder."));
911 errcode
= pt_image_set_callback(image
, btrace_pt_readmem_callback
, NULL
);
913 error (_("Failed to configure the Intel(R) Processor Trace decoder: "
914 "%s."), pt_errstr (pt_errcode (errcode
)));
916 ftrace_add_pt (decoder
, &btinfo
->begin
, &btinfo
->end
, &level
,
919 CATCH (error
, RETURN_MASK_ALL
)
921 /* Indicate a gap in the trace if we quit trace processing. */
922 if (error
.reason
== RETURN_QUIT
&& btinfo
->end
!= NULL
)
924 btinfo
->end
= ftrace_new_gap (btinfo
->end
, BDE_PT_USER_QUIT
);
928 btrace_finalize_ftrace_pt (decoder
, tp
, level
);
930 throw_exception (error
);
934 btrace_finalize_ftrace_pt (decoder
, tp
, level
);
937 #else /* defined (HAVE_LIBIPT) */
/* Stub used when GDB is built without libipt: decoding Intel(R)
   Processor Trace is impossible, so reaching this is an internal
   error.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}
946 #endif /* defined (HAVE_LIBIPT) */
948 /* Compute the function branch trace from a block branch trace BTRACE for
949 a thread given by BTINFO. */
952 btrace_compute_ftrace (struct thread_info
*tp
, struct btrace_data
*btrace
)
954 DEBUG ("compute ftrace");
956 switch (btrace
->format
)
958 case BTRACE_FORMAT_NONE
:
961 case BTRACE_FORMAT_BTS
:
962 btrace_compute_ftrace_bts (tp
, &btrace
->variant
.bts
);
965 case BTRACE_FORMAT_PT
:
966 btrace_compute_ftrace_pt (tp
, &btrace
->variant
.pt
);
970 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
973 /* Add an entry for the current PC. */
976 btrace_add_pc (struct thread_info
*tp
)
978 struct btrace_data btrace
;
979 struct btrace_block
*block
;
980 struct regcache
*regcache
;
981 struct cleanup
*cleanup
;
984 regcache
= get_thread_regcache (tp
->ptid
);
985 pc
= regcache_read_pc (regcache
);
987 btrace_data_init (&btrace
);
988 btrace
.format
= BTRACE_FORMAT_BTS
;
989 btrace
.variant
.bts
.blocks
= NULL
;
991 cleanup
= make_cleanup_btrace_data (&btrace
);
993 block
= VEC_safe_push (btrace_block_s
, btrace
.variant
.bts
.blocks
, NULL
);
997 btrace_compute_ftrace (tp
, &btrace
);
999 do_cleanups (cleanup
);
1005 btrace_enable (struct thread_info
*tp
, const struct btrace_config
*conf
)
1007 if (tp
->btrace
.target
!= NULL
)
1010 if (!target_supports_btrace (conf
->format
))
1011 error (_("Target does not support branch tracing."));
1013 DEBUG ("enable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1015 tp
->btrace
.target
= target_enable_btrace (tp
->ptid
, conf
);
1017 /* Add an entry for the current PC so we start tracing from where we
1019 if (tp
->btrace
.target
!= NULL
)
1025 const struct btrace_config
*
1026 btrace_conf (const struct btrace_thread_info
*btinfo
)
1028 if (btinfo
->target
== NULL
)
1031 return target_btrace_conf (btinfo
->target
);
1037 btrace_disable (struct thread_info
*tp
)
1039 struct btrace_thread_info
*btp
= &tp
->btrace
;
1042 if (btp
->target
== NULL
)
1045 DEBUG ("disable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1047 target_disable_btrace (btp
->target
);
1056 btrace_teardown (struct thread_info
*tp
)
1058 struct btrace_thread_info
*btp
= &tp
->btrace
;
1061 if (btp
->target
== NULL
)
1064 DEBUG ("teardown thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1066 target_teardown_btrace (btp
->target
);
1072 /* Stitch branch trace in BTS format. */
1075 btrace_stitch_bts (struct btrace_data_bts
*btrace
, struct thread_info
*tp
)
1077 struct btrace_thread_info
*btinfo
;
1078 struct btrace_function
*last_bfun
;
1079 struct btrace_insn
*last_insn
;
1080 btrace_block_s
*first_new_block
;
1082 btinfo
= &tp
->btrace
;
1083 last_bfun
= btinfo
->end
;
1084 gdb_assert (last_bfun
!= NULL
);
1085 gdb_assert (!VEC_empty (btrace_block_s
, btrace
->blocks
));
1087 /* If the existing trace ends with a gap, we just glue the traces
1088 together. We need to drop the last (i.e. chronologically first) block
1089 of the new trace, though, since we can't fill in the start address.*/
1090 if (VEC_empty (btrace_insn_s
, last_bfun
->insn
))
1092 VEC_pop (btrace_block_s
, btrace
->blocks
);
1096 /* Beware that block trace starts with the most recent block, so the
1097 chronologically first block in the new trace is the last block in
1098 the new trace's block vector. */
1099 first_new_block
= VEC_last (btrace_block_s
, btrace
->blocks
);
1100 last_insn
= VEC_last (btrace_insn_s
, last_bfun
->insn
);
1102 /* If the current PC at the end of the block is the same as in our current
1103 trace, there are two explanations:
1104 1. we executed the instruction and some branch brought us back.
1105 2. we have not made any progress.
1106 In the first case, the delta trace vector should contain at least two
1108 In the second case, the delta trace vector should contain exactly one
1109 entry for the partial block containing the current PC. Remove it. */
1110 if (first_new_block
->end
== last_insn
->pc
1111 && VEC_length (btrace_block_s
, btrace
->blocks
) == 1)
1113 VEC_pop (btrace_block_s
, btrace
->blocks
);
1117 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn
),
1118 core_addr_to_string_nz (first_new_block
->end
));
1120 /* Do a simple sanity check to make sure we don't accidentally end up
1121 with a bad block. This should not occur in practice. */
1122 if (first_new_block
->end
< last_insn
->pc
)
1124 warning (_("Error while trying to read delta trace. Falling back to "
1129 /* We adjust the last block to start at the end of our current trace. */
1130 gdb_assert (first_new_block
->begin
== 0);
1131 first_new_block
->begin
= last_insn
->pc
;
1133 /* We simply pop the last insn so we can insert it again as part of
1134 the normal branch trace computation.
1135 Since instruction iterators are based on indices in the instructions
1136 vector, we don't leave any pointers dangling. */
1137 DEBUG ("pruning insn at %s for stitching",
1138 ftrace_print_insn_addr (last_insn
));
1140 VEC_pop (btrace_insn_s
, last_bfun
->insn
);
1142 /* The instructions vector may become empty temporarily if this has
1143 been the only instruction in this function segment.
1144 This violates the invariant but will be remedied shortly by
1145 btrace_compute_ftrace when we add the new trace. */
1147 /* The only case where this would hurt is if the entire trace consisted
1148 of just that one instruction. If we remove it, we might turn the now
1149 empty btrace function segment into a gap. But we don't want gaps at
1150 the beginning. To avoid this, we remove the entire old trace. */
1151 if (last_bfun
== btinfo
->begin
&& VEC_empty (btrace_insn_s
, last_bfun
->insn
))
1157 /* Adjust the block trace in order to stitch old and new trace together.
1158 BTRACE is the new delta trace between the last and the current stop.
1159 TP is the traced thread.
1160 May modifx BTRACE as well as the existing trace in TP.
1161 Return 0 on success, -1 otherwise. */
1164 btrace_stitch_trace (struct btrace_data
*btrace
, struct thread_info
*tp
)
1166 /* If we don't have trace, there's nothing to do. */
1167 if (btrace_data_empty (btrace
))
1170 switch (btrace
->format
)
1172 case BTRACE_FORMAT_NONE
:
1175 case BTRACE_FORMAT_BTS
:
1176 return btrace_stitch_bts (&btrace
->variant
.bts
, tp
);
1178 case BTRACE_FORMAT_PT
:
1179 /* Delta reads are not supported. */
1183 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1186 /* Clear the branch trace histories in BTINFO. */
1189 btrace_clear_history (struct btrace_thread_info
*btinfo
)
1191 xfree (btinfo
->insn_history
);
1192 xfree (btinfo
->call_history
);
1193 xfree (btinfo
->replay
);
1195 btinfo
->insn_history
= NULL
;
1196 btinfo
->call_history
= NULL
;
1197 btinfo
->replay
= NULL
;
1203 btrace_fetch (struct thread_info
*tp
)
1205 struct btrace_thread_info
*btinfo
;
1206 struct btrace_target_info
*tinfo
;
1207 struct btrace_data btrace
;
1208 struct cleanup
*cleanup
;
1211 DEBUG ("fetch thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1213 btinfo
= &tp
->btrace
;
1214 tinfo
= btinfo
->target
;
1218 /* There's no way we could get new trace while replaying.
1219 On the other hand, delta trace would return a partial record with the
1220 current PC, which is the replay PC, not the last PC, as expected. */
1221 if (btinfo
->replay
!= NULL
)
1224 btrace_data_init (&btrace
);
1225 cleanup
= make_cleanup_btrace_data (&btrace
);
1227 /* Let's first try to extend the trace we already have. */
1228 if (btinfo
->end
!= NULL
)
1230 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_DELTA
);
1233 /* Success. Let's try to stitch the traces together. */
1234 errcode
= btrace_stitch_trace (&btrace
, tp
);
1238 /* We failed to read delta trace. Let's try to read new trace. */
1239 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_NEW
);
1241 /* If we got any new trace, discard what we have. */
1242 if (errcode
== 0 && !btrace_data_empty (&btrace
))
1246 /* If we were not able to read the trace, we start over. */
1250 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
1254 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
1256 /* If we were not able to read the branch trace, signal an error. */
1258 error (_("Failed to read branch trace."));
1260 /* Compute the trace, provided we have any. */
1261 if (!btrace_data_empty (&btrace
))
1263 btrace_clear_history (btinfo
);
1264 btrace_compute_ftrace (tp
, &btrace
);
1267 do_cleanups (cleanup
);
1273 btrace_clear (struct thread_info
*tp
)
1275 struct btrace_thread_info
*btinfo
;
1276 struct btrace_function
*it
, *trash
;
1278 DEBUG ("clear thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1280 /* Make sure btrace frames that may hold a pointer into the branch
1281 trace data are destroyed. */
1282 reinit_frame_cache ();
1284 btinfo
= &tp
->btrace
;
1295 btinfo
->begin
= NULL
;
1299 btrace_clear_history (btinfo
);
1305 btrace_free_objfile (struct objfile
*objfile
)
1307 struct thread_info
*tp
;
1309 DEBUG ("free objfile");
1311 ALL_NON_EXITED_THREADS (tp
)
1315 #if defined (HAVE_LIBEXPAT)
1317 /* Check the btrace document version. */
1320 check_xml_btrace_version (struct gdb_xml_parser
*parser
,
1321 const struct gdb_xml_element
*element
,
1322 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1324 const char *version
= xml_find_attribute (attributes
, "version")->value
;
1326 if (strcmp (version
, "1.0") != 0)
1327 gdb_xml_error (parser
, _("Unsupported btrace version: \"%s\""), version
);
1330 /* Parse a btrace "block" xml record. */
1333 parse_xml_btrace_block (struct gdb_xml_parser
*parser
,
1334 const struct gdb_xml_element
*element
,
1335 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1337 struct btrace_data
*btrace
;
1338 struct btrace_block
*block
;
1339 ULONGEST
*begin
, *end
;
1343 switch (btrace
->format
)
1345 case BTRACE_FORMAT_BTS
:
1348 case BTRACE_FORMAT_NONE
:
1349 btrace
->format
= BTRACE_FORMAT_BTS
;
1350 btrace
->variant
.bts
.blocks
= NULL
;
1354 gdb_xml_error (parser
, _("Btrace format error."));
1357 begin
= xml_find_attribute (attributes
, "begin")->value
;
1358 end
= xml_find_attribute (attributes
, "end")->value
;
1360 block
= VEC_safe_push (btrace_block_s
, btrace
->variant
.bts
.blocks
, NULL
);
1361 block
->begin
= *begin
;
1365 /* Parse a "raw" xml record. */
1368 parse_xml_raw (struct gdb_xml_parser
*parser
, const char *body_text
,
1369 gdb_byte
**pdata
, unsigned long *psize
)
1371 struct cleanup
*cleanup
;
1372 gdb_byte
*data
, *bin
;
1376 len
= strlen (body_text
);
1379 if ((size_t) size
* 2 != len
)
1380 gdb_xml_error (parser
, _("Bad raw data size."));
1382 bin
= data
= xmalloc (size
);
1383 cleanup
= make_cleanup (xfree
, data
);
1385 /* We use hex encoding - see common/rsp-low.h. */
1393 if (hi
== 0 || lo
== 0)
1394 gdb_xml_error (parser
, _("Bad hex encoding."));
1396 *bin
++ = fromhex (hi
) * 16 + fromhex (lo
);
1400 discard_cleanups (cleanup
);
1406 /* Parse a btrace pt-config "cpu" xml record. */
1409 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser
*parser
,
1410 const struct gdb_xml_element
*element
,
1412 VEC (gdb_xml_value_s
) *attributes
)
1414 struct btrace_data
*btrace
;
1416 ULONGEST
*family
, *model
, *stepping
;
1418 vendor
= xml_find_attribute (attributes
, "vendor")->value
;
1419 family
= xml_find_attribute (attributes
, "family")->value
;
1420 model
= xml_find_attribute (attributes
, "model")->value
;
1421 stepping
= xml_find_attribute (attributes
, "stepping")->value
;
1425 if (strcmp (vendor
, "GenuineIntel") == 0)
1426 btrace
->variant
.pt
.config
.cpu
.vendor
= CV_INTEL
;
1428 btrace
->variant
.pt
.config
.cpu
.family
= *family
;
1429 btrace
->variant
.pt
.config
.cpu
.model
= *model
;
1430 btrace
->variant
.pt
.config
.cpu
.stepping
= *stepping
;
1433 /* Parse a btrace pt "raw" xml record. */
1436 parse_xml_btrace_pt_raw (struct gdb_xml_parser
*parser
,
1437 const struct gdb_xml_element
*element
,
1438 void *user_data
, const char *body_text
)
1440 struct btrace_data
*btrace
;
1443 parse_xml_raw (parser
, body_text
, &btrace
->variant
.pt
.data
,
1444 &btrace
->variant
.pt
.size
);
1447 /* Parse a btrace "pt" xml record. */
1450 parse_xml_btrace_pt (struct gdb_xml_parser
*parser
,
1451 const struct gdb_xml_element
*element
,
1452 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1454 struct btrace_data
*btrace
;
1457 btrace
->format
= BTRACE_FORMAT_PT
;
1458 btrace
->variant
.pt
.config
.cpu
.vendor
= CV_UNKNOWN
;
1459 btrace
->variant
.pt
.data
= NULL
;
1460 btrace
->variant
.pt
.size
= 0;
1463 static const struct gdb_xml_attribute block_attributes
[] = {
1464 { "begin", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1465 { "end", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1466 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1469 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes
[] = {
1470 { "vendor", GDB_XML_AF_NONE
, NULL
, NULL
},
1471 { "family", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1472 { "model", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1473 { "stepping", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1474 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1477 static const struct gdb_xml_element btrace_pt_config_children
[] = {
1478 { "cpu", btrace_pt_config_cpu_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1479 parse_xml_btrace_pt_config_cpu
, NULL
},
1480 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1483 static const struct gdb_xml_element btrace_pt_children
[] = {
1484 { "pt-config", NULL
, btrace_pt_config_children
, GDB_XML_EF_OPTIONAL
, NULL
,
1486 { "raw", NULL
, NULL
, GDB_XML_EF_OPTIONAL
, NULL
, parse_xml_btrace_pt_raw
},
1487 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1490 static const struct gdb_xml_attribute btrace_attributes
[] = {
1491 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1492 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1495 static const struct gdb_xml_element btrace_children
[] = {
1496 { "block", block_attributes
, NULL
,
1497 GDB_XML_EF_REPEATABLE
| GDB_XML_EF_OPTIONAL
, parse_xml_btrace_block
, NULL
},
1498 { "pt", NULL
, btrace_pt_children
, GDB_XML_EF_OPTIONAL
, parse_xml_btrace_pt
,
1500 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1503 static const struct gdb_xml_element btrace_elements
[] = {
1504 { "btrace", btrace_attributes
, btrace_children
, GDB_XML_EF_NONE
,
1505 check_xml_btrace_version
, NULL
},
1506 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1509 #endif /* defined (HAVE_LIBEXPAT) */
/* Parse the branch trace xml document BUFFER into BTRACE.  Throws an
   error if parsing fails or XML support is not compiled in.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
#if defined (HAVE_LIBEXPAT)
  struct cleanup *cleanup;
  int errcode;

  btrace->format = BTRACE_FORMAT_NONE;

  /* Clean up partially-filled data if parsing throws.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1539 #if defined (HAVE_LIBEXPAT)
1541 /* Parse a btrace-conf "bts" xml record. */
1544 parse_xml_btrace_conf_bts (struct gdb_xml_parser
*parser
,
1545 const struct gdb_xml_element
*element
,
1546 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1548 struct btrace_config
*conf
;
1549 struct gdb_xml_value
*size
;
1552 conf
->format
= BTRACE_FORMAT_BTS
;
1555 size
= xml_find_attribute (attributes
, "size");
1557 conf
->bts
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
1560 /* Parse a btrace-conf "pt" xml record. */
1563 parse_xml_btrace_conf_pt (struct gdb_xml_parser
*parser
,
1564 const struct gdb_xml_element
*element
,
1565 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1567 struct btrace_config
*conf
;
1568 struct gdb_xml_value
*size
;
1571 conf
->format
= BTRACE_FORMAT_PT
;
1574 size
= xml_find_attribute (attributes
, "size");
1576 conf
->pt
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
1579 static const struct gdb_xml_attribute btrace_conf_pt_attributes
[] = {
1580 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
1581 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1584 static const struct gdb_xml_attribute btrace_conf_bts_attributes
[] = {
1585 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
1586 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1589 static const struct gdb_xml_element btrace_conf_children
[] = {
1590 { "bts", btrace_conf_bts_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1591 parse_xml_btrace_conf_bts
, NULL
},
1592 { "pt", btrace_conf_pt_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1593 parse_xml_btrace_conf_pt
, NULL
},
1594 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1597 static const struct gdb_xml_attribute btrace_conf_attributes
[] = {
1598 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1599 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1602 static const struct gdb_xml_element btrace_conf_elements
[] = {
1603 { "btrace-conf", btrace_conf_attributes
, btrace_conf_children
,
1604 GDB_XML_EF_NONE
, NULL
, NULL
},
1605 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1608 #endif /* defined (HAVE_LIBEXPAT) */
/* Parse the branch trace configuration xml document XML into CONF.
   Throws an error on parse failure or when XML support is missing.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
#if defined (HAVE_LIBEXPAT)
  int errcode;

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1633 const struct btrace_insn
*
1634 btrace_insn_get (const struct btrace_insn_iterator
*it
)
1636 const struct btrace_function
*bfun
;
1637 unsigned int index
, end
;
1640 bfun
= it
->function
;
1642 /* Check if the iterator points to a gap in the trace. */
1643 if (bfun
->errcode
!= 0)
1646 /* The index is within the bounds of this function's instruction vector. */
1647 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1648 gdb_assert (0 < end
);
1649 gdb_assert (index
< end
);
1651 return VEC_index (btrace_insn_s
, bfun
->insn
, index
);
1657 btrace_insn_number (const struct btrace_insn_iterator
*it
)
1659 const struct btrace_function
*bfun
;
1661 bfun
= it
->function
;
1663 /* Return zero if the iterator points to a gap in the trace. */
1664 if (bfun
->errcode
!= 0)
1667 return bfun
->insn_offset
+ it
->index
;
1673 btrace_insn_begin (struct btrace_insn_iterator
*it
,
1674 const struct btrace_thread_info
*btinfo
)
1676 const struct btrace_function
*bfun
;
1678 bfun
= btinfo
->begin
;
1680 error (_("No trace."));
1682 it
->function
= bfun
;
1689 btrace_insn_end (struct btrace_insn_iterator
*it
,
1690 const struct btrace_thread_info
*btinfo
)
1692 const struct btrace_function
*bfun
;
1693 unsigned int length
;
1697 error (_("No trace."));
1699 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1701 /* The last function may either be a gap or it contains the current
1702 instruction, which is one past the end of the execution trace; ignore
1707 it
->function
= bfun
;
1714 btrace_insn_next (struct btrace_insn_iterator
*it
, unsigned int stride
)
1716 const struct btrace_function
*bfun
;
1717 unsigned int index
, steps
;
1719 bfun
= it
->function
;
1725 unsigned int end
, space
, adv
;
1727 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1729 /* An empty function segment represents a gap in the trace. We count
1730 it as one instruction. */
1733 const struct btrace_function
*next
;
1735 next
= bfun
->flow
.next
;
1748 gdb_assert (0 < end
);
1749 gdb_assert (index
< end
);
1751 /* Compute the number of instructions remaining in this segment. */
1752 space
= end
- index
;
1754 /* Advance the iterator as far as possible within this segment. */
1755 adv
= min (space
, stride
);
1760 /* Move to the next function if we're at the end of this one. */
1763 const struct btrace_function
*next
;
1765 next
= bfun
->flow
.next
;
1768 /* We stepped past the last function.
1770 Let's adjust the index to point to the last instruction in
1771 the previous function. */
1777 /* We now point to the first instruction in the new function. */
1782 /* We did make progress. */
1783 gdb_assert (adv
> 0);
1786 /* Update the iterator. */
1787 it
->function
= bfun
;
1796 btrace_insn_prev (struct btrace_insn_iterator
*it
, unsigned int stride
)
1798 const struct btrace_function
*bfun
;
1799 unsigned int index
, steps
;
1801 bfun
= it
->function
;
1809 /* Move to the previous function if we're at the start of this one. */
1812 const struct btrace_function
*prev
;
1814 prev
= bfun
->flow
.prev
;
1818 /* We point to one after the last instruction in the new function. */
1820 index
= VEC_length (btrace_insn_s
, bfun
->insn
);
1822 /* An empty function segment represents a gap in the trace. We count
1823 it as one instruction. */
1833 /* Advance the iterator as far as possible within this segment. */
1834 adv
= min (index
, stride
);
1840 /* We did make progress. */
1841 gdb_assert (adv
> 0);
1844 /* Update the iterator. */
1845 it
->function
= bfun
;
1854 btrace_insn_cmp (const struct btrace_insn_iterator
*lhs
,
1855 const struct btrace_insn_iterator
*rhs
)
1857 unsigned int lnum
, rnum
;
1859 lnum
= btrace_insn_number (lhs
);
1860 rnum
= btrace_insn_number (rhs
);
1862 /* A gap has an instruction number of zero. Things are getting more
1863 complicated if gaps are involved.
1865 We take the instruction number offset from the iterator's function.
1866 This is the number of the first instruction after the gap.
1868 This is OK as long as both lhs and rhs point to gaps. If only one of
1869 them does, we need to adjust the number based on the other's regular
1870 instruction number. Otherwise, a gap might compare equal to an
1873 if (lnum
== 0 && rnum
== 0)
1875 lnum
= lhs
->function
->insn_offset
;
1876 rnum
= rhs
->function
->insn_offset
;
1880 lnum
= lhs
->function
->insn_offset
;
1887 rnum
= rhs
->function
->insn_offset
;
1893 return (int) (lnum
- rnum
);
1899 btrace_find_insn_by_number (struct btrace_insn_iterator
*it
,
1900 const struct btrace_thread_info
*btinfo
,
1901 unsigned int number
)
1903 const struct btrace_function
*bfun
;
1904 unsigned int end
, length
;
1906 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1909 if (bfun
->errcode
!= 0)
1912 if (bfun
->insn_offset
<= number
)
1919 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1920 gdb_assert (length
> 0);
1922 end
= bfun
->insn_offset
+ length
;
1926 it
->function
= bfun
;
1927 it
->index
= number
- bfun
->insn_offset
;
1934 const struct btrace_function
*
1935 btrace_call_get (const struct btrace_call_iterator
*it
)
1937 return it
->function
;
1943 btrace_call_number (const struct btrace_call_iterator
*it
)
1945 const struct btrace_thread_info
*btinfo
;
1946 const struct btrace_function
*bfun
;
1949 btinfo
= it
->btinfo
;
1950 bfun
= it
->function
;
1952 return bfun
->number
;
1954 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1955 number of the last function. */
1957 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1959 /* If the function contains only a single instruction (i.e. the current
1960 instruction), it will be skipped and its number is already the number
1963 return bfun
->number
;
1965 /* Otherwise, return one more than the number of the last function. */
1966 return bfun
->number
+ 1;
1972 btrace_call_begin (struct btrace_call_iterator
*it
,
1973 const struct btrace_thread_info
*btinfo
)
1975 const struct btrace_function
*bfun
;
1977 bfun
= btinfo
->begin
;
1979 error (_("No trace."));
1981 it
->btinfo
= btinfo
;
1982 it
->function
= bfun
;
1988 btrace_call_end (struct btrace_call_iterator
*it
,
1989 const struct btrace_thread_info
*btinfo
)
1991 const struct btrace_function
*bfun
;
1995 error (_("No trace."));
1997 it
->btinfo
= btinfo
;
1998 it
->function
= NULL
;
2004 btrace_call_next (struct btrace_call_iterator
*it
, unsigned int stride
)
2006 const struct btrace_function
*bfun
;
2009 bfun
= it
->function
;
2011 while (bfun
!= NULL
)
2013 const struct btrace_function
*next
;
2016 next
= bfun
->flow
.next
;
2019 /* Ignore the last function if it only contains a single
2020 (i.e. the current) instruction. */
2021 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
2026 if (stride
== steps
)
2033 it
->function
= bfun
;
2040 btrace_call_prev (struct btrace_call_iterator
*it
, unsigned int stride
)
2042 const struct btrace_thread_info
*btinfo
;
2043 const struct btrace_function
*bfun
;
2046 bfun
= it
->function
;
2053 btinfo
= it
->btinfo
;
2058 /* Ignore the last function if it only contains a single
2059 (i.e. the current) instruction. */
2060 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
2062 bfun
= bfun
->flow
.prev
;
2070 while (steps
< stride
)
2072 const struct btrace_function
*prev
;
2074 prev
= bfun
->flow
.prev
;
2082 it
->function
= bfun
;
/* Compare two call iterators.  Returns a negative, zero, or positive
   value if LHS is before, equal to, or after RHS.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lhs_number, rhs_number;

  lhs_number = btrace_call_number (lhs);
  rhs_number = btrace_call_number (rhs);

  return (int) (lhs_number - rhs_number);
}
2103 btrace_find_call_by_number (struct btrace_call_iterator
*it
,
2104 const struct btrace_thread_info
*btinfo
,
2105 unsigned int number
)
2107 const struct btrace_function
*bfun
;
2109 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
2113 bnum
= bfun
->number
;
2116 it
->btinfo
= btinfo
;
2117 it
->function
= bfun
;
2121 /* Functions are ordered and numbered consecutively. We could bail out
2122 earlier. On the other hand, it is very unlikely that we search for
2123 a nonexistent function. */
2132 btrace_set_insn_history (struct btrace_thread_info
*btinfo
,
2133 const struct btrace_insn_iterator
*begin
,
2134 const struct btrace_insn_iterator
*end
)
2136 if (btinfo
->insn_history
== NULL
)
2137 btinfo
->insn_history
= xzalloc (sizeof (*btinfo
->insn_history
));
2139 btinfo
->insn_history
->begin
= *begin
;
2140 btinfo
->insn_history
->end
= *end
;
2146 btrace_set_call_history (struct btrace_thread_info
*btinfo
,
2147 const struct btrace_call_iterator
*begin
,
2148 const struct btrace_call_iterator
*end
)
2150 gdb_assert (begin
->btinfo
== end
->btinfo
);
2152 if (btinfo
->call_history
== NULL
)
2153 btinfo
->call_history
= xzalloc (sizeof (*btinfo
->call_history
));
2155 btinfo
->call_history
->begin
= *begin
;
2156 btinfo
->call_history
->end
= *end
;
2162 btrace_is_replaying (struct thread_info
*tp
)
2164 return tp
->btrace
.replay
!= NULL
;
2170 btrace_is_empty (struct thread_info
*tp
)
2172 struct btrace_insn_iterator begin
, end
;
2173 struct btrace_thread_info
*btinfo
;
2175 btinfo
= &tp
->btrace
;
2177 if (btinfo
->begin
== NULL
)
2180 btrace_insn_begin (&begin
, btinfo
);
2181 btrace_insn_end (&end
, btinfo
);
2183 return btrace_insn_cmp (&begin
, &end
) == 0;
/* Cleanup-callback trampoline: ARG is a struct btrace_data *; forward
   the cleanup request to btrace_data_fini.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}
2197 make_cleanup_btrace_data (struct btrace_data
*data
)
2199 return make_cleanup (do_btrace_data_cleanup
, data
);