1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
38 static void btrace_add_pc (struct thread_info
*tp
);
40 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
41 when used in if statements. */
43 #define DEBUG(msg, args...) \
46 if (record_debug != 0) \
47 fprintf_unfiltered (gdb_stdlog, \
48 "[btrace] " msg "\n", ##args); \
52 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
54 /* Return the function name of a recorded function segment for printing.
55 This function never returns NULL. */
58 ftrace_print_function_name (const struct btrace_function
*bfun
)
60 struct minimal_symbol
*msym
;
67 return SYMBOL_PRINT_NAME (sym
);
70 return MSYMBOL_PRINT_NAME (msym
);
75 /* Return the file name of a recorded function segment for printing.
76 This function never returns NULL. */
79 ftrace_print_filename (const struct btrace_function
*bfun
)
87 filename
= symtab_to_filename_for_display (symbol_symtab (sym
));
89 filename
= "<unknown>";
94 /* Return a string representation of the address of an instruction.
95 This function never returns NULL. */
98 ftrace_print_insn_addr (const struct btrace_insn
*insn
)
103 return core_addr_to_string_nz (insn
->pc
);
106 /* Print an ftrace debug status message. */
109 ftrace_debug (const struct btrace_function
*bfun
, const char *prefix
)
111 const char *fun
, *file
;
112 unsigned int ibegin
, iend
;
115 fun
= ftrace_print_function_name (bfun
);
116 file
= ftrace_print_filename (bfun
);
119 ibegin
= bfun
->insn_offset
;
120 iend
= ibegin
+ VEC_length (btrace_insn_s
, bfun
->insn
);
122 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
123 prefix
, fun
, file
, level
, ibegin
, iend
);
126 /* Return non-zero if BFUN does not match MFUN and FUN,
127 return zero otherwise. */
130 ftrace_function_switched (const struct btrace_function
*bfun
,
131 const struct minimal_symbol
*mfun
,
132 const struct symbol
*fun
)
134 struct minimal_symbol
*msym
;
140 /* If the minimal symbol changed, we certainly switched functions. */
141 if (mfun
!= NULL
&& msym
!= NULL
142 && strcmp (MSYMBOL_LINKAGE_NAME (mfun
), MSYMBOL_LINKAGE_NAME (msym
)) != 0)
145 /* If the symbol changed, we certainly switched functions. */
146 if (fun
!= NULL
&& sym
!= NULL
)
148 const char *bfname
, *fname
;
150 /* Check the function name. */
151 if (strcmp (SYMBOL_LINKAGE_NAME (fun
), SYMBOL_LINKAGE_NAME (sym
)) != 0)
154 /* Check the location of those functions, as well. */
155 bfname
= symtab_to_fullname (symbol_symtab (sym
));
156 fname
= symtab_to_fullname (symbol_symtab (fun
));
157 if (filename_cmp (fname
, bfname
) != 0)
161 /* If we lost symbol information, we switched functions. */
162 if (!(msym
== NULL
&& sym
== NULL
) && mfun
== NULL
&& fun
== NULL
)
165 /* If we gained symbol information, we switched functions. */
166 if (msym
== NULL
&& sym
== NULL
&& !(mfun
== NULL
&& fun
== NULL
))
172 /* Allocate and initialize a new branch trace function segment.
173 PREV is the chronologically preceding function segment.
174 MFUN and FUN are the symbol information we have for this function. */
176 static struct btrace_function
*
177 ftrace_new_function (struct btrace_function
*prev
,
178 struct minimal_symbol
*mfun
,
181 struct btrace_function
*bfun
;
183 bfun
= xzalloc (sizeof (*bfun
));
187 bfun
->flow
.prev
= prev
;
191 /* Start counting at one. */
193 bfun
->insn_offset
= 1;
197 gdb_assert (prev
->flow
.next
== NULL
);
198 prev
->flow
.next
= bfun
;
200 bfun
->number
= prev
->number
+ 1;
201 bfun
->insn_offset
= (prev
->insn_offset
202 + VEC_length (btrace_insn_s
, prev
->insn
));
203 bfun
->level
= prev
->level
;
209 /* Update the UP field of a function segment. */
212 ftrace_update_caller (struct btrace_function
*bfun
,
213 struct btrace_function
*caller
,
214 enum btrace_function_flag flags
)
216 if (bfun
->up
!= NULL
)
217 ftrace_debug (bfun
, "updating caller");
222 ftrace_debug (bfun
, "set caller");
225 /* Fix up the caller for all segments of a function. */
228 ftrace_fixup_caller (struct btrace_function
*bfun
,
229 struct btrace_function
*caller
,
230 enum btrace_function_flag flags
)
232 struct btrace_function
*prev
, *next
;
234 ftrace_update_caller (bfun
, caller
, flags
);
236 /* Update all function segments belonging to the same function. */
237 for (prev
= bfun
->segment
.prev
; prev
!= NULL
; prev
= prev
->segment
.prev
)
238 ftrace_update_caller (prev
, caller
, flags
);
240 for (next
= bfun
->segment
.next
; next
!= NULL
; next
= next
->segment
.next
)
241 ftrace_update_caller (next
, caller
, flags
);
244 /* Add a new function segment for a call.
245 CALLER is the chronologically preceding function segment.
246 MFUN and FUN are the symbol information we have for this function. */
248 static struct btrace_function
*
249 ftrace_new_call (struct btrace_function
*caller
,
250 struct minimal_symbol
*mfun
,
253 struct btrace_function
*bfun
;
255 bfun
= ftrace_new_function (caller
, mfun
, fun
);
259 ftrace_debug (bfun
, "new call");
264 /* Add a new function segment for a tail call.
265 CALLER is the chronologically preceding function segment.
266 MFUN and FUN are the symbol information we have for this function. */
268 static struct btrace_function
*
269 ftrace_new_tailcall (struct btrace_function
*caller
,
270 struct minimal_symbol
*mfun
,
273 struct btrace_function
*bfun
;
275 bfun
= ftrace_new_function (caller
, mfun
, fun
);
278 bfun
->flags
|= BFUN_UP_LINKS_TO_TAILCALL
;
280 ftrace_debug (bfun
, "new tail call");
285 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
286 symbol information. */
288 static struct btrace_function
*
289 ftrace_find_caller (struct btrace_function
*bfun
,
290 struct minimal_symbol
*mfun
,
293 for (; bfun
!= NULL
; bfun
= bfun
->up
)
295 /* Skip functions with incompatible symbol information. */
296 if (ftrace_function_switched (bfun
, mfun
, fun
))
299 /* This is the function segment we're looking for. */
306 /* Find the innermost caller in the back trace of BFUN, skipping all
307 function segments that do not end with a call instruction (e.g.
308 tail calls ending with a jump). */
310 static struct btrace_function
*
311 ftrace_find_call (struct btrace_function
*bfun
)
313 for (; bfun
!= NULL
; bfun
= bfun
->up
)
315 struct btrace_insn
*last
;
318 if (bfun
->errcode
!= 0)
321 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
323 if (last
->iclass
== BTRACE_INSN_CALL
)
330 /* Add a continuation segment for a function into which we return.
331 PREV is the chronologically preceding function segment.
332 MFUN and FUN are the symbol information we have for this function. */
334 static struct btrace_function
*
335 ftrace_new_return (struct btrace_function
*prev
,
336 struct minimal_symbol
*mfun
,
339 struct btrace_function
*bfun
, *caller
;
341 bfun
= ftrace_new_function (prev
, mfun
, fun
);
343 /* It is important to start at PREV's caller. Otherwise, we might find
344 PREV itself, if PREV is a recursive function. */
345 caller
= ftrace_find_caller (prev
->up
, mfun
, fun
);
348 /* The caller of PREV is the preceding btrace function segment in this
349 function instance. */
350 gdb_assert (caller
->segment
.next
== NULL
);
352 caller
->segment
.next
= bfun
;
353 bfun
->segment
.prev
= caller
;
355 /* Maintain the function level. */
356 bfun
->level
= caller
->level
;
358 /* Maintain the call stack. */
359 bfun
->up
= caller
->up
;
360 bfun
->flags
= caller
->flags
;
362 ftrace_debug (bfun
, "new return");
366 /* We did not find a caller. This could mean that something went
367 wrong or that the call is simply not included in the trace. */
369 /* Let's search for some actual call. */
370 caller
= ftrace_find_call (prev
->up
);
373 /* There is no call in PREV's back trace. We assume that the
374 branch trace did not include it. */
376 /* Let's find the topmost call function - this skips tail calls. */
377 while (prev
->up
!= NULL
)
380 /* We maintain levels for a series of returns for which we have
382 We start at the preceding function's level in case this has
383 already been a return for which we have not seen the call.
384 We start at level 0 otherwise, to handle tail calls correctly. */
385 bfun
->level
= min (0, prev
->level
) - 1;
387 /* Fix up the call stack for PREV. */
388 ftrace_fixup_caller (prev
, bfun
, BFUN_UP_LINKS_TO_RET
);
390 ftrace_debug (bfun
, "new return - no caller");
394 /* There is a call in PREV's back trace to which we should have
395 returned. Let's remain at this level. */
396 bfun
->level
= prev
->level
;
398 ftrace_debug (bfun
, "new return - unknown caller");
405 /* Add a new function segment for a function switch.
406 PREV is the chronologically preceding function segment.
407 MFUN and FUN are the symbol information we have for this function. */
409 static struct btrace_function
*
410 ftrace_new_switch (struct btrace_function
*prev
,
411 struct minimal_symbol
*mfun
,
414 struct btrace_function
*bfun
;
416 /* This is an unexplained function switch. The call stack will likely
417 be wrong at this point. */
418 bfun
= ftrace_new_function (prev
, mfun
, fun
);
420 ftrace_debug (bfun
, "new switch");
425 /* Add a new function segment for a gap in the trace due to a decode error.
426 PREV is the chronologically preceding function segment.
427 ERRCODE is the format-specific error code. */
429 static struct btrace_function
*
430 ftrace_new_gap (struct btrace_function
*prev
, int errcode
)
432 struct btrace_function
*bfun
;
434 /* We hijack prev if it was empty. */
435 if (prev
!= NULL
&& prev
->errcode
== 0
436 && VEC_empty (btrace_insn_s
, prev
->insn
))
439 bfun
= ftrace_new_function (prev
, NULL
, NULL
);
441 bfun
->errcode
= errcode
;
443 ftrace_debug (bfun
, "new gap");
448 /* Update BFUN with respect to the instruction at PC. This may create new
450 Return the chronologically latest function segment, never NULL. */
452 static struct btrace_function
*
453 ftrace_update_function (struct btrace_function
*bfun
, CORE_ADDR pc
)
455 struct bound_minimal_symbol bmfun
;
456 struct minimal_symbol
*mfun
;
458 struct btrace_insn
*last
;
460 /* Try to determine the function we're in. We use both types of symbols
461 to avoid surprises when we sometimes get a full symbol and sometimes
462 only a minimal symbol. */
463 fun
= find_pc_function (pc
);
464 bmfun
= lookup_minimal_symbol_by_pc (pc
);
467 if (fun
== NULL
&& mfun
== NULL
)
468 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc
));
470 /* If we didn't have a function or if we had a gap before, we create one. */
471 if (bfun
== NULL
|| bfun
->errcode
!= 0)
472 return ftrace_new_function (bfun
, mfun
, fun
);
474 /* Check the last instruction, if we have one.
475 We do this check first, since it allows us to fill in the call stack
476 links in addition to the normal flow links. */
478 if (!VEC_empty (btrace_insn_s
, bfun
->insn
))
479 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
483 switch (last
->iclass
)
485 case BTRACE_INSN_RETURN
:
489 /* On some systems, _dl_runtime_resolve returns to the resolved
490 function instead of jumping to it. From our perspective,
491 however, this is a tailcall.
492 If we treated it as return, we wouldn't be able to find the
493 resolved function in our stack back trace. Hence, we would
494 lose the current stack back trace and start anew with an empty
495 back trace. When the resolved function returns, we would then
496 create a stack back trace with the same function names but
497 different frame id's. This will confuse stepping. */
498 fname
= ftrace_print_function_name (bfun
);
499 if (strcmp (fname
, "_dl_runtime_resolve") == 0)
500 return ftrace_new_tailcall (bfun
, mfun
, fun
);
502 return ftrace_new_return (bfun
, mfun
, fun
);
505 case BTRACE_INSN_CALL
:
506 /* Ignore calls to the next instruction. They are used for PIC. */
507 if (last
->pc
+ last
->size
== pc
)
510 return ftrace_new_call (bfun
, mfun
, fun
);
512 case BTRACE_INSN_JUMP
:
516 start
= get_pc_function_start (pc
);
518 /* If we can't determine the function for PC, we treat a jump at
519 the end of the block as tail call. */
520 if (start
== 0 || start
== pc
)
521 return ftrace_new_tailcall (bfun
, mfun
, fun
);
526 /* Check if we're switching functions for some other reason. */
527 if (ftrace_function_switched (bfun
, mfun
, fun
))
529 DEBUG_FTRACE ("switching from %s in %s at %s",
530 ftrace_print_insn_addr (last
),
531 ftrace_print_function_name (bfun
),
532 ftrace_print_filename (bfun
));
534 return ftrace_new_switch (bfun
, mfun
, fun
);
540 /* Add the instruction at PC to BFUN's instructions. */
543 ftrace_update_insns (struct btrace_function
*bfun
,
544 const struct btrace_insn
*insn
)
546 VEC_safe_push (btrace_insn_s
, bfun
->insn
, insn
);
548 if (record_debug
> 1)
549 ftrace_debug (bfun
, "update insn");
552 /* Classify the instruction at PC. */
554 static enum btrace_insn_class
555 ftrace_classify_insn (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
557 enum btrace_insn_class iclass
;
559 iclass
= BTRACE_INSN_OTHER
;
562 if (gdbarch_insn_is_call (gdbarch
, pc
))
563 iclass
= BTRACE_INSN_CALL
;
564 else if (gdbarch_insn_is_ret (gdbarch
, pc
))
565 iclass
= BTRACE_INSN_RETURN
;
566 else if (gdbarch_insn_is_jump (gdbarch
, pc
))
567 iclass
= BTRACE_INSN_JUMP
;
569 CATCH (error
, RETURN_MASK_ERROR
)
577 /* Compute the function branch trace from BTS trace. */
580 btrace_compute_ftrace_bts (struct thread_info
*tp
,
581 const struct btrace_data_bts
*btrace
)
583 struct btrace_thread_info
*btinfo
;
584 struct btrace_function
*begin
, *end
;
585 struct gdbarch
*gdbarch
;
586 unsigned int blk
, ngaps
;
589 gdbarch
= target_gdbarch ();
590 btinfo
= &tp
->btrace
;
591 begin
= btinfo
->begin
;
593 ngaps
= btinfo
->ngaps
;
594 level
= begin
!= NULL
? -btinfo
->level
: INT_MAX
;
595 blk
= VEC_length (btrace_block_s
, btrace
->blocks
);
599 btrace_block_s
*block
;
604 block
= VEC_index (btrace_block_s
, btrace
->blocks
, blk
);
609 struct btrace_insn insn
;
612 /* We should hit the end of the block. Warn if we went too far. */
615 /* Indicate the gap in the trace - unless we're at the
619 warning (_("Recorded trace may be corrupted around %s."),
620 core_addr_to_string_nz (pc
));
622 end
= ftrace_new_gap (end
, BDE_BTS_OVERFLOW
);
628 end
= ftrace_update_function (end
, pc
);
632 /* Maintain the function level offset.
633 For all but the last block, we do it here. */
635 level
= min (level
, end
->level
);
640 size
= gdb_insn_length (gdbarch
, pc
);
642 CATCH (error
, RETURN_MASK_ERROR
)
649 insn
.iclass
= ftrace_classify_insn (gdbarch
, pc
);
651 ftrace_update_insns (end
, &insn
);
653 /* We're done once we pushed the instruction at the end. */
654 if (block
->end
== pc
)
657 /* We can't continue if we fail to compute the size. */
660 warning (_("Recorded trace may be incomplete around %s."),
661 core_addr_to_string_nz (pc
));
663 /* Indicate the gap in the trace. We just added INSN so we're
664 not at the beginning. */
665 end
= ftrace_new_gap (end
, BDE_BTS_INSN_SIZE
);
673 /* Maintain the function level offset.
674 For the last block, we do it here to not consider the last
676 Since the last instruction corresponds to the current instruction
677 and is not really part of the execution history, it shouldn't
680 level
= min (level
, end
->level
);
684 btinfo
->begin
= begin
;
686 btinfo
->ngaps
= ngaps
;
688 /* LEVEL is the minimal function level of all btrace function segments.
689 Define the global level offset to -LEVEL so all function levels are
690 normalized to start at zero. */
691 btinfo
->level
= -level
;
694 #if defined (HAVE_LIBIPT)
696 static enum btrace_insn_class
697 pt_reclassify_insn (enum pt_insn_class iclass
)
702 return BTRACE_INSN_CALL
;
705 return BTRACE_INSN_RETURN
;
708 return BTRACE_INSN_JUMP
;
711 return BTRACE_INSN_OTHER
;
715 /* Add function branch trace using DECODER. */
718 ftrace_add_pt (struct pt_insn_decoder
*decoder
,
719 struct btrace_function
**pbegin
,
720 struct btrace_function
**pend
, int *plevel
,
723 struct btrace_function
*begin
, *end
, *upd
;
725 int errcode
, nerrors
;
732 struct btrace_insn btinsn
;
735 errcode
= pt_insn_sync_forward (decoder
);
738 if (errcode
!= -pte_eos
)
739 warning (_("Failed to synchronize onto the Intel(R) Processor "
740 "Trace stream: %s."), pt_errstr (pt_errcode (errcode
)));
744 memset (&btinsn
, 0, sizeof (btinsn
));
747 errcode
= pt_insn_next (decoder
, &insn
, sizeof(insn
));
751 /* Look for gaps in the trace - unless we're at the beginning. */
754 /* Tracing is disabled and re-enabled each time we enter the
755 kernel. Most times, we continue from the same instruction we
756 stopped before. This is indicated via the RESUMED instruction
757 flag. The ENABLED instruction flag means that we continued
758 from some other instruction. Indicate this as a trace gap. */
760 *pend
= end
= ftrace_new_gap (end
, BDE_PT_DISABLED
);
762 /* Indicate trace overflows. */
764 *pend
= end
= ftrace_new_gap (end
, BDE_PT_OVERFLOW
);
767 upd
= ftrace_update_function (end
, insn
.ip
);
773 *pbegin
= begin
= upd
;
776 /* Maintain the function level offset. */
777 *plevel
= min (*plevel
, end
->level
);
779 btinsn
.pc
= (CORE_ADDR
) insn
.ip
;
780 btinsn
.size
= (gdb_byte
) insn
.size
;
781 btinsn
.iclass
= pt_reclassify_insn (insn
.iclass
);
783 ftrace_update_insns (end
, &btinsn
);
786 if (errcode
== -pte_eos
)
789 /* If the gap is at the very beginning, we ignore it - we will have
790 less trace, but we won't have any holes in the trace. */
794 pt_insn_get_offset (decoder
, &offset
);
796 warning (_("Failed to decode Intel(R) Processor Trace near trace "
797 "offset 0x%" PRIx64
" near recorded PC 0x%" PRIx64
": %s."),
798 offset
, insn
.ip
, pt_errstr (pt_errcode (errcode
)));
800 /* Indicate the gap in the trace. */
801 *pend
= end
= ftrace_new_gap (end
, errcode
);
806 warning (_("The recorded execution trace may have gaps."));
809 /* A callback function to allow the trace decoder to read the inferior's
813 btrace_pt_readmem_callback (gdb_byte
*buffer
, size_t size
,
814 const struct pt_asid
*asid
, CORE_ADDR pc
,
821 errcode
= target_read_code (pc
, buffer
, size
);
825 CATCH (error
, RETURN_MASK_ERROR
)
834 /* Translate the vendor from one enum to another. */
836 static enum pt_cpu_vendor
837 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor
)
849 /* Finalize the function branch trace after decode. */
851 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder
*decoder
,
852 struct thread_info
*tp
, int level
)
854 pt_insn_free_decoder (decoder
);
856 /* LEVEL is the minimal function level of all btrace function segments.
857 Define the global level offset to -LEVEL so all function levels are
858 normalized to start at zero. */
859 tp
->btrace
.level
= -level
;
861 /* Add a single last instruction entry for the current PC.
862 This allows us to compute the backtrace at the current PC using both
863 standard unwind and btrace unwind.
864 This extra entry is ignored by all record commands. */
868 /* Compute the function branch trace from Intel(R) Processor Trace. */
871 btrace_compute_ftrace_pt (struct thread_info
*tp
,
872 const struct btrace_data_pt
*btrace
)
874 struct btrace_thread_info
*btinfo
;
875 struct pt_insn_decoder
*decoder
;
876 struct pt_config config
;
879 if (btrace
->size
== 0)
882 btinfo
= &tp
->btrace
;
883 level
= btinfo
->begin
!= NULL
? -btinfo
->level
: INT_MAX
;
885 pt_config_init(&config
);
886 config
.begin
= btrace
->data
;
887 config
.end
= btrace
->data
+ btrace
->size
;
889 config
.cpu
.vendor
= pt_translate_cpu_vendor (btrace
->config
.cpu
.vendor
);
890 config
.cpu
.family
= btrace
->config
.cpu
.family
;
891 config
.cpu
.model
= btrace
->config
.cpu
.model
;
892 config
.cpu
.stepping
= btrace
->config
.cpu
.stepping
;
894 errcode
= pt_cpu_errata (&config
.errata
, &config
.cpu
);
896 error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
897 pt_errstr (pt_errcode (errcode
)));
899 decoder
= pt_insn_alloc_decoder (&config
);
901 error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
905 struct pt_image
*image
;
907 image
= pt_insn_get_image(decoder
);
909 error (_("Failed to configure the Intel(R) Processor Trace decoder."));
911 errcode
= pt_image_set_callback(image
, btrace_pt_readmem_callback
, NULL
);
913 error (_("Failed to configure the Intel(R) Processor Trace decoder: "
914 "%s."), pt_errstr (pt_errcode (errcode
)));
916 ftrace_add_pt (decoder
, &btinfo
->begin
, &btinfo
->end
, &level
,
919 CATCH (error
, RETURN_MASK_ALL
)
921 /* Indicate a gap in the trace if we quit trace processing. */
922 if (error
.reason
== RETURN_QUIT
&& btinfo
->end
!= NULL
)
924 btinfo
->end
= ftrace_new_gap (btinfo
->end
, BDE_PT_USER_QUIT
);
928 btrace_finalize_ftrace_pt (decoder
, tp
, level
);
930 throw_exception (error
);
934 btrace_finalize_ftrace_pt (decoder
, tp
, level
);
937 #else /* defined (HAVE_LIBIPT) */
940 btrace_compute_ftrace_pt (struct thread_info
*tp
,
941 const struct btrace_data_pt
*btrace
)
943 internal_error (__FILE__
, __LINE__
, _("Unexpected branch trace format."));
946 #endif /* defined (HAVE_LIBIPT) */
948 /* Compute the function branch trace from a block branch trace BTRACE for
949 a thread given by BTINFO. */
952 btrace_compute_ftrace (struct thread_info
*tp
, struct btrace_data
*btrace
)
954 DEBUG ("compute ftrace");
956 switch (btrace
->format
)
958 case BTRACE_FORMAT_NONE
:
961 case BTRACE_FORMAT_BTS
:
962 btrace_compute_ftrace_bts (tp
, &btrace
->variant
.bts
);
965 case BTRACE_FORMAT_PT
:
966 btrace_compute_ftrace_pt (tp
, &btrace
->variant
.pt
);
970 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
973 /* Add an entry for the current PC. */
976 btrace_add_pc (struct thread_info
*tp
)
978 struct btrace_data btrace
;
979 struct btrace_block
*block
;
980 struct regcache
*regcache
;
981 struct cleanup
*cleanup
;
984 regcache
= get_thread_regcache (tp
->ptid
);
985 pc
= regcache_read_pc (regcache
);
987 btrace_data_init (&btrace
);
988 btrace
.format
= BTRACE_FORMAT_BTS
;
989 btrace
.variant
.bts
.blocks
= NULL
;
991 cleanup
= make_cleanup_btrace_data (&btrace
);
993 block
= VEC_safe_push (btrace_block_s
, btrace
.variant
.bts
.blocks
, NULL
);
997 btrace_compute_ftrace (tp
, &btrace
);
999 do_cleanups (cleanup
);
1005 btrace_enable (struct thread_info
*tp
, const struct btrace_config
*conf
)
1007 if (tp
->btrace
.target
!= NULL
)
1010 if (!target_supports_btrace (conf
->format
))
1011 error (_("Target does not support branch tracing."));
1013 DEBUG ("enable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1015 tp
->btrace
.target
= target_enable_btrace (tp
->ptid
, conf
);
1017 /* Add an entry for the current PC so we start tracing from where we
1019 if (tp
->btrace
.target
!= NULL
)
1025 const struct btrace_config
*
1026 btrace_conf (const struct btrace_thread_info
*btinfo
)
1028 if (btinfo
->target
== NULL
)
1031 return target_btrace_conf (btinfo
->target
);
1037 btrace_disable (struct thread_info
*tp
)
1039 struct btrace_thread_info
*btp
= &tp
->btrace
;
1042 if (btp
->target
== NULL
)
1045 DEBUG ("disable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1047 target_disable_btrace (btp
->target
);
1056 btrace_teardown (struct thread_info
*tp
)
1058 struct btrace_thread_info
*btp
= &tp
->btrace
;
1061 if (btp
->target
== NULL
)
1064 DEBUG ("teardown thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1066 target_teardown_btrace (btp
->target
);
1072 /* Stitch branch trace in BTS format. */
1075 btrace_stitch_bts (struct btrace_data_bts
*btrace
, struct thread_info
*tp
)
1077 struct btrace_thread_info
*btinfo
;
1078 struct btrace_function
*last_bfun
;
1079 struct btrace_insn
*last_insn
;
1080 btrace_block_s
*first_new_block
;
1082 btinfo
= &tp
->btrace
;
1083 last_bfun
= btinfo
->end
;
1084 gdb_assert (last_bfun
!= NULL
);
1085 gdb_assert (!VEC_empty (btrace_block_s
, btrace
->blocks
));
1087 /* If the existing trace ends with a gap, we just glue the traces
1088 together. We need to drop the last (i.e. chronologically first) block
1089 of the new trace, though, since we can't fill in the start address.  */
1090 if (VEC_empty (btrace_insn_s
, last_bfun
->insn
))
1092 VEC_pop (btrace_block_s
, btrace
->blocks
);
1096 /* Beware that block trace starts with the most recent block, so the
1097 chronologically first block in the new trace is the last block in
1098 the new trace's block vector. */
1099 first_new_block
= VEC_last (btrace_block_s
, btrace
->blocks
);
1100 last_insn
= VEC_last (btrace_insn_s
, last_bfun
->insn
);
1102 /* If the current PC at the end of the block is the same as in our current
1103 trace, there are two explanations:
1104 1. we executed the instruction and some branch brought us back.
1105 2. we have not made any progress.
1106 In the first case, the delta trace vector should contain at least two
1108 In the second case, the delta trace vector should contain exactly one
1109 entry for the partial block containing the current PC. Remove it. */
1110 if (first_new_block
->end
== last_insn
->pc
1111 && VEC_length (btrace_block_s
, btrace
->blocks
) == 1)
1113 VEC_pop (btrace_block_s
, btrace
->blocks
);
1117 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn
),
1118 core_addr_to_string_nz (first_new_block
->end
));
1120 /* Do a simple sanity check to make sure we don't accidentally end up
1121 with a bad block. This should not occur in practice. */
1122 if (first_new_block
->end
< last_insn
->pc
)
1124 warning (_("Error while trying to read delta trace. Falling back to "
1129 /* We adjust the last block to start at the end of our current trace. */
1130 gdb_assert (first_new_block
->begin
== 0);
1131 first_new_block
->begin
= last_insn
->pc
;
1133 /* We simply pop the last insn so we can insert it again as part of
1134 the normal branch trace computation.
1135 Since instruction iterators are based on indices in the instructions
1136 vector, we don't leave any pointers dangling. */
1137 DEBUG ("pruning insn at %s for stitching",
1138 ftrace_print_insn_addr (last_insn
));
1140 VEC_pop (btrace_insn_s
, last_bfun
->insn
);
1142 /* The instructions vector may become empty temporarily if this has
1143 been the only instruction in this function segment.
1144 This violates the invariant but will be remedied shortly by
1145 btrace_compute_ftrace when we add the new trace. */
1147 /* The only case where this would hurt is if the entire trace consisted
1148 of just that one instruction. If we remove it, we might turn the now
1149 empty btrace function segment into a gap. But we don't want gaps at
1150 the beginning. To avoid this, we remove the entire old trace. */
1151 if (last_bfun
== btinfo
->begin
&& VEC_empty (btrace_insn_s
, last_bfun
->insn
))
1157 /* Adjust the block trace in order to stitch old and new trace together.
1158 BTRACE is the new delta trace between the last and the current stop.
1159 TP is the traced thread.
1160 May modify BTRACE as well as the existing trace in TP.
1161 Return 0 on success, -1 otherwise. */
1164 btrace_stitch_trace (struct btrace_data
*btrace
, struct thread_info
*tp
)
1166 /* If we don't have trace, there's nothing to do. */
1167 if (btrace_data_empty (btrace
))
1170 switch (btrace
->format
)
1172 case BTRACE_FORMAT_NONE
:
1175 case BTRACE_FORMAT_BTS
:
1176 return btrace_stitch_bts (&btrace
->variant
.bts
, tp
);
1178 case BTRACE_FORMAT_PT
:
1179 /* Delta reads are not supported. */
1183 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1186 /* Clear the branch trace histories in BTINFO. */
1189 btrace_clear_history (struct btrace_thread_info
*btinfo
)
1191 xfree (btinfo
->insn_history
);
1192 xfree (btinfo
->call_history
);
1193 xfree (btinfo
->replay
);
1195 btinfo
->insn_history
= NULL
;
1196 btinfo
->call_history
= NULL
;
1197 btinfo
->replay
= NULL
;
1203 btrace_fetch (struct thread_info
*tp
)
1205 struct btrace_thread_info
*btinfo
;
1206 struct btrace_target_info
*tinfo
;
1207 struct btrace_data btrace
;
1208 struct cleanup
*cleanup
;
1211 DEBUG ("fetch thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1213 btinfo
= &tp
->btrace
;
1214 tinfo
= btinfo
->target
;
1218 /* There's no way we could get new trace while replaying.
1219 On the other hand, delta trace would return a partial record with the
1220 current PC, which is the replay PC, not the last PC, as expected. */
1221 if (btinfo
->replay
!= NULL
)
1224 btrace_data_init (&btrace
);
1225 cleanup
= make_cleanup_btrace_data (&btrace
);
1227 /* Let's first try to extend the trace we already have. */
1228 if (btinfo
->end
!= NULL
)
1230 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_DELTA
);
1233 /* Success. Let's try to stitch the traces together. */
1234 errcode
= btrace_stitch_trace (&btrace
, tp
);
1238 /* We failed to read delta trace. Let's try to read new trace. */
1239 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_NEW
);
1241 /* If we got any new trace, discard what we have. */
1242 if (errcode
== 0 && !btrace_data_empty (&btrace
))
1246 /* If we were not able to read the trace, we start over. */
1250 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
1254 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
1256 /* If we were not able to read the branch trace, signal an error. */
1258 error (_("Failed to read branch trace."));
1260 /* Compute the trace, provided we have any. */
1261 if (!btrace_data_empty (&btrace
))
1263 /* Store the raw trace data. The stored data will be cleared in
1264 btrace_clear, so we always append the new trace. */
1265 btrace_data_append (&btinfo
->data
, &btrace
);
1267 btrace_clear_history (btinfo
);
1268 btrace_compute_ftrace (tp
, &btrace
);
1271 do_cleanups (cleanup
);
1277 btrace_clear (struct thread_info
*tp
)
1279 struct btrace_thread_info
*btinfo
;
1280 struct btrace_function
*it
, *trash
;
1282 DEBUG ("clear thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1284 /* Make sure btrace frames that may hold a pointer into the branch
1285 trace data are destroyed. */
1286 reinit_frame_cache ();
1288 btinfo
= &tp
->btrace
;
1299 btinfo
->begin
= NULL
;
1303 btrace_data_clear (&btinfo
->data
);
1304 btrace_clear_history (btinfo
);
1310 btrace_free_objfile (struct objfile
*objfile
)
1312 struct thread_info
*tp
;
1314 DEBUG ("free objfile");
1316 ALL_NON_EXITED_THREADS (tp
)
1320 #if defined (HAVE_LIBEXPAT)
1322 /* Check the btrace document version. */
1325 check_xml_btrace_version (struct gdb_xml_parser
*parser
,
1326 const struct gdb_xml_element
*element
,
1327 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1329 const char *version
= xml_find_attribute (attributes
, "version")->value
;
1331 if (strcmp (version
, "1.0") != 0)
1332 gdb_xml_error (parser
, _("Unsupported btrace version: \"%s\""), version
);
1335 /* Parse a btrace "block" xml record. */
1338 parse_xml_btrace_block (struct gdb_xml_parser
*parser
,
1339 const struct gdb_xml_element
*element
,
1340 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1342 struct btrace_data
*btrace
;
1343 struct btrace_block
*block
;
1344 ULONGEST
*begin
, *end
;
1348 switch (btrace
->format
)
1350 case BTRACE_FORMAT_BTS
:
1353 case BTRACE_FORMAT_NONE
:
1354 btrace
->format
= BTRACE_FORMAT_BTS
;
1355 btrace
->variant
.bts
.blocks
= NULL
;
1359 gdb_xml_error (parser
, _("Btrace format error."));
1362 begin
= xml_find_attribute (attributes
, "begin")->value
;
1363 end
= xml_find_attribute (attributes
, "end")->value
;
1365 block
= VEC_safe_push (btrace_block_s
, btrace
->variant
.bts
.blocks
, NULL
);
1366 block
->begin
= *begin
;
1370 /* Parse a "raw" xml record. */
1373 parse_xml_raw (struct gdb_xml_parser
*parser
, const char *body_text
,
1374 gdb_byte
**pdata
, unsigned long *psize
)
1376 struct cleanup
*cleanup
;
1377 gdb_byte
*data
, *bin
;
1381 len
= strlen (body_text
);
1384 if ((size_t) size
* 2 != len
)
1385 gdb_xml_error (parser
, _("Bad raw data size."));
1387 bin
= data
= xmalloc (size
);
1388 cleanup
= make_cleanup (xfree
, data
);
1390 /* We use hex encoding - see common/rsp-low.h. */
1398 if (hi
== 0 || lo
== 0)
1399 gdb_xml_error (parser
, _("Bad hex encoding."));
1401 *bin
++ = fromhex (hi
) * 16 + fromhex (lo
);
1405 discard_cleanups (cleanup
);
1411 /* Parse a btrace pt-config "cpu" xml record. */
1414 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser
*parser
,
1415 const struct gdb_xml_element
*element
,
1417 VEC (gdb_xml_value_s
) *attributes
)
1419 struct btrace_data
*btrace
;
1421 ULONGEST
*family
, *model
, *stepping
;
1423 vendor
= xml_find_attribute (attributes
, "vendor")->value
;
1424 family
= xml_find_attribute (attributes
, "family")->value
;
1425 model
= xml_find_attribute (attributes
, "model")->value
;
1426 stepping
= xml_find_attribute (attributes
, "stepping")->value
;
1430 if (strcmp (vendor
, "GenuineIntel") == 0)
1431 btrace
->variant
.pt
.config
.cpu
.vendor
= CV_INTEL
;
1433 btrace
->variant
.pt
.config
.cpu
.family
= *family
;
1434 btrace
->variant
.pt
.config
.cpu
.model
= *model
;
1435 btrace
->variant
.pt
.config
.cpu
.stepping
= *stepping
;
1438 /* Parse a btrace pt "raw" xml record. */
1441 parse_xml_btrace_pt_raw (struct gdb_xml_parser
*parser
,
1442 const struct gdb_xml_element
*element
,
1443 void *user_data
, const char *body_text
)
1445 struct btrace_data
*btrace
;
1448 parse_xml_raw (parser
, body_text
, &btrace
->variant
.pt
.data
,
1449 &btrace
->variant
.pt
.size
);
1452 /* Parse a btrace "pt" xml record. */
1455 parse_xml_btrace_pt (struct gdb_xml_parser
*parser
,
1456 const struct gdb_xml_element
*element
,
1457 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1459 struct btrace_data
*btrace
;
1462 btrace
->format
= BTRACE_FORMAT_PT
;
1463 btrace
->variant
.pt
.config
.cpu
.vendor
= CV_UNKNOWN
;
1464 btrace
->variant
.pt
.data
= NULL
;
1465 btrace
->variant
.pt
.size
= 0;
1468 static const struct gdb_xml_attribute block_attributes
[] = {
1469 { "begin", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1470 { "end", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1471 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1474 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes
[] = {
1475 { "vendor", GDB_XML_AF_NONE
, NULL
, NULL
},
1476 { "family", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1477 { "model", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1478 { "stepping", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1479 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1482 static const struct gdb_xml_element btrace_pt_config_children
[] = {
1483 { "cpu", btrace_pt_config_cpu_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1484 parse_xml_btrace_pt_config_cpu
, NULL
},
1485 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1488 static const struct gdb_xml_element btrace_pt_children
[] = {
1489 { "pt-config", NULL
, btrace_pt_config_children
, GDB_XML_EF_OPTIONAL
, NULL
,
1491 { "raw", NULL
, NULL
, GDB_XML_EF_OPTIONAL
, NULL
, parse_xml_btrace_pt_raw
},
1492 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1495 static const struct gdb_xml_attribute btrace_attributes
[] = {
1496 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1497 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1500 static const struct gdb_xml_element btrace_children
[] = {
1501 { "block", block_attributes
, NULL
,
1502 GDB_XML_EF_REPEATABLE
| GDB_XML_EF_OPTIONAL
, parse_xml_btrace_block
, NULL
},
1503 { "pt", NULL
, btrace_pt_children
, GDB_XML_EF_OPTIONAL
, parse_xml_btrace_pt
,
1505 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1508 static const struct gdb_xml_element btrace_elements
[] = {
1509 { "btrace", btrace_attributes
, btrace_children
, GDB_XML_EF_NONE
,
1510 check_xml_btrace_version
, NULL
},
1511 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1514 #endif /* defined (HAVE_LIBEXPAT) */
/* See btrace.h.

   Parse BUFFER as a btrace XML document into BTRACE.  On parse failure
   the partially-filled data is released via the cleanup; on success the
   cleanup is discarded and ownership stays with the caller.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1544 #if defined (HAVE_LIBEXPAT)
1546 /* Parse a btrace-conf "bts" xml record. */
1549 parse_xml_btrace_conf_bts (struct gdb_xml_parser
*parser
,
1550 const struct gdb_xml_element
*element
,
1551 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1553 struct btrace_config
*conf
;
1554 struct gdb_xml_value
*size
;
1557 conf
->format
= BTRACE_FORMAT_BTS
;
1560 size
= xml_find_attribute (attributes
, "size");
1562 conf
->bts
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
1565 /* Parse a btrace-conf "pt" xml record. */
1568 parse_xml_btrace_conf_pt (struct gdb_xml_parser
*parser
,
1569 const struct gdb_xml_element
*element
,
1570 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1572 struct btrace_config
*conf
;
1573 struct gdb_xml_value
*size
;
1576 conf
->format
= BTRACE_FORMAT_PT
;
1579 size
= xml_find_attribute (attributes
, "size");
1581 conf
->pt
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
1584 static const struct gdb_xml_attribute btrace_conf_pt_attributes
[] = {
1585 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
1586 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1589 static const struct gdb_xml_attribute btrace_conf_bts_attributes
[] = {
1590 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
1591 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1594 static const struct gdb_xml_element btrace_conf_children
[] = {
1595 { "bts", btrace_conf_bts_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1596 parse_xml_btrace_conf_bts
, NULL
},
1597 { "pt", btrace_conf_pt_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1598 parse_xml_btrace_conf_pt
, NULL
},
1599 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1602 static const struct gdb_xml_attribute btrace_conf_attributes
[] = {
1603 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1604 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1607 static const struct gdb_xml_element btrace_conf_elements
[] = {
1608 { "btrace-conf", btrace_conf_attributes
, btrace_conf_children
,
1609 GDB_XML_EF_NONE
, NULL
, NULL
},
1610 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1613 #endif /* defined (HAVE_LIBEXPAT) */
/* See btrace.h.

   Parse XML as a btrace-conf document into CONF.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1638 const struct btrace_insn
*
1639 btrace_insn_get (const struct btrace_insn_iterator
*it
)
1641 const struct btrace_function
*bfun
;
1642 unsigned int index
, end
;
1645 bfun
= it
->function
;
1647 /* Check if the iterator points to a gap in the trace. */
1648 if (bfun
->errcode
!= 0)
1651 /* The index is within the bounds of this function's instruction vector. */
1652 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1653 gdb_assert (0 < end
);
1654 gdb_assert (index
< end
);
1656 return VEC_index (btrace_insn_s
, bfun
->insn
, index
);
1662 btrace_insn_number (const struct btrace_insn_iterator
*it
)
1664 const struct btrace_function
*bfun
;
1666 bfun
= it
->function
;
1668 /* Return zero if the iterator points to a gap in the trace. */
1669 if (bfun
->errcode
!= 0)
1672 return bfun
->insn_offset
+ it
->index
;
1678 btrace_insn_begin (struct btrace_insn_iterator
*it
,
1679 const struct btrace_thread_info
*btinfo
)
1681 const struct btrace_function
*bfun
;
1683 bfun
= btinfo
->begin
;
1685 error (_("No trace."));
1687 it
->function
= bfun
;
1694 btrace_insn_end (struct btrace_insn_iterator
*it
,
1695 const struct btrace_thread_info
*btinfo
)
1697 const struct btrace_function
*bfun
;
1698 unsigned int length
;
1702 error (_("No trace."));
1704 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1706 /* The last function may either be a gap or it contains the current
1707 instruction, which is one past the end of the execution trace; ignore
1712 it
->function
= bfun
;
1719 btrace_insn_next (struct btrace_insn_iterator
*it
, unsigned int stride
)
1721 const struct btrace_function
*bfun
;
1722 unsigned int index
, steps
;
1724 bfun
= it
->function
;
1730 unsigned int end
, space
, adv
;
1732 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1734 /* An empty function segment represents a gap in the trace. We count
1735 it as one instruction. */
1738 const struct btrace_function
*next
;
1740 next
= bfun
->flow
.next
;
1753 gdb_assert (0 < end
);
1754 gdb_assert (index
< end
);
1756 /* Compute the number of instructions remaining in this segment. */
1757 space
= end
- index
;
1759 /* Advance the iterator as far as possible within this segment. */
1760 adv
= min (space
, stride
);
1765 /* Move to the next function if we're at the end of this one. */
1768 const struct btrace_function
*next
;
1770 next
= bfun
->flow
.next
;
1773 /* We stepped past the last function.
1775 Let's adjust the index to point to the last instruction in
1776 the previous function. */
1782 /* We now point to the first instruction in the new function. */
1787 /* We did make progress. */
1788 gdb_assert (adv
> 0);
1791 /* Update the iterator. */
1792 it
->function
= bfun
;
1801 btrace_insn_prev (struct btrace_insn_iterator
*it
, unsigned int stride
)
1803 const struct btrace_function
*bfun
;
1804 unsigned int index
, steps
;
1806 bfun
= it
->function
;
1814 /* Move to the previous function if we're at the start of this one. */
1817 const struct btrace_function
*prev
;
1819 prev
= bfun
->flow
.prev
;
1823 /* We point to one after the last instruction in the new function. */
1825 index
= VEC_length (btrace_insn_s
, bfun
->insn
);
1827 /* An empty function segment represents a gap in the trace. We count
1828 it as one instruction. */
1838 /* Advance the iterator as far as possible within this segment. */
1839 adv
= min (index
, stride
);
1845 /* We did make progress. */
1846 gdb_assert (adv
> 0);
1849 /* Update the iterator. */
1850 it
->function
= bfun
;
1859 btrace_insn_cmp (const struct btrace_insn_iterator
*lhs
,
1860 const struct btrace_insn_iterator
*rhs
)
1862 unsigned int lnum
, rnum
;
1864 lnum
= btrace_insn_number (lhs
);
1865 rnum
= btrace_insn_number (rhs
);
1867 /* A gap has an instruction number of zero. Things are getting more
1868 complicated if gaps are involved.
1870 We take the instruction number offset from the iterator's function.
1871 This is the number of the first instruction after the gap.
1873 This is OK as long as both lhs and rhs point to gaps. If only one of
1874 them does, we need to adjust the number based on the other's regular
1875 instruction number. Otherwise, a gap might compare equal to an
1878 if (lnum
== 0 && rnum
== 0)
1880 lnum
= lhs
->function
->insn_offset
;
1881 rnum
= rhs
->function
->insn_offset
;
1885 lnum
= lhs
->function
->insn_offset
;
1892 rnum
= rhs
->function
->insn_offset
;
1898 return (int) (lnum
- rnum
);
1904 btrace_find_insn_by_number (struct btrace_insn_iterator
*it
,
1905 const struct btrace_thread_info
*btinfo
,
1906 unsigned int number
)
1908 const struct btrace_function
*bfun
;
1909 unsigned int end
, length
;
1911 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1914 if (bfun
->errcode
!= 0)
1917 if (bfun
->insn_offset
<= number
)
1924 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1925 gdb_assert (length
> 0);
1927 end
= bfun
->insn_offset
+ length
;
1931 it
->function
= bfun
;
1932 it
->index
= number
- bfun
->insn_offset
;
1939 const struct btrace_function
*
1940 btrace_call_get (const struct btrace_call_iterator
*it
)
1942 return it
->function
;
1948 btrace_call_number (const struct btrace_call_iterator
*it
)
1950 const struct btrace_thread_info
*btinfo
;
1951 const struct btrace_function
*bfun
;
1954 btinfo
= it
->btinfo
;
1955 bfun
= it
->function
;
1957 return bfun
->number
;
1959 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1960 number of the last function. */
1962 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1964 /* If the function contains only a single instruction (i.e. the current
1965 instruction), it will be skipped and its number is already the number
1968 return bfun
->number
;
1970 /* Otherwise, return one more than the number of the last function. */
1971 return bfun
->number
+ 1;
1977 btrace_call_begin (struct btrace_call_iterator
*it
,
1978 const struct btrace_thread_info
*btinfo
)
1980 const struct btrace_function
*bfun
;
1982 bfun
= btinfo
->begin
;
1984 error (_("No trace."));
1986 it
->btinfo
= btinfo
;
1987 it
->function
= bfun
;
1993 btrace_call_end (struct btrace_call_iterator
*it
,
1994 const struct btrace_thread_info
*btinfo
)
1996 const struct btrace_function
*bfun
;
2000 error (_("No trace."));
2002 it
->btinfo
= btinfo
;
2003 it
->function
= NULL
;
2009 btrace_call_next (struct btrace_call_iterator
*it
, unsigned int stride
)
2011 const struct btrace_function
*bfun
;
2014 bfun
= it
->function
;
2016 while (bfun
!= NULL
)
2018 const struct btrace_function
*next
;
2021 next
= bfun
->flow
.next
;
2024 /* Ignore the last function if it only contains a single
2025 (i.e. the current) instruction. */
2026 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
2031 if (stride
== steps
)
2038 it
->function
= bfun
;
2045 btrace_call_prev (struct btrace_call_iterator
*it
, unsigned int stride
)
2047 const struct btrace_thread_info
*btinfo
;
2048 const struct btrace_function
*bfun
;
2051 bfun
= it
->function
;
2058 btinfo
= it
->btinfo
;
2063 /* Ignore the last function if it only contains a single
2064 (i.e. the current) instruction. */
2065 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
2067 bfun
= bfun
->flow
.prev
;
2075 while (steps
< stride
)
2077 const struct btrace_function
*prev
;
2079 prev
= bfun
->flow
.prev
;
2087 it
->function
= bfun
;
/* See btrace.h.

   Compare two call iterators over the same trace; negative/zero/positive
   result as for strcmp.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}
2108 btrace_find_call_by_number (struct btrace_call_iterator
*it
,
2109 const struct btrace_thread_info
*btinfo
,
2110 unsigned int number
)
2112 const struct btrace_function
*bfun
;
2114 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
2118 bnum
= bfun
->number
;
2121 it
->btinfo
= btinfo
;
2122 it
->function
= bfun
;
2126 /* Functions are ordered and numbered consecutively. We could bail out
2127 earlier. On the other hand, it is very unlikely that we search for
2128 a nonexistent function. */
2137 btrace_set_insn_history (struct btrace_thread_info
*btinfo
,
2138 const struct btrace_insn_iterator
*begin
,
2139 const struct btrace_insn_iterator
*end
)
2141 if (btinfo
->insn_history
== NULL
)
2142 btinfo
->insn_history
= xzalloc (sizeof (*btinfo
->insn_history
));
2144 btinfo
->insn_history
->begin
= *begin
;
2145 btinfo
->insn_history
->end
= *end
;
2151 btrace_set_call_history (struct btrace_thread_info
*btinfo
,
2152 const struct btrace_call_iterator
*begin
,
2153 const struct btrace_call_iterator
*end
)
2155 gdb_assert (begin
->btinfo
== end
->btinfo
);
2157 if (btinfo
->call_history
== NULL
)
2158 btinfo
->call_history
= xzalloc (sizeof (*btinfo
->call_history
));
2160 btinfo
->call_history
->begin
= *begin
;
2161 btinfo
->call_history
->end
= *end
;
2167 btrace_is_replaying (struct thread_info
*tp
)
2169 return tp
->btrace
.replay
!= NULL
;
2175 btrace_is_empty (struct thread_info
*tp
)
2177 struct btrace_insn_iterator begin
, end
;
2178 struct btrace_thread_info
*btinfo
;
2180 btinfo
= &tp
->btrace
;
2182 if (btinfo
->begin
== NULL
)
2185 btrace_insn_begin (&begin
, btinfo
);
2186 btrace_insn_end (&end
, btinfo
);
2188 return btrace_insn_cmp (&begin
, &end
) == 0;
/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}
2202 make_cleanup_btrace_data (struct btrace_data
*data
)
2204 return make_cleanup (do_btrace_data_cleanup
, data
);