1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
25 #include "exceptions.h"
32 #include "filenames.h"
33 #include "xml-support.h"
36 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
37 when used in if statements. */
39 #define DEBUG(msg, args...) \
42 if (record_debug != 0) \
43 fprintf_unfiltered (gdb_stdlog, \
44 "[btrace] " msg "\n", ##args); \
48 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
50 /* Return the function name of a recorded function segment for printing.
51 This function never returns NULL. */
/* Return the function name of BFUN for printing; per the comment above,
   this never returns NULL.
   NOTE(review): the extraction dropped interior lines here (original
   numbering jumps 56 -> 63 -> 66); the declaration/assignment of SYM and
   the NULL checks guarding each return are not visible -- confirm against
   the full source before relying on the flow described below.  */
54 ftrace_print_function_name (const struct btrace_function
*bfun
)
56 struct minimal_symbol
*msym
;
/* Prefer the full debug symbol's print name when one is available.  */
63 return SYMBOL_PRINT_NAME (sym
);
/* Otherwise fall back to the minimal symbol's print name.  */
66 return MSYMBOL_PRINT_NAME (msym
);
71 /* Return the file name of a recorded function segment for printing.
72 This function never returns NULL. */
/* Return the file name of BFUN for printing; per the comment above, this
   never returns NULL.
   NOTE(review): original lines 76-82 and 84 are missing from this
   extraction (the FILENAME/SYM declarations and the symbol NULL check);
   verify against the full source.  */
75 ftrace_print_filename (const struct btrace_function
*bfun
)
/* With symbol information, display the name of the symbol's symtab.  */
83 filename
= symtab_to_filename_for_display (sym
->symtab
);
/* Without symbol information, use a placeholder so callers can always
   print the result.  */
85 filename
= "<unknown>";
90 /* Return a string representation of the address of an instruction.
91 This function never returns NULL. */
/* Return a printable string for INSN's address; per the comment above,
   this never returns NULL.
   NOTE(review): original lines 95-98 are missing from this extraction
   (opening brace and, presumably, a NULL guard for INSN -- confirm).  */
94 ftrace_print_insn_addr (const struct btrace_insn
*insn
)
99 return core_addr_to_string_nz (insn
->pc
);
102 /* Print an ftrace debug status message. */
105 ftrace_debug (const struct btrace_function
*bfun
, const char *prefix
)
107 const char *fun
, *file
;
108 unsigned int ibegin
, iend
;
109 int lbegin
, lend
, level
;
111 fun
= ftrace_print_function_name (bfun
);
112 file
= ftrace_print_filename (bfun
);
115 lbegin
= bfun
->lbegin
;
118 ibegin
= bfun
->insn_offset
;
119 iend
= ibegin
+ VEC_length (btrace_insn_s
, bfun
->insn
);
121 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
122 "insn = [%u; %u)", prefix
, fun
, file
, level
, lbegin
, lend
,
126 /* Return non-zero if BFUN does not match MFUN and FUN,
127 return zero otherwise. */
130 ftrace_function_switched (const struct btrace_function
*bfun
,
131 const struct minimal_symbol
*mfun
,
132 const struct symbol
*fun
)
134 struct minimal_symbol
*msym
;
140 /* If the minimal symbol changed, we certainly switched functions. */
141 if (mfun
!= NULL
&& msym
!= NULL
142 && strcmp (MSYMBOL_LINKAGE_NAME (mfun
), MSYMBOL_LINKAGE_NAME (msym
)) != 0)
145 /* If the symbol changed, we certainly switched functions. */
146 if (fun
!= NULL
&& sym
!= NULL
)
148 const char *bfname
, *fname
;
150 /* Check the function name. */
151 if (strcmp (SYMBOL_LINKAGE_NAME (fun
), SYMBOL_LINKAGE_NAME (sym
)) != 0)
154 /* Check the location of those functions, as well. */
155 bfname
= symtab_to_fullname (sym
->symtab
);
156 fname
= symtab_to_fullname (fun
->symtab
);
157 if (filename_cmp (fname
, bfname
) != 0)
161 /* If we lost symbol information, we switched functions. */
162 if (!(msym
== NULL
&& sym
== NULL
) && mfun
== NULL
&& fun
== NULL
)
165 /* If we gained symbol information, we switched functions. */
166 if (msym
== NULL
&& sym
== NULL
&& !(mfun
== NULL
&& fun
== NULL
))
172 /* Return non-zero if we should skip this file when generating the function
173 call history, zero otherwise.
174 We would want to do that if, say, a macro that is defined in another file
175 is expanded in this function. */
178 ftrace_skip_file (const struct btrace_function
*bfun
, const char *fullname
)
187 bfile
= symtab_to_fullname (sym
->symtab
);
189 return (filename_cmp (bfile
, fullname
) != 0);
192 /* Allocate and initialize a new branch trace function segment.
193 PREV is the chronologically preceding function segment.
194 MFUN and FUN are the symbol information we have for this function. */
196 static struct btrace_function
*
197 ftrace_new_function (struct btrace_function
*prev
,
198 struct minimal_symbol
*mfun
,
201 struct btrace_function
*bfun
;
203 bfun
= xzalloc (sizeof (*bfun
));
207 bfun
->flow
.prev
= prev
;
209 /* We start with the identities of min and max, respectively. */
210 bfun
->lbegin
= INT_MAX
;
211 bfun
->lend
= INT_MIN
;
215 /* Start counting at one. */
217 bfun
->insn_offset
= 1;
221 gdb_assert (prev
->flow
.next
== NULL
);
222 prev
->flow
.next
= bfun
;
224 bfun
->number
= prev
->number
+ 1;
225 bfun
->insn_offset
= (prev
->insn_offset
226 + VEC_length (btrace_insn_s
, prev
->insn
));
232 /* Update the UP field of a function segment. */
235 ftrace_update_caller (struct btrace_function
*bfun
,
236 struct btrace_function
*caller
,
237 enum btrace_function_flag flags
)
239 if (bfun
->up
!= NULL
)
240 ftrace_debug (bfun
, "updating caller");
245 ftrace_debug (bfun
, "set caller");
248 /* Fix up the caller for all segments of a function. */
251 ftrace_fixup_caller (struct btrace_function
*bfun
,
252 struct btrace_function
*caller
,
253 enum btrace_function_flag flags
)
255 struct btrace_function
*prev
, *next
;
257 ftrace_update_caller (bfun
, caller
, flags
);
259 /* Update all function segments belonging to the same function. */
260 for (prev
= bfun
->segment
.prev
; prev
!= NULL
; prev
= prev
->segment
.prev
)
261 ftrace_update_caller (prev
, caller
, flags
);
263 for (next
= bfun
->segment
.next
; next
!= NULL
; next
= next
->segment
.next
)
264 ftrace_update_caller (next
, caller
, flags
);
267 /* Add a new function segment for a call.
268 CALLER is the chronologically preceding function segment.
269 MFUN and FUN are the symbol information we have for this function. */
271 static struct btrace_function
*
272 ftrace_new_call (struct btrace_function
*caller
,
273 struct minimal_symbol
*mfun
,
276 struct btrace_function
*bfun
;
278 bfun
= ftrace_new_function (caller
, mfun
, fun
);
280 bfun
->level
= caller
->level
+ 1;
282 ftrace_debug (bfun
, "new call");
287 /* Add a new function segment for a tail call.
288 CALLER is the chronologically preceding function segment.
289 MFUN and FUN are the symbol information we have for this function. */
291 static struct btrace_function
*
292 ftrace_new_tailcall (struct btrace_function
*caller
,
293 struct minimal_symbol
*mfun
,
296 struct btrace_function
*bfun
;
298 bfun
= ftrace_new_function (caller
, mfun
, fun
);
300 bfun
->level
= caller
->level
+ 1;
301 bfun
->flags
|= BFUN_UP_LINKS_TO_TAILCALL
;
303 ftrace_debug (bfun
, "new tail call");
308 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
309 symbol information. */
311 static struct btrace_function
*
312 ftrace_find_caller (struct btrace_function
*bfun
,
313 struct minimal_symbol
*mfun
,
316 for (; bfun
!= NULL
; bfun
= bfun
->up
)
318 /* Skip functions with incompatible symbol information. */
319 if (ftrace_function_switched (bfun
, mfun
, fun
))
322 /* This is the function segment we're looking for. */
329 /* Find the innermost caller in the back trace of BFUN, skipping all
330 function segments that do not end with a call instruction (e.g.
331 tail calls ending with a jump). */
333 static struct btrace_function
*
334 ftrace_find_call (struct gdbarch
*gdbarch
, struct btrace_function
*bfun
)
336 for (; bfun
!= NULL
; bfun
= bfun
->up
)
338 struct btrace_insn
*last
;
341 /* We do not allow empty function segments. */
342 gdb_assert (!VEC_empty (btrace_insn_s
, bfun
->insn
));
344 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
347 if (gdbarch_insn_is_call (gdbarch
, pc
))
354 /* Add a continuation segment for a function into which we return.
355 PREV is the chronologically preceding function segment.
356 MFUN and FUN are the symbol information we have for this function. */
358 static struct btrace_function
*
359 ftrace_new_return (struct gdbarch
*gdbarch
,
360 struct btrace_function
*prev
,
361 struct minimal_symbol
*mfun
,
364 struct btrace_function
*bfun
, *caller
;
366 bfun
= ftrace_new_function (prev
, mfun
, fun
);
368 /* It is important to start at PREV's caller. Otherwise, we might find
369 PREV itself, if PREV is a recursive function. */
370 caller
= ftrace_find_caller (prev
->up
, mfun
, fun
);
373 /* The caller of PREV is the preceding btrace function segment in this
374 function instance. */
375 gdb_assert (caller
->segment
.next
== NULL
);
377 caller
->segment
.next
= bfun
;
378 bfun
->segment
.prev
= caller
;
380 /* Maintain the function level. */
381 bfun
->level
= caller
->level
;
383 /* Maintain the call stack. */
384 bfun
->up
= caller
->up
;
385 bfun
->flags
= caller
->flags
;
387 ftrace_debug (bfun
, "new return");
391 /* We did not find a caller. This could mean that something went
392 wrong or that the call is simply not included in the trace. */
394 /* Let's search for some actual call. */
395 caller
= ftrace_find_call (gdbarch
, prev
->up
);
398 /* There is no call in PREV's back trace. We assume that the
399 branch trace did not include it. */
401 /* Let's find the topmost call function - this skips tail calls. */
402 while (prev
->up
!= NULL
)
405 /* We maintain levels for a series of returns for which we have
407 We start at the preceding function's level in case this has
408 already been a return for which we have not seen the call.
409 We start at level 0 otherwise, to handle tail calls correctly. */
410 bfun
->level
= min (0, prev
->level
) - 1;
412 /* Fix up the call stack for PREV. */
413 ftrace_fixup_caller (prev
, bfun
, BFUN_UP_LINKS_TO_RET
);
415 ftrace_debug (bfun
, "new return - no caller");
419 /* There is a call in PREV's back trace to which we should have
420 returned. Let's remain at this level. */
421 bfun
->level
= prev
->level
;
423 ftrace_debug (bfun
, "new return - unknown caller");
430 /* Add a new function segment for a function switch.
431 PREV is the chronologically preceding function segment.
432 MFUN and FUN are the symbol information we have for this function. */
434 static struct btrace_function
*
435 ftrace_new_switch (struct btrace_function
*prev
,
436 struct minimal_symbol
*mfun
,
439 struct btrace_function
*bfun
;
441 /* This is an unexplained function switch. The call stack will likely
442 be wrong at this point. */
443 bfun
= ftrace_new_function (prev
, mfun
, fun
);
445 /* We keep the function level. */
446 bfun
->level
= prev
->level
;
448 ftrace_debug (bfun
, "new switch");
453 /* Update BFUN with respect to the instruction at PC. This may create new
455 Return the chronologically latest function segment, never NULL. */
457 static struct btrace_function
*
458 ftrace_update_function (struct gdbarch
*gdbarch
,
459 struct btrace_function
*bfun
, CORE_ADDR pc
)
461 struct bound_minimal_symbol bmfun
;
462 struct minimal_symbol
*mfun
;
464 struct btrace_insn
*last
;
466 /* Try to determine the function we're in. We use both types of symbols
467 to avoid surprises when we sometimes get a full symbol and sometimes
468 only a minimal symbol. */
469 fun
= find_pc_function (pc
);
470 bmfun
= lookup_minimal_symbol_by_pc (pc
);
473 if (fun
== NULL
&& mfun
== NULL
)
474 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc
));
476 /* If we didn't have a function before, we create one. */
478 return ftrace_new_function (bfun
, mfun
, fun
);
480 /* Check the last instruction, if we have one.
481 We do this check first, since it allows us to fill in the call stack
482 links in addition to the normal flow links. */
484 if (!VEC_empty (btrace_insn_s
, bfun
->insn
))
485 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
493 /* Check for returns. */
494 if (gdbarch_insn_is_ret (gdbarch
, lpc
))
495 return ftrace_new_return (gdbarch
, bfun
, mfun
, fun
);
497 /* Check for calls. */
498 if (gdbarch_insn_is_call (gdbarch
, lpc
))
502 size
= gdb_insn_length (gdbarch
, lpc
);
504 /* Ignore calls to the next instruction. They are used for PIC. */
505 if (lpc
+ size
!= pc
)
506 return ftrace_new_call (bfun
, mfun
, fun
);
510 /* Check if we're switching functions for some other reason. */
511 if (ftrace_function_switched (bfun
, mfun
, fun
))
513 DEBUG_FTRACE ("switching from %s in %s at %s",
514 ftrace_print_insn_addr (last
),
515 ftrace_print_function_name (bfun
),
516 ftrace_print_filename (bfun
));
520 CORE_ADDR start
, lpc
;
522 start
= get_pc_function_start (pc
);
524 /* If we can't determine the function for PC, we treat a jump at
525 the end of the block as tail call. */
531 /* Jumps indicate optimized tail calls. */
532 if (start
== pc
&& gdbarch_insn_is_jump (gdbarch
, lpc
))
533 return ftrace_new_tailcall (bfun
, mfun
, fun
);
536 return ftrace_new_switch (bfun
, mfun
, fun
);
542 /* Update BFUN's source range with respect to the instruction at PC. */
545 ftrace_update_lines (struct btrace_function
*bfun
, CORE_ADDR pc
)
547 struct symtab_and_line sal
;
548 const char *fullname
;
550 sal
= find_pc_line (pc
, 0);
551 if (sal
.symtab
== NULL
|| sal
.line
== 0)
553 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc
));
557 /* Check if we switched files. This could happen if, say, a macro that
558 is defined in another file is expanded here. */
559 fullname
= symtab_to_fullname (sal
.symtab
);
560 if (ftrace_skip_file (bfun
, fullname
))
562 DEBUG_FTRACE ("ignoring file at %s, file=%s",
563 core_addr_to_string_nz (pc
), fullname
);
567 /* Update the line range. */
568 bfun
->lbegin
= min (bfun
->lbegin
, sal
.line
);
569 bfun
->lend
= max (bfun
->lend
, sal
.line
);
571 if (record_debug
> 1)
572 ftrace_debug (bfun
, "update lines");
575 /* Add the instruction at PC to BFUN's instructions. */
578 ftrace_update_insns (struct btrace_function
*bfun
, CORE_ADDR pc
)
580 struct btrace_insn
*insn
;
582 insn
= VEC_safe_push (btrace_insn_s
, bfun
->insn
, NULL
);
585 if (record_debug
> 1)
586 ftrace_debug (bfun
, "update insn");
589 /* Compute the function branch trace from a block branch trace BTRACE for
590 a thread given by BTINFO. */
593 btrace_compute_ftrace (struct btrace_thread_info
*btinfo
,
594 VEC (btrace_block_s
) *btrace
)
596 struct btrace_function
*begin
, *end
;
597 struct gdbarch
*gdbarch
;
601 DEBUG ("compute ftrace");
603 gdbarch
= target_gdbarch ();
604 begin
= btinfo
->begin
;
606 level
= begin
!= NULL
? -btinfo
->level
: INT_MAX
;
607 blk
= VEC_length (btrace_block_s
, btrace
);
611 btrace_block_s
*block
;
616 block
= VEC_index (btrace_block_s
, btrace
, blk
);
623 /* We should hit the end of the block. Warn if we went too far. */
626 warning (_("Recorded trace may be corrupted around %s."),
627 core_addr_to_string_nz (pc
));
631 end
= ftrace_update_function (gdbarch
, end
, pc
);
635 /* Maintain the function level offset.
636 For all but the last block, we do it here. */
638 level
= min (level
, end
->level
);
640 ftrace_update_insns (end
, pc
);
641 ftrace_update_lines (end
, pc
);
643 /* We're done once we pushed the instruction at the end. */
644 if (block
->end
== pc
)
647 size
= gdb_insn_length (gdbarch
, pc
);
649 /* Make sure we terminate if we fail to compute the size. */
652 warning (_("Recorded trace may be incomplete around %s."),
653 core_addr_to_string_nz (pc
));
659 /* Maintain the function level offset.
660 For the last block, we do it here to not consider the last
662 Since the last instruction corresponds to the current instruction
663 and is not really part of the execution history, it shouldn't
666 level
= min (level
, end
->level
);
670 btinfo
->begin
= begin
;
673 /* LEVEL is the minimal function level of all btrace function segments.
674 Define the global level offset to -LEVEL so all function levels are
675 normalized to start at zero. */
676 btinfo
->level
= -level
;
679 /* Add an entry for the current PC. */
682 btrace_add_pc (struct thread_info
*tp
)
684 VEC (btrace_block_s
) *btrace
;
685 struct btrace_block
*block
;
686 struct regcache
*regcache
;
687 struct cleanup
*cleanup
;
690 regcache
= get_thread_regcache (tp
->ptid
);
691 pc
= regcache_read_pc (regcache
);
694 cleanup
= make_cleanup (VEC_cleanup (btrace_block_s
), &btrace
);
696 block
= VEC_safe_push (btrace_block_s
, btrace
, NULL
);
700 btrace_compute_ftrace (&tp
->btrace
, btrace
);
702 do_cleanups (cleanup
);
708 btrace_enable (struct thread_info
*tp
)
710 if (tp
->btrace
.target
!= NULL
)
713 if (!target_supports_btrace ())
714 error (_("Target does not support branch tracing."));
716 DEBUG ("enable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
718 tp
->btrace
.target
= target_enable_btrace (tp
->ptid
);
720 /* Add an entry for the current PC so we start tracing from where we
722 if (tp
->btrace
.target
!= NULL
)
729 btrace_disable (struct thread_info
*tp
)
731 struct btrace_thread_info
*btp
= &tp
->btrace
;
734 if (btp
->target
== NULL
)
737 DEBUG ("disable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
739 target_disable_btrace (btp
->target
);
748 btrace_teardown (struct thread_info
*tp
)
750 struct btrace_thread_info
*btp
= &tp
->btrace
;
753 if (btp
->target
== NULL
)
756 DEBUG ("teardown thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
758 target_teardown_btrace (btp
->target
);
764 /* Adjust the block trace in order to stitch old and new trace together.
765 BTRACE is the new delta trace between the last and the current stop.
766 BTINFO is the old branch trace until the last stop.
767 May modify BTRACE as well as the existing trace in BTINFO.
768 Return 0 on success, -1 otherwise. */
771 btrace_stitch_trace (VEC (btrace_block_s
) **btrace
,
772 const struct btrace_thread_info
*btinfo
)
774 struct btrace_function
*last_bfun
;
775 struct btrace_insn
*last_insn
;
776 btrace_block_s
*first_new_block
;
778 /* If we don't have trace, there's nothing to do. */
779 if (VEC_empty (btrace_block_s
, *btrace
))
782 last_bfun
= btinfo
->end
;
783 gdb_assert (last_bfun
!= NULL
);
785 /* Beware that block trace starts with the most recent block, so the
786 chronologically first block in the new trace is the last block in
787 the new trace's block vector. */
788 first_new_block
= VEC_last (btrace_block_s
, *btrace
);
789 last_insn
= VEC_last (btrace_insn_s
, last_bfun
->insn
);
791 /* If the current PC at the end of the block is the same as in our current
792 trace, there are two explanations:
793 1. we executed the instruction and some branch brought us back.
794 2. we have not made any progress.
795 In the first case, the delta trace vector should contain at least two
797 In the second case, the delta trace vector should contain exactly one
798 entry for the partial block containing the current PC. Remove it. */
799 if (first_new_block
->end
== last_insn
->pc
800 && VEC_length (btrace_block_s
, *btrace
) == 1)
802 VEC_pop (btrace_block_s
, *btrace
);
806 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn
),
807 core_addr_to_string_nz (first_new_block
->end
));
809 /* Do a simple sanity check to make sure we don't accidentally end up
810 with a bad block. This should not occur in practice. */
811 if (first_new_block
->end
< last_insn
->pc
)
813 warning (_("Error while trying to read delta trace. Falling back to "
818 /* We adjust the last block to start at the end of our current trace. */
819 gdb_assert (first_new_block
->begin
== 0);
820 first_new_block
->begin
= last_insn
->pc
;
822 /* We simply pop the last insn so we can insert it again as part of
823 the normal branch trace computation.
824 Since instruction iterators are based on indices in the instructions
825 vector, we don't leave any pointers dangling. */
826 DEBUG ("pruning insn at %s for stitching",
827 ftrace_print_insn_addr (last_insn
));
829 VEC_pop (btrace_insn_s
, last_bfun
->insn
);
831 /* The instructions vector may become empty temporarily if this has
832 been the only instruction in this function segment.
833 This violates the invariant but will be remedied shortly by
834 btrace_compute_ftrace when we add the new trace. */
838 /* Clear the branch trace histories in BTINFO. */
841 btrace_clear_history (struct btrace_thread_info
*btinfo
)
843 xfree (btinfo
->insn_history
);
844 xfree (btinfo
->call_history
);
845 xfree (btinfo
->replay
);
847 btinfo
->insn_history
= NULL
;
848 btinfo
->call_history
= NULL
;
849 btinfo
->replay
= NULL
;
855 btrace_fetch (struct thread_info
*tp
)
857 struct btrace_thread_info
*btinfo
;
858 struct btrace_target_info
*tinfo
;
859 VEC (btrace_block_s
) *btrace
;
860 struct cleanup
*cleanup
;
863 DEBUG ("fetch thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
866 btinfo
= &tp
->btrace
;
867 tinfo
= btinfo
->target
;
871 /* There's no way we could get new trace while replaying.
872 On the other hand, delta trace would return a partial record with the
873 current PC, which is the replay PC, not the last PC, as expected. */
874 if (btinfo
->replay
!= NULL
)
877 cleanup
= make_cleanup (VEC_cleanup (btrace_block_s
), &btrace
);
879 /* Let's first try to extend the trace we already have. */
880 if (btinfo
->end
!= NULL
)
882 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_DELTA
);
885 /* Success. Let's try to stitch the traces together. */
886 errcode
= btrace_stitch_trace (&btrace
, btinfo
);
890 /* We failed to read delta trace. Let's try to read new trace. */
891 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_NEW
);
893 /* If we got any new trace, discard what we have. */
894 if (errcode
== 0 && !VEC_empty (btrace_block_s
, btrace
))
898 /* If we were not able to read the trace, we start over. */
902 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
906 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
908 /* If we were not able to read the branch trace, signal an error. */
910 error (_("Failed to read branch trace."));
912 /* Compute the trace, provided we have any. */
913 if (!VEC_empty (btrace_block_s
, btrace
))
915 btrace_clear_history (btinfo
);
916 btrace_compute_ftrace (btinfo
, btrace
);
919 do_cleanups (cleanup
);
925 btrace_clear (struct thread_info
*tp
)
927 struct btrace_thread_info
*btinfo
;
928 struct btrace_function
*it
, *trash
;
930 DEBUG ("clear thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
932 /* Make sure btrace frames that may hold a pointer into the branch
933 trace data are destroyed. */
934 reinit_frame_cache ();
936 btinfo
= &tp
->btrace
;
947 btinfo
->begin
= NULL
;
950 btrace_clear_history (btinfo
);
956 btrace_free_objfile (struct objfile
*objfile
)
958 struct thread_info
*tp
;
960 DEBUG ("free objfile");
962 ALL_NON_EXITED_THREADS (tp
)
966 #if defined (HAVE_LIBEXPAT)
968 /* Check the btrace document version. */
971 check_xml_btrace_version (struct gdb_xml_parser
*parser
,
972 const struct gdb_xml_element
*element
,
973 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
975 const char *version
= xml_find_attribute (attributes
, "version")->value
;
977 if (strcmp (version
, "1.0") != 0)
978 gdb_xml_error (parser
, _("Unsupported btrace version: \"%s\""), version
);
981 /* Parse a btrace "block" xml record. */
984 parse_xml_btrace_block (struct gdb_xml_parser
*parser
,
985 const struct gdb_xml_element
*element
,
986 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
988 VEC (btrace_block_s
) **btrace
;
989 struct btrace_block
*block
;
990 ULONGEST
*begin
, *end
;
993 block
= VEC_safe_push (btrace_block_s
, *btrace
, NULL
);
995 begin
= xml_find_attribute (attributes
, "begin")->value
;
996 end
= xml_find_attribute (attributes
, "end")->value
;
998 block
->begin
= *begin
;
1002 static const struct gdb_xml_attribute block_attributes
[] = {
1003 { "begin", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1004 { "end", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1005 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1008 static const struct gdb_xml_attribute btrace_attributes
[] = {
1009 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1010 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1013 static const struct gdb_xml_element btrace_children
[] = {
1014 { "block", block_attributes
, NULL
,
1015 GDB_XML_EF_REPEATABLE
| GDB_XML_EF_OPTIONAL
, parse_xml_btrace_block
, NULL
},
1016 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1019 static const struct gdb_xml_element btrace_elements
[] = {
1020 { "btrace", btrace_attributes
, btrace_children
, GDB_XML_EF_NONE
,
1021 check_xml_btrace_version
, NULL
},
1022 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1025 #endif /* defined (HAVE_LIBEXPAT) */
1029 VEC (btrace_block_s
) *
1030 parse_xml_btrace (const char *buffer
)
1032 VEC (btrace_block_s
) *btrace
= NULL
;
1033 struct cleanup
*cleanup
;
1036 #if defined (HAVE_LIBEXPAT)
1038 cleanup
= make_cleanup (VEC_cleanup (btrace_block_s
), &btrace
);
1039 errcode
= gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements
,
1042 error (_("Error parsing branch trace."));
1044 /* Keep parse results. */
1045 discard_cleanups (cleanup
);
1047 #else /* !defined (HAVE_LIBEXPAT) */
1049 error (_("Cannot process branch trace. XML parsing is not supported."));
1051 #endif /* !defined (HAVE_LIBEXPAT) */
1058 const struct btrace_insn
*
1059 btrace_insn_get (const struct btrace_insn_iterator
*it
)
1061 const struct btrace_function
*bfun
;
1062 unsigned int index
, end
;
1065 bfun
= it
->function
;
1067 /* The index is within the bounds of this function's instruction vector. */
1068 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1069 gdb_assert (0 < end
);
1070 gdb_assert (index
< end
);
1072 return VEC_index (btrace_insn_s
, bfun
->insn
, index
);
/* Return the global number of the instruction iterator IT points at:
   the first-instruction number of the containing function segment
   (insn_offset, which starts counting at one) plus the index within
   that segment.  */
1078 btrace_insn_number (const struct btrace_insn_iterator
*it
)
1080 const struct btrace_function
*bfun
;
1082 bfun
= it
->function
;
1083 return bfun
->insn_offset
+ it
->index
;
1089 btrace_insn_begin (struct btrace_insn_iterator
*it
,
1090 const struct btrace_thread_info
*btinfo
)
1092 const struct btrace_function
*bfun
;
1094 bfun
= btinfo
->begin
;
1096 error (_("No trace."));
1098 it
->function
= bfun
;
1105 btrace_insn_end (struct btrace_insn_iterator
*it
,
1106 const struct btrace_thread_info
*btinfo
)
1108 const struct btrace_function
*bfun
;
1109 unsigned int length
;
1113 error (_("No trace."));
1115 /* The last instruction in the last function is the current instruction.
1116 We point to it - it is one past the end of the execution trace. */
1117 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1119 it
->function
= bfun
;
1120 it
->index
= length
- 1;
1126 btrace_insn_next (struct btrace_insn_iterator
*it
, unsigned int stride
)
1128 const struct btrace_function
*bfun
;
1129 unsigned int index
, steps
;
1131 bfun
= it
->function
;
1137 unsigned int end
, space
, adv
;
1139 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1141 gdb_assert (0 < end
);
1142 gdb_assert (index
< end
);
1144 /* Compute the number of instructions remaining in this segment. */
1145 space
= end
- index
;
1147 /* Advance the iterator as far as possible within this segment. */
1148 adv
= min (space
, stride
);
1153 /* Move to the next function if we're at the end of this one. */
1156 const struct btrace_function
*next
;
1158 next
= bfun
->flow
.next
;
1161 /* We stepped past the last function.
1163 Let's adjust the index to point to the last instruction in
1164 the previous function. */
1170 /* We now point to the first instruction in the new function. */
1175 /* We did make progress. */
1176 gdb_assert (adv
> 0);
1179 /* Update the iterator. */
1180 it
->function
= bfun
;
1189 btrace_insn_prev (struct btrace_insn_iterator
*it
, unsigned int stride
)
1191 const struct btrace_function
*bfun
;
1192 unsigned int index
, steps
;
1194 bfun
= it
->function
;
1202 /* Move to the previous function if we're at the start of this one. */
1205 const struct btrace_function
*prev
;
1207 prev
= bfun
->flow
.prev
;
1211 /* We point to one after the last instruction in the new function. */
1213 index
= VEC_length (btrace_insn_s
, bfun
->insn
);
1215 /* There is at least one instruction in this function segment. */
1216 gdb_assert (index
> 0);
1219 /* Advance the iterator as far as possible within this segment. */
1220 adv
= min (index
, stride
);
1225 /* We did make progress. */
1226 gdb_assert (adv
> 0);
1229 /* Update the iterator. */
1230 it
->function
= bfun
;
/* Three-way compare two instruction iterators by their global
   instruction numbers: negative if LHS precedes RHS, zero if equal,
   positive if LHS follows RHS.  */
1239 btrace_insn_cmp (const struct btrace_insn_iterator
*lhs
,
1240 const struct btrace_insn_iterator
*rhs
)
1242 unsigned int lnum
, rnum
;
1244 lnum
= btrace_insn_number (lhs
);
1245 rnum
= btrace_insn_number (rhs
);
/* Unsigned subtraction cast to int: correct as long as the two numbers
   are within INT_MAX of each other, which holds for any realistic
   trace length.  */
1247 return (int) (lnum
- rnum
);
1253 btrace_find_insn_by_number (struct btrace_insn_iterator
*it
,
1254 const struct btrace_thread_info
*btinfo
,
1255 unsigned int number
)
1257 const struct btrace_function
*bfun
;
1260 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1261 if (bfun
->insn_offset
<= number
)
1267 end
= bfun
->insn_offset
+ VEC_length (btrace_insn_s
, bfun
->insn
);
1271 it
->function
= bfun
;
1272 it
->index
= number
- bfun
->insn_offset
;
/* Return the function segment the call iterator IT points at, or NULL
   for the end iterator (see btrace_call_end, which stores NULL).  */
1279 const struct btrace_function
*
1280 btrace_call_get (const struct btrace_call_iterator
*it
)
1282 return it
->function
;
1288 btrace_call_number (const struct btrace_call_iterator
*it
)
1290 const struct btrace_thread_info
*btinfo
;
1291 const struct btrace_function
*bfun
;
1294 btinfo
= it
->btinfo
;
1295 bfun
= it
->function
;
1297 return bfun
->number
;
1299 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1300 number of the last function. */
1302 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1304 /* If the function contains only a single instruction (i.e. the current
1305 instruction), it will be skipped and its number is already the number
1308 return bfun
->number
;
1310 /* Otherwise, return one more than the number of the last function. */
1311 return bfun
->number
+ 1;
1317 btrace_call_begin (struct btrace_call_iterator
*it
,
1318 const struct btrace_thread_info
*btinfo
)
1320 const struct btrace_function
*bfun
;
1322 bfun
= btinfo
->begin
;
1324 error (_("No trace."));
1326 it
->btinfo
= btinfo
;
1327 it
->function
= bfun
;
1333 btrace_call_end (struct btrace_call_iterator
*it
,
1334 const struct btrace_thread_info
*btinfo
)
1336 const struct btrace_function
*bfun
;
1340 error (_("No trace."));
1342 it
->btinfo
= btinfo
;
1343 it
->function
= NULL
;
1349 btrace_call_next (struct btrace_call_iterator
*it
, unsigned int stride
)
1351 const struct btrace_function
*bfun
;
1354 bfun
= it
->function
;
1356 while (bfun
!= NULL
)
1358 const struct btrace_function
*next
;
1361 next
= bfun
->flow
.next
;
1364 /* Ignore the last function if it only contains a single
1365 (i.e. the current) instruction. */
1366 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1371 if (stride
== steps
)
1378 it
->function
= bfun
;
1385 btrace_call_prev (struct btrace_call_iterator
*it
, unsigned int stride
)
1387 const struct btrace_thread_info
*btinfo
;
1388 const struct btrace_function
*bfun
;
1391 bfun
= it
->function
;
1398 btinfo
= it
->btinfo
;
1403 /* Ignore the last function if it only contains a single
1404 (i.e. the current) instruction. */
1405 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1407 bfun
= bfun
->flow
.prev
;
1415 while (steps
< stride
)
1417 const struct btrace_function
*prev
;
1419 prev
= bfun
->flow
.prev
;
1427 it
->function
= bfun
;
/* Three-way compare two call iterators by their function-call numbers:
   negative if LHS precedes RHS, zero if equal, positive if LHS follows
   RHS.  Mirrors btrace_insn_cmp for the call history.  */
1434 btrace_call_cmp (const struct btrace_call_iterator
*lhs
,
1435 const struct btrace_call_iterator
*rhs
)
1437 unsigned int lnum
, rnum
;
1439 lnum
= btrace_call_number (lhs
);
1440 rnum
= btrace_call_number (rhs
);
/* Unsigned subtraction cast to int; fine while the numbers are within
   INT_MAX of each other.  */
1442 return (int) (lnum
- rnum
);
1448 btrace_find_call_by_number (struct btrace_call_iterator
*it
,
1449 const struct btrace_thread_info
*btinfo
,
1450 unsigned int number
)
1452 const struct btrace_function
*bfun
;
1454 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1458 bnum
= bfun
->number
;
1461 it
->btinfo
= btinfo
;
1462 it
->function
= bfun
;
1466 /* Functions are ordered and numbered consecutively. We could bail out
1467 earlier. On the other hand, it is very unlikely that we search for
1468 a nonexistent function. */
/* Record the instruction-history range [BEGIN, END] in BTINFO,
   allocating the history object lazily on first use.  */
1477 btrace_set_insn_history (struct btrace_thread_info
*btinfo
,
1478 const struct btrace_insn_iterator
*begin
,
1479 const struct btrace_insn_iterator
*end
)
/* Allocate the insn_history struct on demand; it is freed again in
   btrace_clear_history.  */
1481 if (btinfo
->insn_history
== NULL
)
1482 btinfo
->insn_history
= xzalloc (sizeof (*btinfo
->insn_history
));
/* Store copies of the iterators; iterators are index-based, so the
   copies remain valid as long as the trace itself does.  */
1484 btinfo
->insn_history
->begin
= *begin
;
1485 btinfo
->insn_history
->end
= *end
;
/* Record the call-history range [BEGIN, END] in BTINFO, allocating the
   history object lazily on first use.  Both iterators must refer to
   the same thread's branch trace.  */
1491 btrace_set_call_history (struct btrace_thread_info
*btinfo
,
1492 const struct btrace_call_iterator
*begin
,
1493 const struct btrace_call_iterator
*end
)
/* A range spanning two different threads' traces would be meaningless.  */
1495 gdb_assert (begin
->btinfo
== end
->btinfo
);
/* Allocate the call_history struct on demand; it is freed again in
   btrace_clear_history.  */
1497 if (btinfo
->call_history
== NULL
)
1498 btinfo
->call_history
= xzalloc (sizeof (*btinfo
->call_history
));
/* Store copies of the iterators for later history printing.  */
1500 btinfo
->call_history
->begin
= *begin
;
1501 btinfo
->call_history
->end
= *end
;
/* Return non-zero if thread TP is currently replaying its branch
   trace, i.e. a replay iterator is installed in its btrace info.  */
1507 btrace_is_replaying (struct thread_info
*tp
)
1509 return tp
->btrace
.replay
!= NULL
;
/* Return non-zero if thread TP's recorded branch trace is empty:
   either there is no trace at all, or the begin and end instruction
   iterators coincide.
   NOTE(review): the body of the `if' below (original line 1523,
   presumably `return 1;' for the no-trace case) is missing from this
   extraction -- confirm against the full source.  */
1515 btrace_is_empty (struct thread_info
*tp
)
1517 struct btrace_insn_iterator begin
, end
;
1518 struct btrace_thread_info
*btinfo
;
1520 btinfo
= &tp
->btrace
;
/* No trace recorded at all.  */
1522 if (btinfo
->begin
== NULL
)
/* Otherwise compare the first and one-past-last instruction
   iterators; equal iterators mean an empty execution history.  */
1525 btrace_insn_begin (&begin
, btinfo
);
1526 btrace_insn_end (&end
, btinfo
);
1528 return btrace_insn_cmp (&begin
, &end
) == 0;