1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 /* The target_ops of record-btrace. */
40 static struct target_ops record_btrace_ops
;
/* A new thread observer enabling branch tracing for the new thread.
   Non-NULL only while automatic tracing of new threads is active.  */
static struct observer *record_btrace_thread_observer;
/* Temporarily allow memory accesses while replaying (e.g. for inserting
   or removing breakpoints).  */
static int record_btrace_allow_memory_access;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
61 /* Update the branch trace for the current thread and return a pointer to its
64 Throws an error if there is no thread or no trace. This function never
67 static struct thread_info
*
68 require_btrace_thread (void)
70 struct thread_info
*tp
;
74 tp
= find_thread_ptid (inferior_ptid
);
76 error (_("No thread."));
80 if (btrace_is_empty (tp
))
81 error (_("No trace."));
86 /* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
89 Throws an error if there is no thread or no trace. This function never
92 static struct btrace_thread_info
*
95 struct thread_info
*tp
;
97 tp
= require_btrace_thread ();
102 /* Enable branch tracing for one thread. Warn on errors. */
105 record_btrace_enable_warn (struct thread_info
*tp
)
107 volatile struct gdb_exception error
;
109 TRY_CATCH (error
, RETURN_MASK_ERROR
)
112 if (error
.message
!= NULL
)
113 warning ("%s", error
.message
);
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}
128 /* Enable automatic tracing of new threads. */
131 record_btrace_auto_enable (void)
133 DEBUG ("attach thread observer");
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn
);
139 /* Disable automatic tracing of new threads. */
142 record_btrace_auto_disable (void)
144 /* The observer may have been detached, already. */
145 if (record_btrace_thread_observer
== NULL
)
148 DEBUG ("detach thread observer");
150 observer_detach_new_thread (record_btrace_thread_observer
);
151 record_btrace_thread_observer
= NULL
;
154 /* The to_open method of target record-btrace. */
157 record_btrace_open (char *args
, int from_tty
)
159 struct cleanup
*disable_chain
;
160 struct thread_info
*tp
;
166 if (!target_has_execution
)
167 error (_("The program is not being run."));
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
173 error (_("Record btrace can't debug inferior in non-stop mode."));
175 gdb_assert (record_btrace_thread_observer
== NULL
);
177 disable_chain
= make_cleanup (null_cleanup
, NULL
);
179 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
183 make_cleanup (record_btrace_disable_callback
, tp
);
186 record_btrace_auto_enable ();
188 push_target (&record_btrace_ops
);
190 observer_notify_record_changed (current_inferior (), 1);
192 discard_cleanups (disable_chain
);
195 /* The to_stop_recording method of target record-btrace. */
198 record_btrace_stop_recording (void)
200 struct thread_info
*tp
;
202 DEBUG ("stop recording");
204 record_btrace_auto_disable ();
207 if (tp
->btrace
.target
!= NULL
)
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (void)
{
  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We already stopped recording.  */
}
223 /* The to_info_record method of target record-btrace. */
226 record_btrace_info (void)
228 struct btrace_thread_info
*btinfo
;
229 struct thread_info
*tp
;
230 unsigned int insns
, calls
;
234 tp
= find_thread_ptid (inferior_ptid
);
236 error (_("No thread."));
243 btinfo
= &tp
->btrace
;
245 if (!btrace_is_empty (tp
))
247 struct btrace_call_iterator call
;
248 struct btrace_insn_iterator insn
;
250 btrace_call_end (&call
, btinfo
);
251 btrace_call_prev (&call
, 1);
252 calls
= btrace_call_number (&call
);
254 btrace_insn_end (&insn
, btinfo
);
255 btrace_insn_prev (&insn
, 1);
256 insns
= btrace_insn_number (&insn
);
259 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
260 "%d (%s).\n"), insns
, calls
, tp
->num
,
261 target_pid_to_str (tp
->ptid
));
263 if (btrace_is_replaying (tp
))
264 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
265 btrace_insn_number (btinfo
->replay
));
/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
276 /* Disassemble a section of the recorded instruction trace. */
279 btrace_insn_history (struct ui_out
*uiout
,
280 const struct btrace_insn_iterator
*begin
,
281 const struct btrace_insn_iterator
*end
, int flags
)
283 struct gdbarch
*gdbarch
;
284 struct btrace_insn_iterator it
;
286 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
287 btrace_insn_number (end
));
289 gdbarch
= target_gdbarch ();
291 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
293 const struct btrace_insn
*insn
;
295 insn
= btrace_insn_get (&it
);
297 /* Print the instruction index. */
298 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
299 ui_out_text (uiout
, "\t");
301 /* Disassembly with '/m' flag may not produce the expected result.
303 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
, insn
->pc
+ 1);
307 /* The to_insn_history method of target record-btrace. */
310 record_btrace_insn_history (int size
, int flags
)
312 struct btrace_thread_info
*btinfo
;
313 struct btrace_insn_history
*history
;
314 struct btrace_insn_iterator begin
, end
;
315 struct cleanup
*uiout_cleanup
;
316 struct ui_out
*uiout
;
317 unsigned int context
, covered
;
319 uiout
= current_uiout
;
320 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
322 context
= abs (size
);
324 error (_("Bad record instruction-history-size."));
326 btinfo
= require_btrace ();
327 history
= btinfo
->insn_history
;
330 struct btrace_insn_iterator
*replay
;
332 DEBUG ("insn-history (0x%x): %d", flags
, size
);
334 /* If we're replaying, we start at the replay position. Otherwise, we
335 start at the tail of the trace. */
336 replay
= btinfo
->replay
;
340 btrace_insn_end (&begin
, btinfo
);
342 /* We start from here and expand in the requested direction. Then we
343 expand in the other direction, as well, to fill up any remaining
348 /* We want the current position covered, as well. */
349 covered
= btrace_insn_next (&end
, 1);
350 covered
+= btrace_insn_prev (&begin
, context
- covered
);
351 covered
+= btrace_insn_next (&end
, context
- covered
);
355 covered
= btrace_insn_next (&end
, context
);
356 covered
+= btrace_insn_prev (&begin
, context
- covered
);
361 begin
= history
->begin
;
364 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
365 btrace_insn_number (&begin
), btrace_insn_number (&end
));
370 covered
= btrace_insn_prev (&begin
, context
);
375 covered
= btrace_insn_next (&end
, context
);
380 btrace_insn_history (uiout
, &begin
, &end
, flags
);
384 printf_unfiltered (_("At the start of the branch trace record.\n"));
386 printf_unfiltered (_("At the end of the branch trace record.\n"));
389 btrace_set_insn_history (btinfo
, &begin
, &end
);
390 do_cleanups (uiout_cleanup
);
393 /* The to_insn_history_range method of target record-btrace. */
396 record_btrace_insn_history_range (ULONGEST from
, ULONGEST to
, int flags
)
398 struct btrace_thread_info
*btinfo
;
399 struct btrace_insn_history
*history
;
400 struct btrace_insn_iterator begin
, end
;
401 struct cleanup
*uiout_cleanup
;
402 struct ui_out
*uiout
;
403 unsigned int low
, high
;
406 uiout
= current_uiout
;
407 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
412 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
414 /* Check for wrap-arounds. */
415 if (low
!= from
|| high
!= to
)
416 error (_("Bad range."));
419 error (_("Bad range."));
421 btinfo
= require_btrace ();
423 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
425 error (_("Range out of bounds."));
427 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
430 /* Silently truncate the range. */
431 btrace_insn_end (&end
, btinfo
);
435 /* We want both begin and end to be inclusive. */
436 btrace_insn_next (&end
, 1);
439 btrace_insn_history (uiout
, &begin
, &end
, flags
);
440 btrace_set_insn_history (btinfo
, &begin
, &end
);
442 do_cleanups (uiout_cleanup
);
445 /* The to_insn_history_from method of target record-btrace. */
448 record_btrace_insn_history_from (ULONGEST from
, int size
, int flags
)
450 ULONGEST begin
, end
, context
;
452 context
= abs (size
);
454 error (_("Bad record instruction-history-size."));
463 begin
= from
- context
+ 1;
468 end
= from
+ context
- 1;
470 /* Check for wrap-around. */
475 record_btrace_insn_history_range (begin
, end
, flags
);
478 /* Print the instruction number range for a function call history line. */
481 btrace_call_history_insn_range (struct ui_out
*uiout
,
482 const struct btrace_function
*bfun
)
484 unsigned int begin
, end
, size
;
486 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
487 gdb_assert (size
> 0);
489 begin
= bfun
->insn_offset
;
490 end
= begin
+ size
- 1;
492 ui_out_field_uint (uiout
, "insn begin", begin
);
493 ui_out_text (uiout
, ",");
494 ui_out_field_uint (uiout
, "insn end", end
);
497 /* Print the source line information for a function call history line. */
500 btrace_call_history_src_line (struct ui_out
*uiout
,
501 const struct btrace_function
*bfun
)
510 ui_out_field_string (uiout
, "file",
511 symtab_to_filename_for_display (sym
->symtab
));
513 begin
= bfun
->lbegin
;
519 ui_out_text (uiout
, ":");
520 ui_out_field_int (uiout
, "min line", begin
);
525 ui_out_text (uiout
, ",");
526 ui_out_field_int (uiout
, "max line", end
);
529 /* Get the name of a branch trace function. */
532 btrace_get_bfun_name (const struct btrace_function
*bfun
)
534 struct minimal_symbol
*msym
;
544 return SYMBOL_PRINT_NAME (sym
);
545 else if (msym
!= NULL
)
546 return SYMBOL_PRINT_NAME (msym
);
551 /* Disassemble a section of the recorded function trace. */
554 btrace_call_history (struct ui_out
*uiout
,
555 const struct btrace_thread_info
*btinfo
,
556 const struct btrace_call_iterator
*begin
,
557 const struct btrace_call_iterator
*end
,
558 enum record_print_flag flags
)
560 struct btrace_call_iterator it
;
562 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
563 btrace_call_number (end
));
565 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
567 const struct btrace_function
*bfun
;
568 struct minimal_symbol
*msym
;
571 bfun
= btrace_call_get (&it
);
575 /* Print the function index. */
576 ui_out_field_uint (uiout
, "index", bfun
->number
);
577 ui_out_text (uiout
, "\t");
579 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
581 int level
= bfun
->level
+ btinfo
->level
, i
;
583 for (i
= 0; i
< level
; ++i
)
584 ui_out_text (uiout
, " ");
588 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
589 else if (msym
!= NULL
)
590 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (msym
));
591 else if (!ui_out_is_mi_like_p (uiout
))
592 ui_out_field_string (uiout
, "function", "??");
594 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
596 ui_out_text (uiout
, _("\tinst "));
597 btrace_call_history_insn_range (uiout
, bfun
);
600 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
602 ui_out_text (uiout
, _("\tat "));
603 btrace_call_history_src_line (uiout
, bfun
);
606 ui_out_text (uiout
, "\n");
610 /* The to_call_history method of target record-btrace. */
613 record_btrace_call_history (int size
, int flags
)
615 struct btrace_thread_info
*btinfo
;
616 struct btrace_call_history
*history
;
617 struct btrace_call_iterator begin
, end
;
618 struct cleanup
*uiout_cleanup
;
619 struct ui_out
*uiout
;
620 unsigned int context
, covered
;
622 uiout
= current_uiout
;
623 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
625 context
= abs (size
);
627 error (_("Bad record function-call-history-size."));
629 btinfo
= require_btrace ();
630 history
= btinfo
->call_history
;
633 struct btrace_insn_iterator
*replay
;
635 DEBUG ("call-history (0x%x): %d", flags
, size
);
637 /* If we're replaying, we start at the replay position. Otherwise, we
638 start at the tail of the trace. */
639 replay
= btinfo
->replay
;
642 begin
.function
= replay
->function
;
643 begin
.btinfo
= btinfo
;
646 btrace_call_end (&begin
, btinfo
);
648 /* We start from here and expand in the requested direction. Then we
649 expand in the other direction, as well, to fill up any remaining
654 /* We want the current position covered, as well. */
655 covered
= btrace_call_next (&end
, 1);
656 covered
+= btrace_call_prev (&begin
, context
- covered
);
657 covered
+= btrace_call_next (&end
, context
- covered
);
661 covered
= btrace_call_next (&end
, context
);
662 covered
+= btrace_call_prev (&begin
, context
- covered
);
667 begin
= history
->begin
;
670 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
671 btrace_call_number (&begin
), btrace_call_number (&end
));
676 covered
= btrace_call_prev (&begin
, context
);
681 covered
= btrace_call_next (&end
, context
);
686 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
690 printf_unfiltered (_("At the start of the branch trace record.\n"));
692 printf_unfiltered (_("At the end of the branch trace record.\n"));
695 btrace_set_call_history (btinfo
, &begin
, &end
);
696 do_cleanups (uiout_cleanup
);
699 /* The to_call_history_range method of target record-btrace. */
702 record_btrace_call_history_range (ULONGEST from
, ULONGEST to
, int flags
)
704 struct btrace_thread_info
*btinfo
;
705 struct btrace_call_history
*history
;
706 struct btrace_call_iterator begin
, end
;
707 struct cleanup
*uiout_cleanup
;
708 struct ui_out
*uiout
;
709 unsigned int low
, high
;
712 uiout
= current_uiout
;
713 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
718 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
720 /* Check for wrap-arounds. */
721 if (low
!= from
|| high
!= to
)
722 error (_("Bad range."));
725 error (_("Bad range."));
727 btinfo
= require_btrace ();
729 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
731 error (_("Range out of bounds."));
733 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
736 /* Silently truncate the range. */
737 btrace_call_end (&end
, btinfo
);
741 /* We want both begin and end to be inclusive. */
742 btrace_call_next (&end
, 1);
745 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
746 btrace_set_call_history (btinfo
, &begin
, &end
);
748 do_cleanups (uiout_cleanup
);
751 /* The to_call_history_from method of target record-btrace. */
754 record_btrace_call_history_from (ULONGEST from
, int size
, int flags
)
756 ULONGEST begin
, end
, context
;
758 context
= abs (size
);
760 error (_("Bad record function-call-history-size."));
769 begin
= from
- context
+ 1;
774 end
= from
+ context
- 1;
776 /* Check for wrap-around. */
781 record_btrace_call_history_range (begin
, end
, flags
);
784 /* The to_record_is_replaying method of target record-btrace. */
787 record_btrace_is_replaying (void)
789 struct thread_info
*tp
;
792 if (btrace_is_replaying (tp
))
798 /* The to_xfer_partial method of target record-btrace. */
801 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
802 const char *annex
, gdb_byte
*readbuf
,
803 const gdb_byte
*writebuf
, ULONGEST offset
,
806 struct target_ops
*t
;
808 /* Filter out requests that don't make sense during replay. */
809 if (!record_btrace_allow_memory_access
&& record_btrace_is_replaying ())
813 case TARGET_OBJECT_MEMORY
:
815 struct target_section
*section
;
817 /* We do not allow writing memory in general. */
818 if (writebuf
!= NULL
)
819 return TARGET_XFER_E_UNAVAILABLE
;
821 /* We allow reading readonly memory. */
822 section
= target_section_by_addr (ops
, offset
);
825 /* Check if the section we found is readonly. */
826 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
827 section
->the_bfd_section
)
828 & SEC_READONLY
) != 0)
830 /* Truncate the request to fit into this section. */
831 len
= min (len
, section
->endaddr
- offset
);
836 return TARGET_XFER_E_UNAVAILABLE
;
841 /* Forward the request. */
842 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
843 if (ops
->to_xfer_partial
!= NULL
)
844 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
847 return TARGET_XFER_E_UNAVAILABLE
;
850 /* The to_insert_breakpoint method of target record-btrace. */
853 record_btrace_insert_breakpoint (struct target_ops
*ops
,
854 struct gdbarch
*gdbarch
,
855 struct bp_target_info
*bp_tgt
)
857 volatile struct gdb_exception except
;
860 /* Inserting breakpoints requires accessing memory. Allow it for the
861 duration of this function. */
862 old
= record_btrace_allow_memory_access
;
863 record_btrace_allow_memory_access
= 1;
866 TRY_CATCH (except
, RETURN_MASK_ALL
)
867 ret
= forward_target_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
869 record_btrace_allow_memory_access
= old
;
871 if (except
.reason
< 0)
872 throw_exception (except
);
877 /* The to_remove_breakpoint method of target record-btrace. */
880 record_btrace_remove_breakpoint (struct target_ops
*ops
,
881 struct gdbarch
*gdbarch
,
882 struct bp_target_info
*bp_tgt
)
884 volatile struct gdb_exception except
;
887 /* Removing breakpoints requires accessing memory. Allow it for the
888 duration of this function. */
889 old
= record_btrace_allow_memory_access
;
890 record_btrace_allow_memory_access
= 1;
893 TRY_CATCH (except
, RETURN_MASK_ALL
)
894 ret
= forward_target_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
896 record_btrace_allow_memory_access
= old
;
898 if (except
.reason
< 0)
899 throw_exception (except
);
904 /* The to_fetch_registers method of target record-btrace. */
907 record_btrace_fetch_registers (struct target_ops
*ops
,
908 struct regcache
*regcache
, int regno
)
910 struct btrace_insn_iterator
*replay
;
911 struct thread_info
*tp
;
913 tp
= find_thread_ptid (inferior_ptid
);
914 gdb_assert (tp
!= NULL
);
916 replay
= tp
->btrace
.replay
;
919 const struct btrace_insn
*insn
;
920 struct gdbarch
*gdbarch
;
923 gdbarch
= get_regcache_arch (regcache
);
924 pcreg
= gdbarch_pc_regnum (gdbarch
);
928 /* We can only provide the PC register. */
929 if (regno
>= 0 && regno
!= pcreg
)
932 insn
= btrace_insn_get (replay
);
933 gdb_assert (insn
!= NULL
);
935 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
939 struct target_ops
*t
;
941 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
942 if (t
->to_fetch_registers
!= NULL
)
944 t
->to_fetch_registers (t
, regcache
, regno
);
950 /* The to_store_registers method of target record-btrace. */
953 record_btrace_store_registers (struct target_ops
*ops
,
954 struct regcache
*regcache
, int regno
)
956 struct target_ops
*t
;
958 if (record_btrace_is_replaying ())
959 error (_("This record target does not allow writing registers."));
961 gdb_assert (may_write_registers
!= 0);
963 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
964 if (t
->to_store_registers
!= NULL
)
966 t
->to_store_registers (t
, regcache
, regno
);
973 /* The to_prepare_to_store method of target record-btrace. */
976 record_btrace_prepare_to_store (struct target_ops
*ops
,
977 struct regcache
*regcache
)
979 struct target_ops
*t
;
981 if (record_btrace_is_replaying ())
984 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
985 if (t
->to_prepare_to_store
!= NULL
)
987 t
->to_prepare_to_store (t
, regcache
);
/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1006 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1008 static htab_t bfcache
;
1010 /* hash_f for htab_create_alloc of bfcache. */
1013 bfcache_hash (const void *arg
)
1015 const struct btrace_frame_cache
*cache
= arg
;
1017 return htab_hash_pointer (cache
->frame
);
1020 /* eq_f for htab_create_alloc of bfcache. */
1023 bfcache_eq (const void *arg1
, const void *arg2
)
1025 const struct btrace_frame_cache
*cache1
= arg1
;
1026 const struct btrace_frame_cache
*cache2
= arg2
;
1028 return cache1
->frame
== cache2
->frame
;
1031 /* Create a new btrace frame cache. */
1033 static struct btrace_frame_cache
*
1034 bfcache_new (struct frame_info
*frame
)
1036 struct btrace_frame_cache
*cache
;
1039 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1040 cache
->frame
= frame
;
1042 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1043 gdb_assert (*slot
== NULL
);
1049 /* Extract the branch trace function from a branch trace frame. */
1051 static const struct btrace_function
*
1052 btrace_get_frame_function (struct frame_info
*frame
)
1054 const struct btrace_frame_cache
*cache
;
1055 const struct btrace_function
*bfun
;
1056 struct btrace_frame_cache pattern
;
1059 pattern
.frame
= frame
;
1061 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1069 /* Implement stop_reason method for record_btrace_frame_unwind. */
1071 static enum unwind_stop_reason
1072 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1075 const struct btrace_frame_cache
*cache
;
1076 const struct btrace_function
*bfun
;
1078 cache
= *this_cache
;
1080 gdb_assert (bfun
!= NULL
);
1082 if (bfun
->up
== NULL
)
1083 return UNWIND_UNAVAILABLE
;
1085 return UNWIND_NO_REASON
;
1088 /* Implement this_id method for record_btrace_frame_unwind. */
1091 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1092 struct frame_id
*this_id
)
1094 const struct btrace_frame_cache
*cache
;
1095 const struct btrace_function
*bfun
;
1096 CORE_ADDR code
, special
;
1098 cache
= *this_cache
;
1101 gdb_assert (bfun
!= NULL
);
1103 while (bfun
->segment
.prev
!= NULL
)
1104 bfun
= bfun
->segment
.prev
;
1106 code
= get_frame_func (this_frame
);
1107 special
= bfun
->number
;
1109 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1111 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1112 btrace_get_bfun_name (cache
->bfun
),
1113 core_addr_to_string_nz (this_id
->code_addr
),
1114 core_addr_to_string_nz (this_id
->special_addr
));
1117 /* Implement prev_register method for record_btrace_frame_unwind. */
1119 static struct value
*
1120 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1124 const struct btrace_frame_cache
*cache
;
1125 const struct btrace_function
*bfun
, *caller
;
1126 const struct btrace_insn
*insn
;
1127 struct gdbarch
*gdbarch
;
1131 gdbarch
= get_frame_arch (this_frame
);
1132 pcreg
= gdbarch_pc_regnum (gdbarch
);
1133 if (pcreg
< 0 || regnum
!= pcreg
)
1134 throw_error (NOT_AVAILABLE_ERROR
,
1135 _("Registers are not available in btrace record history"));
1137 cache
= *this_cache
;
1139 gdb_assert (bfun
!= NULL
);
1143 throw_error (NOT_AVAILABLE_ERROR
,
1144 _("No caller in btrace record history"));
1146 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1148 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1153 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1156 pc
+= gdb_insn_length (gdbarch
, pc
);
1159 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1160 btrace_get_bfun_name (bfun
), bfun
->level
,
1161 core_addr_to_string_nz (pc
));
1163 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1166 /* Implement sniffer method for record_btrace_frame_unwind. */
1169 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1170 struct frame_info
*this_frame
,
1173 const struct btrace_function
*bfun
;
1174 struct btrace_frame_cache
*cache
;
1175 struct thread_info
*tp
;
1176 struct frame_info
*next
;
1178 /* THIS_FRAME does not contain a reference to its thread. */
1179 tp
= find_thread_ptid (inferior_ptid
);
1180 gdb_assert (tp
!= NULL
);
1183 next
= get_next_frame (this_frame
);
1186 const struct btrace_insn_iterator
*replay
;
1188 replay
= tp
->btrace
.replay
;
1190 bfun
= replay
->function
;
1194 const struct btrace_function
*callee
;
1196 callee
= btrace_get_frame_function (next
);
1197 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1204 DEBUG ("[frame] sniffed frame for %s on level %d",
1205 btrace_get_bfun_name (bfun
), bfun
->level
);
1207 /* This is our frame. Initialize the frame cache. */
1208 cache
= bfcache_new (this_frame
);
1212 *this_cache
= cache
;
1216 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1219 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1220 struct frame_info
*this_frame
,
1223 const struct btrace_function
*bfun
, *callee
;
1224 struct btrace_frame_cache
*cache
;
1225 struct frame_info
*next
;
1227 next
= get_next_frame (this_frame
);
1231 callee
= btrace_get_frame_function (next
);
1235 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1242 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1243 btrace_get_bfun_name (bfun
), bfun
->level
);
1245 /* This is our frame. Initialize the frame cache. */
1246 cache
= bfcache_new (this_frame
);
1247 cache
->tp
= find_thread_ptid (inferior_ptid
);
1250 *this_cache
= cache
;
1255 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1257 struct btrace_frame_cache
*cache
;
1262 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1263 gdb_assert (slot
!= NULL
);
1265 htab_remove_elt (bfcache
, cache
);
1268 /* btrace recording does not store previous memory content, neither the stack
1269 frames content. Any unwinding would return errorneous results as the stack
1270 contents no longer matches the changed PC value restored from history.
1271 Therefore this unwinder reports any possibly unwound registers as
1274 const struct frame_unwind record_btrace_frame_unwind
=
1277 record_btrace_frame_unwind_stop_reason
,
1278 record_btrace_frame_this_id
,
1279 record_btrace_frame_prev_register
,
1281 record_btrace_frame_sniffer
,
1282 record_btrace_frame_dealloc_cache
1285 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1288 record_btrace_frame_unwind_stop_reason
,
1289 record_btrace_frame_this_id
,
1290 record_btrace_frame_prev_register
,
1292 record_btrace_tailcall_frame_sniffer
,
1293 record_btrace_frame_dealloc_cache
1296 /* Indicate that TP should be resumed according to FLAG. */
1299 record_btrace_resume_thread (struct thread_info
*tp
,
1300 enum btrace_thread_flag flag
)
1302 struct btrace_thread_info
*btinfo
;
1304 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1306 btinfo
= &tp
->btrace
;
1308 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1309 error (_("Thread already moving."));
1311 /* Fetch the latest branch trace. */
1314 btinfo
->flags
|= flag
;
1317 /* Find the thread to resume given a PTID. */
1319 static struct thread_info
*
1320 record_btrace_find_resume_thread (ptid_t ptid
)
1322 struct thread_info
*tp
;
1324 /* When asked to resume everything, we pick the current thread. */
1325 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
))
1326 ptid
= inferior_ptid
;
1328 return find_thread_ptid (ptid
);
1331 /* Start replaying a thread. */
1333 static struct btrace_insn_iterator
*
1334 record_btrace_start_replaying (struct thread_info
*tp
)
1336 volatile struct gdb_exception except
;
1337 struct btrace_insn_iterator
*replay
;
1338 struct btrace_thread_info
*btinfo
;
1341 btinfo
= &tp
->btrace
;
1344 /* We can't start replaying without trace. */
1345 if (btinfo
->begin
== NULL
)
1348 /* Clear the executing flag to allow changes to the current frame.
1349 We are not actually running, yet. We just started a reverse execution
1350 command or a record goto command.
1351 For the latter, EXECUTING is false and this has no effect.
1352 For the former, EXECUTING is true and we're in to_wait, about to
1353 move the thread. Since we need to recompute the stack, we temporarily
1354 set EXECUTING to flase. */
1355 executing
= is_executing (tp
->ptid
);
1356 set_executing (tp
->ptid
, 0);
1358 /* GDB stores the current frame_id when stepping in order to detects steps
1360 Since frames are computed differently when we're replaying, we need to
1361 recompute those stored frames and fix them up so we can still detect
1362 subroutines after we started replaying. */
1363 TRY_CATCH (except
, RETURN_MASK_ALL
)
1365 struct frame_info
*frame
;
1366 struct frame_id frame_id
;
1367 int upd_step_frame_id
, upd_step_stack_frame_id
;
1369 /* The current frame without replaying - computed via normal unwind. */
1370 frame
= get_current_frame ();
1371 frame_id
= get_frame_id (frame
);
1373 /* Check if we need to update any stepping-related frame id's. */
1374 upd_step_frame_id
= frame_id_eq (frame_id
,
1375 tp
->control
.step_frame_id
);
1376 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1377 tp
->control
.step_stack_frame_id
);
1379 /* We start replaying at the end of the branch trace. This corresponds
1380 to the current instruction. */
1381 replay
= xmalloc (sizeof (*replay
));
1382 btrace_insn_end (replay
, btinfo
);
1384 /* We're not replaying, yet. */
1385 gdb_assert (btinfo
->replay
== NULL
);
1386 btinfo
->replay
= replay
;
1388 /* Make sure we're not using any stale registers. */
1389 registers_changed_ptid (tp
->ptid
);
1391 /* The current frame with replaying - computed via btrace unwind. */
1392 frame
= get_current_frame ();
1393 frame_id
= get_frame_id (frame
);
1395 /* Replace stepping related frames where necessary. */
1396 if (upd_step_frame_id
)
1397 tp
->control
.step_frame_id
= frame_id
;
1398 if (upd_step_stack_frame_id
)
1399 tp
->control
.step_stack_frame_id
= frame_id
;
1402 /* Restore the previous execution state. */
1403 set_executing (tp
->ptid
, executing
);
1405 if (except
.reason
< 0)
1407 xfree (btinfo
->replay
);
1408 btinfo
->replay
= NULL
;
1410 registers_changed_ptid (tp
->ptid
);
1412 throw_exception (except
);
1418 /* Stop replaying a thread. */
1421 record_btrace_stop_replaying (struct thread_info
*tp
)
1423 struct btrace_thread_info
*btinfo
;
1425 btinfo
= &tp
->btrace
;
1427 xfree (btinfo
->replay
);
1428 btinfo
->replay
= NULL
;
1430 /* Make sure we're not leaving any stale registers. */
1431 registers_changed_ptid (tp
->ptid
);
1434 /* The to_resume method of target record-btrace. */
1437 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1438 enum gdb_signal signal
)
1440 struct thread_info
*tp
, *other
;
1441 enum btrace_thread_flag flag
;
1443 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1445 tp
= record_btrace_find_resume_thread (ptid
);
1447 error (_("Cannot find thread to resume."));
1449 /* Stop replaying other threads if the thread to resume is not replaying. */
1450 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1452 record_btrace_stop_replaying (other
);
1454 /* As long as we're not replaying, just forward the request. */
1455 if (!record_btrace_is_replaying () && execution_direction
!= EXEC_REVERSE
)
1457 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1458 if (ops
->to_resume
!= NULL
)
1459 return ops
->to_resume (ops
, ptid
, step
, signal
);
1461 error (_("Cannot find target for stepping."));
1464 /* Compute the btrace thread flag for the requested move. */
1466 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1468 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1470 /* At the moment, we only move a single thread. We could also move
1471 all threads in parallel by single-stepping each resumed thread
1472 until the first runs into an event.
1473 When we do that, we would want to continue all other threads.
1474 For now, just resume one thread to not confuse to_wait. */
1475 record_btrace_resume_thread (tp
, flag
);
1477 /* We just indicate the resume intent here. The actual stepping happens in
1478 record_btrace_wait below. */
1481 /* Find a thread to move. */
1483 static struct thread_info
*
1484 record_btrace_find_thread_to_move (ptid_t ptid
)
1486 struct thread_info
*tp
;
1488 /* First check the parameter thread. */
1489 tp
= find_thread_ptid (ptid
);
1490 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1493 /* Otherwise, find one other thread that has been resumed. */
1495 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1501 /* Return a target_waitstatus indicating that we ran out of history. */
1503 static struct target_waitstatus
1504 btrace_step_no_history (void)
1506 struct target_waitstatus status
;
1508 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1513 /* Return a target_waitstatus indicating that a step finished. */
1515 static struct target_waitstatus
1516 btrace_step_stopped (void)
1518 struct target_waitstatus status
;
1520 status
.kind
= TARGET_WAITKIND_STOPPED
;
1521 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1526 /* Clear the record histories. */
1529 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1531 xfree (btinfo
->insn_history
);
1532 xfree (btinfo
->call_history
);
1534 btinfo
->insn_history
= NULL
;
1535 btinfo
->call_history
= NULL
;
1538 /* Step a single thread. */
1540 static struct target_waitstatus
1541 record_btrace_step_thread (struct thread_info
*tp
)
1543 struct btrace_insn_iterator
*replay
, end
;
1544 struct btrace_thread_info
*btinfo
;
1545 struct address_space
*aspace
;
1546 struct inferior
*inf
;
1547 enum btrace_thread_flag flags
;
1550 btinfo
= &tp
->btrace
;
1551 replay
= btinfo
->replay
;
1553 flags
= btinfo
->flags
& BTHR_MOVE
;
1554 btinfo
->flags
&= ~BTHR_MOVE
;
1556 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1561 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1564 /* We're done if we're not replaying. */
1566 return btrace_step_no_history ();
1568 /* We are always able to step at least once. */
1569 steps
= btrace_insn_next (replay
, 1);
1570 gdb_assert (steps
== 1);
1572 /* Determine the end of the instruction trace. */
1573 btrace_insn_end (&end
, btinfo
);
1575 /* We stop replaying if we reached the end of the trace. */
1576 if (btrace_insn_cmp (replay
, &end
) == 0)
1577 record_btrace_stop_replaying (tp
);
1579 return btrace_step_stopped ();
1582 /* Start replaying if we're not already doing so. */
1584 replay
= record_btrace_start_replaying (tp
);
1586 /* If we can't step any further, we reached the end of the history. */
1587 steps
= btrace_insn_prev (replay
, 1);
1589 return btrace_step_no_history ();
1591 return btrace_step_stopped ();
1594 /* We're done if we're not replaying. */
1596 return btrace_step_no_history ();
1598 inf
= find_inferior_pid (ptid_get_pid (tp
->ptid
));
1599 aspace
= inf
->aspace
;
1601 /* Determine the end of the instruction trace. */
1602 btrace_insn_end (&end
, btinfo
);
1606 const struct btrace_insn
*insn
;
1608 /* We are always able to step at least once. */
1609 steps
= btrace_insn_next (replay
, 1);
1610 gdb_assert (steps
== 1);
1612 /* We stop replaying if we reached the end of the trace. */
1613 if (btrace_insn_cmp (replay
, &end
) == 0)
1615 record_btrace_stop_replaying (tp
);
1616 return btrace_step_no_history ();
1619 insn
= btrace_insn_get (replay
);
1622 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1623 target_pid_to_str (tp
->ptid
),
1624 core_addr_to_string_nz (insn
->pc
));
1626 if (breakpoint_here_p (aspace
, insn
->pc
))
1627 return btrace_step_stopped ();
1631 /* Start replaying if we're not already doing so. */
1633 replay
= record_btrace_start_replaying (tp
);
1635 inf
= find_inferior_pid (ptid_get_pid (tp
->ptid
));
1636 aspace
= inf
->aspace
;
1640 const struct btrace_insn
*insn
;
1642 /* If we can't step any further, we're done. */
1643 steps
= btrace_insn_prev (replay
, 1);
1645 return btrace_step_no_history ();
1647 insn
= btrace_insn_get (replay
);
1650 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1651 target_pid_to_str (tp
->ptid
),
1652 core_addr_to_string_nz (insn
->pc
));
1654 if (breakpoint_here_p (aspace
, insn
->pc
))
1655 return btrace_step_stopped ();
1660 /* The to_wait method of target record-btrace. */
1663 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
1664 struct target_waitstatus
*status
, int options
)
1666 struct thread_info
*tp
, *other
;
1668 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
1670 /* As long as we're not replaying, just forward the request. */
1671 if (!record_btrace_is_replaying () && execution_direction
!= EXEC_REVERSE
)
1673 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1674 if (ops
->to_wait
!= NULL
)
1675 return ops
->to_wait (ops
, ptid
, status
, options
);
1677 error (_("Cannot find target for waiting."));
1680 /* Let's find a thread to move. */
1681 tp
= record_btrace_find_thread_to_move (ptid
);
1684 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
1686 status
->kind
= TARGET_WAITKIND_IGNORE
;
1687 return minus_one_ptid
;
1690 /* We only move a single thread. We're not able to correlate threads. */
1691 *status
= record_btrace_step_thread (tp
);
1693 /* Stop all other threads. */
1696 other
->btrace
.flags
&= ~BTHR_MOVE
;
1698 /* Start record histories anew from the current position. */
1699 record_btrace_clear_histories (&tp
->btrace
);
1701 /* We moved the replay position but did not update registers. */
1702 registers_changed_ptid (tp
->ptid
);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (void)
{
  /* Replaying from recorded branch trace always supports reverse
     execution.  */
  return 1;
}
1715 /* The to_decr_pc_after_break method of target record-btrace. */
1718 record_btrace_decr_pc_after_break (struct target_ops
*ops
,
1719 struct gdbarch
*gdbarch
)
1721 /* When replaying, we do not actually execute the breakpoint instruction
1722 so there is no need to adjust the PC after hitting a breakpoint. */
1723 if (record_btrace_is_replaying ())
1726 return forward_target_decr_pc_after_break (ops
->beneath
, gdbarch
);
1729 /* The to_find_new_threads method of target record-btrace. */
1732 record_btrace_find_new_threads (struct target_ops
*ops
)
1734 /* Don't expect new threads if we're replaying. */
1735 if (record_btrace_is_replaying ())
1738 /* Forward the request. */
1739 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1740 if (ops
->to_find_new_threads
!= NULL
)
1742 ops
->to_find_new_threads (ops
);
1747 /* The to_thread_alive method of target record-btrace. */
1750 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
1752 /* We don't add or remove threads during replay. */
1753 if (record_btrace_is_replaying ())
1754 return find_thread_ptid (ptid
) != NULL
;
1756 /* Forward the request. */
1757 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1758 if (ops
->to_thread_alive
!= NULL
)
1759 return ops
->to_thread_alive (ops
, ptid
);
1764 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1768 record_btrace_set_replay (struct thread_info
*tp
,
1769 const struct btrace_insn_iterator
*it
)
1771 struct btrace_thread_info
*btinfo
;
1773 btinfo
= &tp
->btrace
;
1775 if (it
== NULL
|| it
->function
== NULL
)
1776 record_btrace_stop_replaying (tp
);
1779 if (btinfo
->replay
== NULL
)
1780 record_btrace_start_replaying (tp
);
1781 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
1784 *btinfo
->replay
= *it
;
1785 registers_changed_ptid (tp
->ptid
);
1788 /* Start anew from the new replay position. */
1789 record_btrace_clear_histories (btinfo
);
1792 /* The to_goto_record_begin method of target record-btrace. */
1795 record_btrace_goto_begin (void)
1797 struct thread_info
*tp
;
1798 struct btrace_insn_iterator begin
;
1800 tp
= require_btrace_thread ();
1802 btrace_insn_begin (&begin
, &tp
->btrace
);
1803 record_btrace_set_replay (tp
, &begin
);
1805 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1808 /* The to_goto_record_end method of target record-btrace. */
1811 record_btrace_goto_end (void)
1813 struct thread_info
*tp
;
1815 tp
= require_btrace_thread ();
1817 record_btrace_set_replay (tp
, NULL
);
1819 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1822 /* The to_goto_record method of target record-btrace. */
1825 record_btrace_goto (ULONGEST insn
)
1827 struct thread_info
*tp
;
1828 struct btrace_insn_iterator it
;
1829 unsigned int number
;
1834 /* Check for wrap-arounds. */
1836 error (_("Instruction number out of range."));
1838 tp
= require_btrace_thread ();
1840 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
1842 error (_("No such instruction."));
1844 record_btrace_set_replay (tp
, &it
);
1846 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1849 /* Initialize the record-btrace target ops. */
1852 init_record_btrace_ops (void)
1854 struct target_ops
*ops
;
1856 ops
= &record_btrace_ops
;
1857 ops
->to_shortname
= "record-btrace";
1858 ops
->to_longname
= "Branch tracing target";
1859 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
1860 ops
->to_open
= record_btrace_open
;
1861 ops
->to_close
= record_btrace_close
;
1862 ops
->to_detach
= record_detach
;
1863 ops
->to_disconnect
= record_disconnect
;
1864 ops
->to_mourn_inferior
= record_mourn_inferior
;
1865 ops
->to_kill
= record_kill
;
1866 ops
->to_create_inferior
= find_default_create_inferior
;
1867 ops
->to_stop_recording
= record_btrace_stop_recording
;
1868 ops
->to_info_record
= record_btrace_info
;
1869 ops
->to_insn_history
= record_btrace_insn_history
;
1870 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
1871 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
1872 ops
->to_call_history
= record_btrace_call_history
;
1873 ops
->to_call_history_from
= record_btrace_call_history_from
;
1874 ops
->to_call_history_range
= record_btrace_call_history_range
;
1875 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
1876 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
1877 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
1878 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
1879 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
1880 ops
->to_store_registers
= record_btrace_store_registers
;
1881 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
1882 ops
->to_get_unwinder
= &record_btrace_frame_unwind
;
1883 ops
->to_get_tailcall_unwinder
= &record_btrace_tailcall_frame_unwind
;
1884 ops
->to_resume
= record_btrace_resume
;
1885 ops
->to_wait
= record_btrace_wait
;
1886 ops
->to_find_new_threads
= record_btrace_find_new_threads
;
1887 ops
->to_thread_alive
= record_btrace_thread_alive
;
1888 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
1889 ops
->to_goto_record_end
= record_btrace_goto_end
;
1890 ops
->to_goto_record
= record_btrace_goto
;
1891 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
1892 ops
->to_decr_pc_after_break
= record_btrace_decr_pc_after_break
;
1893 ops
->to_stratum
= record_stratum
;
1894 ops
->to_magic
= OPS_MAGIC
;
1897 /* Alias for "target record". */
1900 cmd_record_btrace_start (char *args
, int from_tty
)
1902 if (args
!= NULL
&& *args
!= 0)
1903 error (_("Invalid argument."));
1905 execute_command ("target record-btrace", from_tty
);
1908 void _initialize_record_btrace (void);
1910 /* Initialize btrace commands. */
1913 _initialize_record_btrace (void)
1915 add_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
1916 _("Start branch trace recording."),
1918 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
1920 init_record_btrace_ops ();
1921 add_target (&record_btrace_ops
);
1923 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,