1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 /* The target_ops of record-btrace. */
40 static struct target_ops record_btrace_ops
;
42 /* A new thread observer enabling branch tracing for the new thread. */
43 static struct observer
*record_btrace_thread_observer
;
45 /* Temporarily allow memory accesses. */
46 static int record_btrace_allow_memory_access
;
48 /* Print a record-btrace debug message. Use do ... while (0) to avoid
49 ambiguities when used in if statements. */
51 #define DEBUG(msg, args...) \
54 if (record_debug != 0) \
55 fprintf_unfiltered (gdb_stdlog, \
56 "[record-btrace] " msg "\n", ##args); \
61 /* Update the branch trace for the current thread and return a pointer to its
   thread_info.  (NOTE(review): the middle of this comment was lost in
   extraction — confirm wording against upstream record-btrace.c.)
64 Throws an error if there is no thread or no trace. This function never
   returns NULL; it errors out instead.  */
67 static struct thread_info
*
68 require_btrace_thread (void)
70 struct thread_info
*tp
;
/* Look up the thread for the current inferior ptid.  */
74 tp
= find_thread_ptid (inferior_ptid
);
/* NOTE(review): the NULL check guarding this error (presumably
   `if (tp == NULL)`) was dropped by the extraction — confirm.  */
76 error (_("No thread."));
/* An empty trace buffer is reported the same way as having no trace
   at all.  */
80 if (btrace_is_empty (tp
))
81 error (_("No trace."));
86 /* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
89 Throws an error if there is no thread or no trace. This function never
92 static struct btrace_thread_info
*
95 struct thread_info
*tp
;
97 tp
= require_btrace_thread ();
102 /* Enable branch tracing for one thread. Warn on errors. */
105 record_btrace_enable_warn (struct thread_info
*tp
)
107 volatile struct gdb_exception error
;
/* Catch errors instead of propagating them: this runs as a new-thread
   observer callback, which must not throw.
   NOTE(review): the TRY_CATCH body (the actual btrace enable call) was
   lost in extraction — confirm against upstream.  */
109 TRY_CATCH (error
, RETURN_MASK_ERROR
)
/* Downgrade any caught error to a warning.  */
112 if (error
.message
!= NULL
)
113 warning ("%s", error
.message
);
116 /* Callback function to disable branch tracing for one thread. */
119 record_btrace_disable_callback (void *arg
)
121 struct thread_info
*tp
;
128 /* Enable automatic tracing of new threads. */
131 record_btrace_auto_enable (void)
133 DEBUG ("attach thread observer");
/* Register record_btrace_enable_warn to run for every newly created
   thread.  The returned handle is stored so record_btrace_auto_disable
   can detach it later.  */
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn
);
139 /* Disable automatic tracing of new threads. */
142 record_btrace_auto_disable (void)
/* This function is idempotent: calling it twice is harmless.  */
144 /* The observer may have been detached, already. */
145 if (record_btrace_thread_observer
== NULL
)
/* NOTE(review): the early `return;` following this check was lost in
   extraction — confirm.  */
148 DEBUG ("detach thread observer");
150 observer_detach_new_thread (record_btrace_thread_observer
);
/* Clear the handle so a repeated call takes the early-out above.  */
151 record_btrace_thread_observer
= NULL
;
154 /* The to_open method of target record-btrace. */
157 record_btrace_open (char *args
, int from_tty
)
159 struct cleanup
*disable_chain
;
160 struct thread_info
*tp
;
166 if (!target_has_execution
)
167 error (_("The program is not being run."));
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
173 error (_("Record btrace can't debug inferior in non-stop mode."));
175 gdb_assert (record_btrace_thread_observer
== NULL
);
177 disable_chain
= make_cleanup (null_cleanup
, NULL
);
179 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
183 make_cleanup (record_btrace_disable_callback
, tp
);
186 record_btrace_auto_enable ();
188 push_target (&record_btrace_ops
);
190 observer_notify_record_changed (current_inferior (), 1);
192 discard_cleanups (disable_chain
);
195 /* The to_stop_recording method of target record-btrace. */
198 record_btrace_stop_recording (struct target_ops
*self
)
200 struct thread_info
*tp
;
202 DEBUG ("stop recording");
/* Detach the new-thread observer first so no thread created from here
   on gets branch tracing enabled.  */
204 record_btrace_auto_disable ();
/* NOTE(review): the loop iterating over all threads (presumably
   ALL_THREADS or similar) was lost in extraction; the check below
   selects threads that currently have tracing enabled — confirm.  */
207 if (tp
->btrace
.target
!= NULL
)
211 /* The to_close method of target record-btrace. */
214 record_btrace_close (struct target_ops
*self
)
216 struct thread_info
*tp
;
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
/* NOTE(review): the per-thread iteration wrapping this teardown call
   was lost in extraction — confirm against upstream.  */
225 btrace_teardown (tp
);
228 /* The to_info_record method of target record-btrace. */
231 record_btrace_info (struct target_ops
*self
)
233 struct btrace_thread_info
*btinfo
;
234 struct thread_info
*tp
;
235 unsigned int insns
, calls
;
239 tp
= find_thread_ptid (inferior_ptid
);
241 error (_("No thread."));
248 btinfo
= &tp
->btrace
;
250 if (!btrace_is_empty (tp
))
252 struct btrace_call_iterator call
;
253 struct btrace_insn_iterator insn
;
255 btrace_call_end (&call
, btinfo
);
256 btrace_call_prev (&call
, 1);
257 calls
= btrace_call_number (&call
);
259 btrace_insn_end (&insn
, btinfo
);
260 btrace_insn_prev (&insn
, 1);
261 insns
= btrace_insn_number (&insn
);
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
265 "%d (%s).\n"), insns
, calls
, tp
->num
,
266 target_pid_to_str (tp
->ptid
));
268 if (btrace_is_replaying (tp
))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo
->replay
));
273 /* Print an unsigned int. */
/* Emit VAL into UIOUT under the field name FLD, formatted with "%u".
   Thin convenience wrapper around ui_out_field_fmt.  */
276 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
278 ui_out_field_fmt (uiout
, fld
, "%u", val
);
281 /* Disassemble a section of the recorded instruction trace. */
284 btrace_insn_history (struct ui_out
*uiout
,
285 const struct btrace_insn_iterator
*begin
,
286 const struct btrace_insn_iterator
*end
, int flags
)
288 struct gdbarch
*gdbarch
;
289 struct btrace_insn_iterator it
;
291 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
292 btrace_insn_number (end
));
294 gdbarch
= target_gdbarch ();
296 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
298 const struct btrace_insn
*insn
;
300 insn
= btrace_insn_get (&it
);
302 /* Print the instruction index. */
303 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
304 ui_out_text (uiout
, "\t");
306 /* Disassembly with '/m' flag may not produce the expected result.
308 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
, insn
->pc
+ 1);
312 /* The to_insn_history method of target record-btrace. */
315 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
317 struct btrace_thread_info
*btinfo
;
318 struct btrace_insn_history
*history
;
319 struct btrace_insn_iterator begin
, end
;
320 struct cleanup
*uiout_cleanup
;
321 struct ui_out
*uiout
;
322 unsigned int context
, covered
;
324 uiout
= current_uiout
;
325 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
327 context
= abs (size
);
329 error (_("Bad record instruction-history-size."));
331 btinfo
= require_btrace ();
332 history
= btinfo
->insn_history
;
335 struct btrace_insn_iterator
*replay
;
337 DEBUG ("insn-history (0x%x): %d", flags
, size
);
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay
= btinfo
->replay
;
345 btrace_insn_end (&begin
, btinfo
);
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
353 /* We want the current position covered, as well. */
354 covered
= btrace_insn_next (&end
, 1);
355 covered
+= btrace_insn_prev (&begin
, context
- covered
);
356 covered
+= btrace_insn_next (&end
, context
- covered
);
360 covered
= btrace_insn_next (&end
, context
);
361 covered
+= btrace_insn_prev (&begin
, context
- covered
);
366 begin
= history
->begin
;
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
370 btrace_insn_number (&begin
), btrace_insn_number (&end
));
375 covered
= btrace_insn_prev (&begin
, context
);
380 covered
= btrace_insn_next (&end
, context
);
385 btrace_insn_history (uiout
, &begin
, &end
, flags
);
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
394 btrace_set_insn_history (btinfo
, &begin
, &end
);
395 do_cleanups (uiout_cleanup
);
398 /* The to_insn_history_range method of target record-btrace. */
401 record_btrace_insn_history_range (ULONGEST from
, ULONGEST to
, int flags
)
403 struct btrace_thread_info
*btinfo
;
404 struct btrace_insn_history
*history
;
405 struct btrace_insn_iterator begin
, end
;
406 struct cleanup
*uiout_cleanup
;
407 struct ui_out
*uiout
;
408 unsigned int low
, high
;
411 uiout
= current_uiout
;
412 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
417 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
419 /* Check for wrap-arounds. */
420 if (low
!= from
|| high
!= to
)
421 error (_("Bad range."));
424 error (_("Bad range."));
426 btinfo
= require_btrace ();
428 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
430 error (_("Range out of bounds."));
432 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
435 /* Silently truncate the range. */
436 btrace_insn_end (&end
, btinfo
);
440 /* We want both begin and end to be inclusive. */
441 btrace_insn_next (&end
, 1);
444 btrace_insn_history (uiout
, &begin
, &end
, flags
);
445 btrace_set_insn_history (btinfo
, &begin
, &end
);
447 do_cleanups (uiout_cleanup
);
450 /* The to_insn_history_from method of target record-btrace. */
453 record_btrace_insn_history_from (struct target_ops
*self
,
454 ULONGEST from
, int size
, int flags
)
456 ULONGEST begin
, end
, context
;
458 context
= abs (size
);
460 error (_("Bad record instruction-history-size."));
469 begin
= from
- context
+ 1;
474 end
= from
+ context
- 1;
476 /* Check for wrap-around. */
481 record_btrace_insn_history_range (begin
, end
, flags
);
484 /* Print the instruction number range for a function call history line. */
487 btrace_call_history_insn_range (struct ui_out
*uiout
,
488 const struct btrace_function
*bfun
)
490 unsigned int begin
, end
, size
;
/* Number of instructions in this function segment.  */
492 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
/* A function segment always contains at least one instruction.  */
493 gdb_assert (size
> 0);
/* BEGIN is the global instruction number of the segment's first
   instruction; END is inclusive.  */
495 begin
= bfun
->insn_offset
;
496 end
= begin
+ size
- 1;
/* Print as "begin,end".  */
498 ui_out_field_uint (uiout
, "insn begin", begin
);
499 ui_out_text (uiout
, ",");
500 ui_out_field_uint (uiout
, "insn end", end
);
503 /* Print the source line information for a function call history line. */
506 btrace_call_history_src_line (struct ui_out
*uiout
,
507 const struct btrace_function
*bfun
)
516 ui_out_field_string (uiout
, "file",
517 symtab_to_filename_for_display (sym
->symtab
));
519 begin
= bfun
->lbegin
;
525 ui_out_text (uiout
, ":");
526 ui_out_field_int (uiout
, "min line", begin
);
531 ui_out_text (uiout
, ",");
532 ui_out_field_int (uiout
, "max line", end
);
535 /* Get the name of a branch trace function. */
/* Prefers the full symbol's print name over the minimal symbol's.
   NOTE(review): the lines declaring/initializing SYM and the checks
   preceding the first return were lost in extraction — confirm the
   fallback order against upstream.  */
538 btrace_get_bfun_name (const struct btrace_function
*bfun
)
540 struct minimal_symbol
*msym
;
550 return SYMBOL_PRINT_NAME (sym
);
551 else if (msym
!= NULL
)
552 return SYMBOL_PRINT_NAME (msym
);
557 /* Disassemble a section of the recorded function trace. */
560 btrace_call_history (struct ui_out
*uiout
,
561 const struct btrace_thread_info
*btinfo
,
562 const struct btrace_call_iterator
*begin
,
563 const struct btrace_call_iterator
*end
,
564 enum record_print_flag flags
)
566 struct btrace_call_iterator it
;
568 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
569 btrace_call_number (end
));
571 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
573 const struct btrace_function
*bfun
;
574 struct minimal_symbol
*msym
;
577 bfun
= btrace_call_get (&it
);
581 /* Print the function index. */
582 ui_out_field_uint (uiout
, "index", bfun
->number
);
583 ui_out_text (uiout
, "\t");
585 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
587 int level
= bfun
->level
+ btinfo
->level
, i
;
589 for (i
= 0; i
< level
; ++i
)
590 ui_out_text (uiout
, " ");
594 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
595 else if (msym
!= NULL
)
596 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (msym
));
597 else if (!ui_out_is_mi_like_p (uiout
))
598 ui_out_field_string (uiout
, "function", "??");
600 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
602 ui_out_text (uiout
, _("\tinst "));
603 btrace_call_history_insn_range (uiout
, bfun
);
606 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
608 ui_out_text (uiout
, _("\tat "));
609 btrace_call_history_src_line (uiout
, bfun
);
612 ui_out_text (uiout
, "\n");
616 /* The to_call_history method of target record-btrace. */
619 record_btrace_call_history (int size
, int flags
)
621 struct btrace_thread_info
*btinfo
;
622 struct btrace_call_history
*history
;
623 struct btrace_call_iterator begin
, end
;
624 struct cleanup
*uiout_cleanup
;
625 struct ui_out
*uiout
;
626 unsigned int context
, covered
;
628 uiout
= current_uiout
;
629 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
631 context
= abs (size
);
633 error (_("Bad record function-call-history-size."));
635 btinfo
= require_btrace ();
636 history
= btinfo
->call_history
;
639 struct btrace_insn_iterator
*replay
;
641 DEBUG ("call-history (0x%x): %d", flags
, size
);
643 /* If we're replaying, we start at the replay position. Otherwise, we
644 start at the tail of the trace. */
645 replay
= btinfo
->replay
;
648 begin
.function
= replay
->function
;
649 begin
.btinfo
= btinfo
;
652 btrace_call_end (&begin
, btinfo
);
654 /* We start from here and expand in the requested direction. Then we
655 expand in the other direction, as well, to fill up any remaining
660 /* We want the current position covered, as well. */
661 covered
= btrace_call_next (&end
, 1);
662 covered
+= btrace_call_prev (&begin
, context
- covered
);
663 covered
+= btrace_call_next (&end
, context
- covered
);
667 covered
= btrace_call_next (&end
, context
);
668 covered
+= btrace_call_prev (&begin
, context
- covered
);
673 begin
= history
->begin
;
676 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
677 btrace_call_number (&begin
), btrace_call_number (&end
));
682 covered
= btrace_call_prev (&begin
, context
);
687 covered
= btrace_call_next (&end
, context
);
692 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
696 printf_unfiltered (_("At the start of the branch trace record.\n"));
698 printf_unfiltered (_("At the end of the branch trace record.\n"));
701 btrace_set_call_history (btinfo
, &begin
, &end
);
702 do_cleanups (uiout_cleanup
);
705 /* The to_call_history_range method of target record-btrace. */
708 record_btrace_call_history_range (ULONGEST from
, ULONGEST to
, int flags
)
710 struct btrace_thread_info
*btinfo
;
711 struct btrace_call_history
*history
;
712 struct btrace_call_iterator begin
, end
;
713 struct cleanup
*uiout_cleanup
;
714 struct ui_out
*uiout
;
715 unsigned int low
, high
;
718 uiout
= current_uiout
;
719 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
724 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
726 /* Check for wrap-arounds. */
727 if (low
!= from
|| high
!= to
)
728 error (_("Bad range."));
731 error (_("Bad range."));
733 btinfo
= require_btrace ();
735 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
737 error (_("Range out of bounds."));
739 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
742 /* Silently truncate the range. */
743 btrace_call_end (&end
, btinfo
);
747 /* We want both begin and end to be inclusive. */
748 btrace_call_next (&end
, 1);
751 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
752 btrace_set_call_history (btinfo
, &begin
, &end
);
754 do_cleanups (uiout_cleanup
);
757 /* The to_call_history_from method of target record-btrace. */
760 record_btrace_call_history_from (ULONGEST from
, int size
, int flags
)
762 ULONGEST begin
, end
, context
;
764 context
= abs (size
);
766 error (_("Bad record function-call-history-size."));
775 begin
= from
- context
+ 1;
780 end
= from
+ context
- 1;
782 /* Check for wrap-around. */
787 record_btrace_call_history_range (begin
, end
, flags
);
790 /* The to_record_is_replaying method of target record-btrace. */
/* Returns non-zero if any thread is currently replaying its trace.
   NOTE(review): the thread-iteration wrapper and the return statements
   were lost in extraction — confirm against upstream.  */
793 record_btrace_is_replaying (struct target_ops
*self
)
795 struct thread_info
*tp
;
798 if (btrace_is_replaying (tp
))
804 /* The to_xfer_partial method of target record-btrace. */
806 static enum target_xfer_status
807 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
808 const char *annex
, gdb_byte
*readbuf
,
809 const gdb_byte
*writebuf
, ULONGEST offset
,
810 ULONGEST len
, ULONGEST
*xfered_len
)
812 struct target_ops
*t
;
814 /* Filter out requests that don't make sense during replay. */
815 if (!record_btrace_allow_memory_access
&& record_btrace_is_replaying (ops
))
819 case TARGET_OBJECT_MEMORY
:
821 struct target_section
*section
;
823 /* We do not allow writing memory in general. */
824 if (writebuf
!= NULL
)
827 return TARGET_XFER_E_UNAVAILABLE
;
830 /* We allow reading readonly memory. */
831 section
= target_section_by_addr (ops
, offset
);
834 /* Check if the section we found is readonly. */
835 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
836 section
->the_bfd_section
)
837 & SEC_READONLY
) != 0)
839 /* Truncate the request to fit into this section. */
840 len
= min (len
, section
->endaddr
- offset
);
846 return TARGET_XFER_E_UNAVAILABLE
;
851 /* Forward the request. */
852 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
853 if (ops
->to_xfer_partial
!= NULL
)
854 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
855 offset
, len
, xfered_len
);
858 return TARGET_XFER_E_UNAVAILABLE
;
861 /* The to_insert_breakpoint method of target record-btrace. */
864 record_btrace_insert_breakpoint (struct target_ops
*ops
,
865 struct gdbarch
*gdbarch
,
866 struct bp_target_info
*bp_tgt
)
868 volatile struct gdb_exception except
;
871 /* Inserting breakpoints requires accessing memory. Allow it for the
872 duration of this function. */
873 old
= record_btrace_allow_memory_access
;
874 record_btrace_allow_memory_access
= 1;
877 TRY_CATCH (except
, RETURN_MASK_ALL
)
878 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
880 record_btrace_allow_memory_access
= old
;
882 if (except
.reason
< 0)
883 throw_exception (except
);
888 /* The to_remove_breakpoint method of target record-btrace. */
891 record_btrace_remove_breakpoint (struct target_ops
*ops
,
892 struct gdbarch
*gdbarch
,
893 struct bp_target_info
*bp_tgt
)
895 volatile struct gdb_exception except
;
898 /* Removing breakpoints requires accessing memory. Allow it for the
899 duration of this function. */
900 old
= record_btrace_allow_memory_access
;
901 record_btrace_allow_memory_access
= 1;
904 TRY_CATCH (except
, RETURN_MASK_ALL
)
905 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
907 record_btrace_allow_memory_access
= old
;
909 if (except
.reason
< 0)
910 throw_exception (except
);
915 /* The to_fetch_registers method of target record-btrace. */
918 record_btrace_fetch_registers (struct target_ops
*ops
,
919 struct regcache
*regcache
, int regno
)
921 struct btrace_insn_iterator
*replay
;
922 struct thread_info
*tp
;
924 tp
= find_thread_ptid (inferior_ptid
);
925 gdb_assert (tp
!= NULL
);
927 replay
= tp
->btrace
.replay
;
930 const struct btrace_insn
*insn
;
931 struct gdbarch
*gdbarch
;
934 gdbarch
= get_regcache_arch (regcache
);
935 pcreg
= gdbarch_pc_regnum (gdbarch
);
939 /* We can only provide the PC register. */
940 if (regno
>= 0 && regno
!= pcreg
)
943 insn
= btrace_insn_get (replay
);
944 gdb_assert (insn
!= NULL
);
946 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
950 struct target_ops
*t
;
952 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
953 if (t
->to_fetch_registers
!= NULL
)
955 t
->to_fetch_registers (t
, regcache
, regno
);
961 /* The to_store_registers method of target record-btrace. */
964 record_btrace_store_registers (struct target_ops
*ops
,
965 struct regcache
*regcache
, int regno
)
967 struct target_ops
*t
;
/* Recorded history is immutable: refuse register writes while
   replaying.  */
969 if (record_btrace_is_replaying (ops
))
970 error (_("This record target does not allow writing registers."));
972 gdb_assert (may_write_registers
!= 0);
/* Not replaying: delegate to the first target beneath us that
   implements to_store_registers.  */
974 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
975 if (t
->to_store_registers
!= NULL
)
977 t
->to_store_registers (t
, regcache
, regno
);
984 /* The to_prepare_to_store method of target record-btrace. */
987 record_btrace_prepare_to_store (struct target_ops
*ops
,
988 struct regcache
*regcache
)
990 struct target_ops
*t
;
/* While replaying there is nothing to prepare; register stores are
   rejected elsewhere.  NOTE(review): the `return;` after this check
   was lost in extraction — confirm.  */
992 if (record_btrace_is_replaying (ops
))
/* Otherwise delegate to the first target beneath us that implements
   to_prepare_to_store.  */
995 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
996 if (t
->to_prepare_to_store
!= NULL
)
998 t
->to_prepare_to_store (t
, regcache
);
1003 /* The branch trace frame cache. */
1005 struct btrace_frame_cache
/* The thread this frame belongs to.  */
1008 struct thread_info
*tp
;
1010 /* The frame info. */
1011 struct frame_info
*frame
;
1013 /* The branch trace function segment. */
1014 const struct btrace_function
*bfun
;
1017 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1019 static htab_t bfcache
;
1021 /* hash_f for htab_create_alloc of bfcache. */
/* The cache entries are keyed by their frame_info pointer, so the hash
   is simply the pointer hash of CACHE->frame.  */
1024 bfcache_hash (const void *arg
)
1026 const struct btrace_frame_cache
*cache
= arg
;
1028 return htab_hash_pointer (cache
->frame
);
1031 /* eq_f for htab_create_alloc of bfcache. */
/* Two cache entries are equal iff they refer to the same frame_info
   pointer — consistent with bfcache_hash above.  */
1034 bfcache_eq (const void *arg1
, const void *arg2
)
1036 const struct btrace_frame_cache
*cache1
= arg1
;
1037 const struct btrace_frame_cache
*cache2
= arg2
;
1039 return cache1
->frame
== cache2
->frame
;
1042 /* Create a new btrace frame cache. */
1044 static struct btrace_frame_cache
*
1045 bfcache_new (struct frame_info
*frame
)
1047 struct btrace_frame_cache
*cache
;
1050 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1051 cache
->frame
= frame
;
1053 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1054 gdb_assert (*slot
== NULL
);
1060 /* Extract the branch trace function from a branch trace frame. */
1062 static const struct btrace_function
*
1063 btrace_get_frame_function (struct frame_info
*frame
)
1065 const struct btrace_frame_cache
*cache
;
1066 const struct btrace_function
*bfun
;
1067 struct btrace_frame_cache pattern
;
1070 pattern
.frame
= frame
;
1072 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1080 /* Implement stop_reason method for record_btrace_frame_unwind. */
1082 static enum unwind_stop_reason
1083 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1086 const struct btrace_frame_cache
*cache
;
1087 const struct btrace_function
*bfun
;
1089 cache
= *this_cache
;
1091 gdb_assert (bfun
!= NULL
);
1093 if (bfun
->up
== NULL
)
1094 return UNWIND_UNAVAILABLE
;
1096 return UNWIND_NO_REASON
;
1099 /* Implement this_id method for record_btrace_frame_unwind. */
1102 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1103 struct frame_id
*this_id
)
1105 const struct btrace_frame_cache
*cache
;
1106 const struct btrace_function
*bfun
;
1107 CORE_ADDR code
, special
;
1109 cache
= *this_cache
;
1112 gdb_assert (bfun
!= NULL
);
1114 while (bfun
->segment
.prev
!= NULL
)
1115 bfun
= bfun
->segment
.prev
;
1117 code
= get_frame_func (this_frame
);
1118 special
= bfun
->number
;
1120 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1122 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1123 btrace_get_bfun_name (cache
->bfun
),
1124 core_addr_to_string_nz (this_id
->code_addr
),
1125 core_addr_to_string_nz (this_id
->special_addr
));
1128 /* Implement prev_register method for record_btrace_frame_unwind. */
1130 static struct value
*
1131 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1135 const struct btrace_frame_cache
*cache
;
1136 const struct btrace_function
*bfun
, *caller
;
1137 const struct btrace_insn
*insn
;
1138 struct gdbarch
*gdbarch
;
1142 gdbarch
= get_frame_arch (this_frame
);
1143 pcreg
= gdbarch_pc_regnum (gdbarch
);
1144 if (pcreg
< 0 || regnum
!= pcreg
)
1145 throw_error (NOT_AVAILABLE_ERROR
,
1146 _("Registers are not available in btrace record history"));
1148 cache
= *this_cache
;
1150 gdb_assert (bfun
!= NULL
);
1154 throw_error (NOT_AVAILABLE_ERROR
,
1155 _("No caller in btrace record history"));
1157 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1159 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1164 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1167 pc
+= gdb_insn_length (gdbarch
, pc
);
1170 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1171 btrace_get_bfun_name (bfun
), bfun
->level
,
1172 core_addr_to_string_nz (pc
));
1174 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1177 /* Implement sniffer method for record_btrace_frame_unwind. */
1180 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1181 struct frame_info
*this_frame
,
1184 const struct btrace_function
*bfun
;
1185 struct btrace_frame_cache
*cache
;
1186 struct thread_info
*tp
;
1187 struct frame_info
*next
;
1189 /* THIS_FRAME does not contain a reference to its thread. */
1190 tp
= find_thread_ptid (inferior_ptid
);
1191 gdb_assert (tp
!= NULL
);
1194 next
= get_next_frame (this_frame
);
1197 const struct btrace_insn_iterator
*replay
;
1199 replay
= tp
->btrace
.replay
;
1201 bfun
= replay
->function
;
1205 const struct btrace_function
*callee
;
1207 callee
= btrace_get_frame_function (next
);
1208 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1215 DEBUG ("[frame] sniffed frame for %s on level %d",
1216 btrace_get_bfun_name (bfun
), bfun
->level
);
1218 /* This is our frame. Initialize the frame cache. */
1219 cache
= bfcache_new (this_frame
);
1223 *this_cache
= cache
;
1227 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1230 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1231 struct frame_info
*this_frame
,
1234 const struct btrace_function
*bfun
, *callee
;
1235 struct btrace_frame_cache
*cache
;
1236 struct frame_info
*next
;
1238 next
= get_next_frame (this_frame
);
1242 callee
= btrace_get_frame_function (next
);
1246 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1253 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1254 btrace_get_bfun_name (bfun
), bfun
->level
);
1256 /* This is our frame. Initialize the frame cache. */
1257 cache
= bfcache_new (this_frame
);
1258 cache
->tp
= find_thread_ptid (inferior_ptid
);
1261 *this_cache
= cache
;
1266 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1268 struct btrace_frame_cache
*cache
;
1273 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1274 gdb_assert (slot
!= NULL
);
1276 htab_remove_elt (bfcache
, cache
);
1279 /* btrace recording does not store previous memory content, neither the stack
1280 frames content. Any unwinding would return erroneous results as the stack
1281 contents no longer matches the changed PC value restored from history.
1282 Therefore this unwinder reports any possibly unwound registers as
1285 const struct frame_unwind record_btrace_frame_unwind
=
1288 record_btrace_frame_unwind_stop_reason
,
1289 record_btrace_frame_this_id
,
1290 record_btrace_frame_prev_register
,
1292 record_btrace_frame_sniffer
,
1293 record_btrace_frame_dealloc_cache
1296 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1299 record_btrace_frame_unwind_stop_reason
,
1300 record_btrace_frame_this_id
,
1301 record_btrace_frame_prev_register
,
1303 record_btrace_tailcall_frame_sniffer
,
1304 record_btrace_frame_dealloc_cache
1307 /* Indicate that TP should be resumed according to FLAG. */
1310 record_btrace_resume_thread (struct thread_info
*tp
,
1311 enum btrace_thread_flag flag
)
1313 struct btrace_thread_info
*btinfo
;
1315 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1317 btinfo
= &tp
->btrace
;
1319 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1320 error (_("Thread already moving."));
1322 /* Fetch the latest branch trace. */
1325 btinfo
->flags
|= flag
;
1328 /* Find the thread to resume given a PTID. */
/* Returns the thread_info for PTID, or NULL if no such thread exists.  */
1330 static struct thread_info
*
1331 record_btrace_find_resume_thread (ptid_t ptid
)
1333 struct thread_info
*tp
;
1335 /* When asked to resume everything, we pick the current thread. */
/* minus_one_ptid means "all threads"; a pid-only ptid means "all
   threads of that process".  Either way, fall back to inferior_ptid.  */
1336 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
)
1337 ptid
= inferior_ptid
;
1339 return find_thread_ptid (ptid
);
1342 /* Start replaying a thread. */
1344 static struct btrace_insn_iterator
*
1345 record_btrace_start_replaying (struct thread_info
*tp
)
1347 volatile struct gdb_exception except
;
1348 struct btrace_insn_iterator
*replay
;
1349 struct btrace_thread_info
*btinfo
;
1352 btinfo
= &tp
->btrace
;
1355 /* We can't start replaying without trace. */
1356 if (btinfo
->begin
== NULL
)
1359 /* Clear the executing flag to allow changes to the current frame.
1360 We are not actually running, yet. We just started a reverse execution
1361 command or a record goto command.
1362 For the latter, EXECUTING is false and this has no effect.
1363 For the former, EXECUTING is true and we're in to_wait, about to
1364 move the thread. Since we need to recompute the stack, we temporarily
1365 set EXECUTING to false. */
1366 executing
= is_executing (tp
->ptid
);
1367 set_executing (tp
->ptid
, 0);
1369 /* GDB stores the current frame_id when stepping in order to detect steps
1371 Since frames are computed differently when we're replaying, we need to
1372 recompute those stored frames and fix them up so we can still detect
1373 subroutines after we started replaying. */
1374 TRY_CATCH (except
, RETURN_MASK_ALL
)
1376 struct frame_info
*frame
;
1377 struct frame_id frame_id
;
1378 int upd_step_frame_id
, upd_step_stack_frame_id
;
1380 /* The current frame without replaying - computed via normal unwind. */
1381 frame
= get_current_frame ();
1382 frame_id
= get_frame_id (frame
);
1384 /* Check if we need to update any stepping-related frame id's. */
1385 upd_step_frame_id
= frame_id_eq (frame_id
,
1386 tp
->control
.step_frame_id
);
1387 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1388 tp
->control
.step_stack_frame_id
);
1390 /* We start replaying at the end of the branch trace. This corresponds
1391 to the current instruction. */
1392 replay
= xmalloc (sizeof (*replay
));
1393 btrace_insn_end (replay
, btinfo
);
1395 /* We're not replaying, yet. */
1396 gdb_assert (btinfo
->replay
== NULL
);
1397 btinfo
->replay
= replay
;
1399 /* Make sure we're not using any stale registers. */
1400 registers_changed_ptid (tp
->ptid
);
1402 /* The current frame with replaying - computed via btrace unwind. */
1403 frame
= get_current_frame ();
1404 frame_id
= get_frame_id (frame
);
1406 /* Replace stepping related frames where necessary. */
1407 if (upd_step_frame_id
)
1408 tp
->control
.step_frame_id
= frame_id
;
1409 if (upd_step_stack_frame_id
)
1410 tp
->control
.step_stack_frame_id
= frame_id
;
1413 /* Restore the previous execution state. */
1414 set_executing (tp
->ptid
, executing
);
1416 if (except
.reason
< 0)
1418 xfree (btinfo
->replay
);
1419 btinfo
->replay
= NULL
;
1421 registers_changed_ptid (tp
->ptid
);
1423 throw_exception (except
);
1429 /* Stop replaying a thread. */
/* Frees TP's replay iterator; a NULL replay pointer marks the thread
   as not replaying.  */
1432 record_btrace_stop_replaying (struct thread_info
*tp
)
1434 struct btrace_thread_info
*btinfo
;
1436 btinfo
= &tp
->btrace
;
1438 xfree (btinfo
->replay
);
/* Clear the pointer after freeing it: it remains in scope via the
   thread, so this guards against use-after-free/double-free.  */
1439 btinfo
->replay
= NULL
;
1441 /* Make sure we're not leaving any stale registers. */
1442 registers_changed_ptid (tp
->ptid
);
1445 /* The to_resume method of target record-btrace. */
1448 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1449 enum gdb_signal signal
)
1451 struct thread_info
*tp
, *other
;
1452 enum btrace_thread_flag flag
;
1454 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1456 tp
= record_btrace_find_resume_thread (ptid
);
1458 error (_("Cannot find thread to resume."));
1460 /* Stop replaying other threads if the thread to resume is not replaying. */
1461 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1463 record_btrace_stop_replaying (other
);
1465 /* As long as we're not replaying, just forward the request. */
1466 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1468 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1469 if (ops
->to_resume
!= NULL
)
1470 return ops
->to_resume (ops
, ptid
, step
, signal
);
1472 error (_("Cannot find target for stepping."));
1475 /* Compute the btrace thread flag for the requested move. */
1477 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1479 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1481 /* At the moment, we only move a single thread. We could also move
1482 all threads in parallel by single-stepping each resumed thread
1483 until the first runs into an event.
1484 When we do that, we would want to continue all other threads.
1485 For now, just resume one thread to not confuse to_wait. */
1486 record_btrace_resume_thread (tp
, flag
);
1488 /* We just indicate the resume intent here. The actual stepping happens in
1489 record_btrace_wait below. */
1492 /* Find a thread to move. */
1494 static struct thread_info
*
1495 record_btrace_find_thread_to_move (ptid_t ptid
)
1497 struct thread_info
*tp
;
1499 /* First check the parameter thread. */
1500 tp
= find_thread_ptid (ptid
);
1501 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1504 /* Otherwise, find one other thread that has been resumed. */
1506 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1512 /* Return a target_waitstatus indicating that we ran out of history. */
1514 static struct target_waitstatus
1515 btrace_step_no_history (void)
1517 struct target_waitstatus status
;
1519 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1524 /* Return a target_waitstatus indicating that a step finished. */
1526 static struct target_waitstatus
1527 btrace_step_stopped (void)
1529 struct target_waitstatus status
;
1531 status
.kind
= TARGET_WAITKIND_STOPPED
;
1532 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1537 /* Clear the record histories. */
1540 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1542 xfree (btinfo
->insn_history
);
1543 xfree (btinfo
->call_history
);
1545 btinfo
->insn_history
= NULL
;
1546 btinfo
->call_history
= NULL
;
1549 /* Step a single thread. */
1551 static struct target_waitstatus
1552 record_btrace_step_thread (struct thread_info
*tp
)
1554 struct btrace_insn_iterator
*replay
, end
;
1555 struct btrace_thread_info
*btinfo
;
1556 struct address_space
*aspace
;
1557 struct inferior
*inf
;
1558 enum btrace_thread_flag flags
;
1561 btinfo
= &tp
->btrace
;
1562 replay
= btinfo
->replay
;
1564 flags
= btinfo
->flags
& BTHR_MOVE
;
1565 btinfo
->flags
&= ~BTHR_MOVE
;
1567 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1572 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1575 /* We're done if we're not replaying. */
1577 return btrace_step_no_history ();
1579 /* We are always able to step at least once. */
1580 steps
= btrace_insn_next (replay
, 1);
1581 gdb_assert (steps
== 1);
1583 /* Determine the end of the instruction trace. */
1584 btrace_insn_end (&end
, btinfo
);
1586 /* We stop replaying if we reached the end of the trace. */
1587 if (btrace_insn_cmp (replay
, &end
) == 0)
1588 record_btrace_stop_replaying (tp
);
1590 return btrace_step_stopped ();
1593 /* Start replaying if we're not already doing so. */
1595 replay
= record_btrace_start_replaying (tp
);
1597 /* If we can't step any further, we reached the end of the history. */
1598 steps
= btrace_insn_prev (replay
, 1);
1600 return btrace_step_no_history ();
1602 return btrace_step_stopped ();
1605 /* We're done if we're not replaying. */
1607 return btrace_step_no_history ();
1609 inf
= find_inferior_pid (ptid_get_pid (tp
->ptid
));
1610 aspace
= inf
->aspace
;
1612 /* Determine the end of the instruction trace. */
1613 btrace_insn_end (&end
, btinfo
);
1617 const struct btrace_insn
*insn
;
1619 /* We are always able to step at least once. */
1620 steps
= btrace_insn_next (replay
, 1);
1621 gdb_assert (steps
== 1);
1623 /* We stop replaying if we reached the end of the trace. */
1624 if (btrace_insn_cmp (replay
, &end
) == 0)
1626 record_btrace_stop_replaying (tp
);
1627 return btrace_step_no_history ();
1630 insn
= btrace_insn_get (replay
);
1633 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1634 target_pid_to_str (tp
->ptid
),
1635 core_addr_to_string_nz (insn
->pc
));
1637 if (breakpoint_here_p (aspace
, insn
->pc
))
1638 return btrace_step_stopped ();
1642 /* Start replaying if we're not already doing so. */
1644 replay
= record_btrace_start_replaying (tp
);
1646 inf
= find_inferior_pid (ptid_get_pid (tp
->ptid
));
1647 aspace
= inf
->aspace
;
1651 const struct btrace_insn
*insn
;
1653 /* If we can't step any further, we're done. */
1654 steps
= btrace_insn_prev (replay
, 1);
1656 return btrace_step_no_history ();
1658 insn
= btrace_insn_get (replay
);
1661 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1662 target_pid_to_str (tp
->ptid
),
1663 core_addr_to_string_nz (insn
->pc
));
1665 if (breakpoint_here_p (aspace
, insn
->pc
))
1666 return btrace_step_stopped ();
1671 /* The to_wait method of target record-btrace. */
1674 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
1675 struct target_waitstatus
*status
, int options
)
1677 struct thread_info
*tp
, *other
;
1679 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
1681 /* As long as we're not replaying, just forward the request. */
1682 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1684 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1685 if (ops
->to_wait
!= NULL
)
1686 return ops
->to_wait (ops
, ptid
, status
, options
);
1688 error (_("Cannot find target for waiting."));
1691 /* Let's find a thread to move. */
1692 tp
= record_btrace_find_thread_to_move (ptid
);
1695 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
1697 status
->kind
= TARGET_WAITKIND_IGNORE
;
1698 return minus_one_ptid
;
1701 /* We only move a single thread. We're not able to correlate threads. */
1702 *status
= record_btrace_step_thread (tp
);
1704 /* Stop all other threads. */
1707 other
->btrace
.flags
&= ~BTHR_MOVE
;
1709 /* Start record histories anew from the current position. */
1710 record_btrace_clear_histories (&tp
->btrace
);
1712 /* We moved the replay position but did not update registers. */
1713 registers_changed_ptid (tp
->ptid
);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* The recorded trace can always be traversed backwards.  */
  return 1;
}
1726 /* The to_decr_pc_after_break method of target record-btrace. */
1729 record_btrace_decr_pc_after_break (struct target_ops
*ops
,
1730 struct gdbarch
*gdbarch
)
1732 /* When replaying, we do not actually execute the breakpoint instruction
1733 so there is no need to adjust the PC after hitting a breakpoint. */
1734 if (record_btrace_is_replaying (ops
))
1737 return forward_target_decr_pc_after_break (ops
->beneath
, gdbarch
);
1740 /* The to_find_new_threads method of target record-btrace. */
1743 record_btrace_find_new_threads (struct target_ops
*ops
)
1745 /* Don't expect new threads if we're replaying. */
1746 if (record_btrace_is_replaying (ops
))
1749 /* Forward the request. */
1750 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1751 if (ops
->to_find_new_threads
!= NULL
)
1753 ops
->to_find_new_threads (ops
);
1758 /* The to_thread_alive method of target record-btrace. */
1761 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
1763 /* We don't add or remove threads during replay. */
1764 if (record_btrace_is_replaying (ops
))
1765 return find_thread_ptid (ptid
) != NULL
;
1767 /* Forward the request. */
1768 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1769 if (ops
->to_thread_alive
!= NULL
)
1770 return ops
->to_thread_alive (ops
, ptid
);
1775 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1779 record_btrace_set_replay (struct thread_info
*tp
,
1780 const struct btrace_insn_iterator
*it
)
1782 struct btrace_thread_info
*btinfo
;
1784 btinfo
= &tp
->btrace
;
1786 if (it
== NULL
|| it
->function
== NULL
)
1787 record_btrace_stop_replaying (tp
);
1790 if (btinfo
->replay
== NULL
)
1791 record_btrace_start_replaying (tp
);
1792 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
1795 *btinfo
->replay
= *it
;
1796 registers_changed_ptid (tp
->ptid
);
1799 /* Start anew from the new replay position. */
1800 record_btrace_clear_histories (btinfo
);
1803 /* The to_goto_record_begin method of target record-btrace. */
1806 record_btrace_goto_begin (struct target_ops
*self
)
1808 struct thread_info
*tp
;
1809 struct btrace_insn_iterator begin
;
1811 tp
= require_btrace_thread ();
1813 btrace_insn_begin (&begin
, &tp
->btrace
);
1814 record_btrace_set_replay (tp
, &begin
);
1816 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1819 /* The to_goto_record_end method of target record-btrace. */
1822 record_btrace_goto_end (struct target_ops
*ops
)
1824 struct thread_info
*tp
;
1826 tp
= require_btrace_thread ();
1828 record_btrace_set_replay (tp
, NULL
);
1830 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1833 /* The to_goto_record method of target record-btrace. */
1836 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
1838 struct thread_info
*tp
;
1839 struct btrace_insn_iterator it
;
1840 unsigned int number
;
1845 /* Check for wrap-arounds. */
1847 error (_("Instruction number out of range."));
1849 tp
= require_btrace_thread ();
1851 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
1853 error (_("No such instruction."));
1855 record_btrace_set_replay (tp
, &it
);
1857 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1860 /* Initialize the record-btrace target ops. */
1863 init_record_btrace_ops (void)
1865 struct target_ops
*ops
;
1867 ops
= &record_btrace_ops
;
1868 ops
->to_shortname
= "record-btrace";
1869 ops
->to_longname
= "Branch tracing target";
1870 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
1871 ops
->to_open
= record_btrace_open
;
1872 ops
->to_close
= record_btrace_close
;
1873 ops
->to_detach
= record_detach
;
1874 ops
->to_disconnect
= record_disconnect
;
1875 ops
->to_mourn_inferior
= record_mourn_inferior
;
1876 ops
->to_kill
= record_kill
;
1877 ops
->to_create_inferior
= find_default_create_inferior
;
1878 ops
->to_stop_recording
= record_btrace_stop_recording
;
1879 ops
->to_info_record
= record_btrace_info
;
1880 ops
->to_insn_history
= record_btrace_insn_history
;
1881 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
1882 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
1883 ops
->to_call_history
= record_btrace_call_history
;
1884 ops
->to_call_history_from
= record_btrace_call_history_from
;
1885 ops
->to_call_history_range
= record_btrace_call_history_range
;
1886 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
1887 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
1888 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
1889 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
1890 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
1891 ops
->to_store_registers
= record_btrace_store_registers
;
1892 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
1893 ops
->to_get_unwinder
= &record_btrace_frame_unwind
;
1894 ops
->to_get_tailcall_unwinder
= &record_btrace_tailcall_frame_unwind
;
1895 ops
->to_resume
= record_btrace_resume
;
1896 ops
->to_wait
= record_btrace_wait
;
1897 ops
->to_find_new_threads
= record_btrace_find_new_threads
;
1898 ops
->to_thread_alive
= record_btrace_thread_alive
;
1899 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
1900 ops
->to_goto_record_end
= record_btrace_goto_end
;
1901 ops
->to_goto_record
= record_btrace_goto
;
1902 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
1903 ops
->to_decr_pc_after_break
= record_btrace_decr_pc_after_break
;
1904 ops
->to_stratum
= record_stratum
;
1905 ops
->to_magic
= OPS_MAGIC
;
1908 /* Alias for "target record". */
1911 cmd_record_btrace_start (char *args
, int from_tty
)
1913 if (args
!= NULL
&& *args
!= 0)
1914 error (_("Invalid argument."));
1916 execute_command ("target record-btrace", from_tty
);
1919 void _initialize_record_btrace (void);
1921 /* Initialize btrace commands. */
1924 _initialize_record_btrace (void)
1926 add_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
1927 _("Start branch trace recording."),
1929 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
1931 init_record_btrace_ops ();
1932 add_target (&record_btrace_ops
);
1934 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,