1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops
;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer
*record_btrace_thread_observer
;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only
[] = "read-only";
49 static const char replay_memory_access_read_write
[] = "read-write";
50 static const char *const replay_memory_access_types
[] =
52 replay_memory_access_read_only
,
53 replay_memory_access_read_write
,
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access
= replay_memory_access_read_only
;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element
*set_record_btrace_cmdlist
;
62 static struct cmd_list_element
*show_record_btrace_cmdlist
;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile
;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf
;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element
*record_btrace_cmdlist
;
79 /* Command lists for "set/show record btrace". */
80 static struct cmd_list_element
*set_record_btrace_cmdlist
;
81 static struct cmd_list_element
*show_record_btrace_cmdlist
;
83 /* Command lists for "set/show record btrace bts". */
84 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
85 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
87 /* Print a record-btrace debug message. Use do ... while (0) to avoid
88 ambiguities when used in if statements. */
90 #define DEBUG(msg, args...) \
93 if (record_debug != 0) \
94 fprintf_unfiltered (gdb_stdlog, \
95 "[record-btrace] " msg "\n", ##args); \
100 /* Update the branch trace for the current thread and return a pointer to its
103 Throws an error if there is no thread or no trace. This function never
106 static struct thread_info
*
107 require_btrace_thread (void)
109 struct thread_info
*tp
;
113 tp
= find_thread_ptid (inferior_ptid
);
115 error (_("No thread."));
119 if (btrace_is_empty (tp
))
120 error (_("No trace."));
125 /* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
128 Throws an error if there is no thread or no trace. This function never
131 static struct btrace_thread_info
*
132 require_btrace (void)
134 struct thread_info
*tp
;
136 tp
= require_btrace_thread ();
141 /* Enable branch tracing for one thread. Warn on errors. */
144 record_btrace_enable_warn (struct thread_info
*tp
)
146 volatile struct gdb_exception error
;
148 TRY_CATCH (error
, RETURN_MASK_ERROR
)
149 btrace_enable (tp
, &record_btrace_conf
);
151 if (error
.message
!= NULL
)
152 warning ("%s", error
.message
);
155 /* Callback function to disable branch tracing for one thread. */
158 record_btrace_disable_callback (void *arg
)
160 struct thread_info
*tp
;
167 /* Enable automatic tracing of new threads. */
170 record_btrace_auto_enable (void)
172 DEBUG ("attach thread observer");
174 record_btrace_thread_observer
175 = observer_attach_new_thread (record_btrace_enable_warn
);
178 /* Disable automatic tracing of new threads. */
181 record_btrace_auto_disable (void)
183 /* The observer may have been detached, already. */
184 if (record_btrace_thread_observer
== NULL
)
187 DEBUG ("detach thread observer");
189 observer_detach_new_thread (record_btrace_thread_observer
);
190 record_btrace_thread_observer
= NULL
;
193 /* The record-btrace async event handler function. */
196 record_btrace_handle_async_inferior_event (gdb_client_data data
)
198 inferior_event_handler (INF_REG_EVENT
, NULL
);
201 /* The to_open method of target record-btrace. */
204 record_btrace_open (const char *args
, int from_tty
)
206 struct cleanup
*disable_chain
;
207 struct thread_info
*tp
;
213 if (!target_has_execution
)
214 error (_("The program is not being run."));
217 error (_("Record btrace can't debug inferior in non-stop mode."));
219 gdb_assert (record_btrace_thread_observer
== NULL
);
221 disable_chain
= make_cleanup (null_cleanup
, NULL
);
222 ALL_NON_EXITED_THREADS (tp
)
223 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
225 btrace_enable (tp
, &record_btrace_conf
);
227 make_cleanup (record_btrace_disable_callback
, tp
);
230 record_btrace_auto_enable ();
232 push_target (&record_btrace_ops
);
234 record_btrace_async_inferior_event_handler
235 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
237 record_btrace_generating_corefile
= 0;
239 observer_notify_record_changed (current_inferior (), 1);
241 discard_cleanups (disable_chain
);
244 /* The to_stop_recording method of target record-btrace. */
247 record_btrace_stop_recording (struct target_ops
*self
)
249 struct thread_info
*tp
;
251 DEBUG ("stop recording");
253 record_btrace_auto_disable ();
255 ALL_NON_EXITED_THREADS (tp
)
256 if (tp
->btrace
.target
!= NULL
)
260 /* The to_close method of target record-btrace. */
263 record_btrace_close (struct target_ops
*self
)
265 struct thread_info
*tp
;
267 if (record_btrace_async_inferior_event_handler
!= NULL
)
268 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
270 /* Make sure automatic recording gets disabled even if we did not stop
271 recording before closing the record-btrace target. */
272 record_btrace_auto_disable ();
274 /* We should have already stopped recording.
275 Tear down btrace in case we have not. */
276 ALL_NON_EXITED_THREADS (tp
)
277 btrace_teardown (tp
);
280 /* The to_async method of target record-btrace. */
283 record_btrace_async (struct target_ops
*ops
,
284 void (*callback
) (enum inferior_event_type event_type
,
288 if (callback
!= NULL
)
289 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
291 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
293 ops
->beneath
->to_async (ops
->beneath
, callback
, context
);
296 /* Adjusts the size and returns a human readable size suffix. */
299 record_btrace_adjust_size (unsigned int *size
)
305 if ((sz
& ((1u << 30) - 1)) == 0)
310 else if ((sz
& ((1u << 20) - 1)) == 0)
315 else if ((sz
& ((1u << 10) - 1)) == 0)
324 /* Print a BTS configuration. */
327 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
335 suffix
= record_btrace_adjust_size (&size
);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
340 /* Print a branch tracing configuration. */
343 record_btrace_print_conf (const struct btrace_config
*conf
)
345 printf_unfiltered (_("Recording format: %s.\n"),
346 btrace_format_string (conf
->format
));
348 switch (conf
->format
)
350 case BTRACE_FORMAT_NONE
:
353 case BTRACE_FORMAT_BTS
:
354 record_btrace_print_bts_conf (&conf
->bts
);
358 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
361 /* The to_info_record method of target record-btrace. */
364 record_btrace_info (struct target_ops
*self
)
366 struct btrace_thread_info
*btinfo
;
367 const struct btrace_config
*conf
;
368 struct thread_info
*tp
;
369 unsigned int insns
, calls
, gaps
;
373 tp
= find_thread_ptid (inferior_ptid
);
375 error (_("No thread."));
377 btinfo
= &tp
->btrace
;
379 conf
= btrace_conf (btinfo
);
381 record_btrace_print_conf (conf
);
389 if (!btrace_is_empty (tp
))
391 struct btrace_call_iterator call
;
392 struct btrace_insn_iterator insn
;
394 btrace_call_end (&call
, btinfo
);
395 btrace_call_prev (&call
, 1);
396 calls
= btrace_call_number (&call
);
398 btrace_insn_end (&insn
, btinfo
);
400 insns
= btrace_insn_number (&insn
);
403 /* The last instruction does not really belong to the trace. */
410 /* Skip gaps at the end. */
413 steps
= btrace_insn_prev (&insn
, 1);
417 insns
= btrace_insn_number (&insn
);
422 gaps
= btinfo
->ngaps
;
425 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
426 "for thread %d (%s).\n"), insns
, calls
, gaps
,
427 tp
->num
, target_pid_to_str (tp
->ptid
));
429 if (btrace_is_replaying (tp
))
430 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
431 btrace_insn_number (btinfo
->replay
));
434 /* Print a decode error. */
437 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
438 enum btrace_format format
)
443 errstr
= _("unknown");
451 case BTRACE_FORMAT_BTS
:
457 case BDE_BTS_OVERFLOW
:
458 errstr
= _("instruction overflow");
461 case BDE_BTS_INSN_SIZE
:
462 errstr
= _("unknown instruction");
468 ui_out_text (uiout
, _("["));
471 ui_out_text (uiout
, _("decode error ("));
472 ui_out_field_int (uiout
, "errcode", errcode
);
473 ui_out_text (uiout
, _("): "));
475 ui_out_text (uiout
, errstr
);
476 ui_out_text (uiout
, _("]\n"));
/* Print an unsigned int.

   Thin convenience wrapper around ui_out_field_fmt using the "%u"
   conversion for field FLD with value VAL.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
487 /* Disassemble a section of the recorded instruction trace. */
490 btrace_insn_history (struct ui_out
*uiout
,
491 const struct btrace_thread_info
*btinfo
,
492 const struct btrace_insn_iterator
*begin
,
493 const struct btrace_insn_iterator
*end
, int flags
)
495 struct gdbarch
*gdbarch
;
496 struct btrace_insn_iterator it
;
498 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
499 btrace_insn_number (end
));
501 gdbarch
= target_gdbarch ();
503 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
505 const struct btrace_insn
*insn
;
507 insn
= btrace_insn_get (&it
);
509 /* A NULL instruction indicates a gap in the trace. */
512 const struct btrace_config
*conf
;
514 conf
= btrace_conf (btinfo
);
516 /* We have trace so we must have a configuration. */
517 gdb_assert (conf
!= NULL
);
519 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
524 /* Print the instruction index. */
525 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
526 ui_out_text (uiout
, "\t");
528 /* Disassembly with '/m' flag may not produce the expected result.
530 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
,
536 /* The to_insn_history method of target record-btrace. */
539 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
541 struct btrace_thread_info
*btinfo
;
542 struct btrace_insn_history
*history
;
543 struct btrace_insn_iterator begin
, end
;
544 struct cleanup
*uiout_cleanup
;
545 struct ui_out
*uiout
;
546 unsigned int context
, covered
;
548 uiout
= current_uiout
;
549 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
551 context
= abs (size
);
553 error (_("Bad record instruction-history-size."));
555 btinfo
= require_btrace ();
556 history
= btinfo
->insn_history
;
559 struct btrace_insn_iterator
*replay
;
561 DEBUG ("insn-history (0x%x): %d", flags
, size
);
563 /* If we're replaying, we start at the replay position. Otherwise, we
564 start at the tail of the trace. */
565 replay
= btinfo
->replay
;
569 btrace_insn_end (&begin
, btinfo
);
571 /* We start from here and expand in the requested direction. Then we
572 expand in the other direction, as well, to fill up any remaining
577 /* We want the current position covered, as well. */
578 covered
= btrace_insn_next (&end
, 1);
579 covered
+= btrace_insn_prev (&begin
, context
- covered
);
580 covered
+= btrace_insn_next (&end
, context
- covered
);
584 covered
= btrace_insn_next (&end
, context
);
585 covered
+= btrace_insn_prev (&begin
, context
- covered
);
590 begin
= history
->begin
;
593 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
594 btrace_insn_number (&begin
), btrace_insn_number (&end
));
599 covered
= btrace_insn_prev (&begin
, context
);
604 covered
= btrace_insn_next (&end
, context
);
609 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
613 printf_unfiltered (_("At the start of the branch trace record.\n"));
615 printf_unfiltered (_("At the end of the branch trace record.\n"));
618 btrace_set_insn_history (btinfo
, &begin
, &end
);
619 do_cleanups (uiout_cleanup
);
622 /* The to_insn_history_range method of target record-btrace. */
625 record_btrace_insn_history_range (struct target_ops
*self
,
626 ULONGEST from
, ULONGEST to
, int flags
)
628 struct btrace_thread_info
*btinfo
;
629 struct btrace_insn_history
*history
;
630 struct btrace_insn_iterator begin
, end
;
631 struct cleanup
*uiout_cleanup
;
632 struct ui_out
*uiout
;
633 unsigned int low
, high
;
636 uiout
= current_uiout
;
637 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
642 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
644 /* Check for wrap-arounds. */
645 if (low
!= from
|| high
!= to
)
646 error (_("Bad range."));
649 error (_("Bad range."));
651 btinfo
= require_btrace ();
653 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
655 error (_("Range out of bounds."));
657 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
660 /* Silently truncate the range. */
661 btrace_insn_end (&end
, btinfo
);
665 /* We want both begin and end to be inclusive. */
666 btrace_insn_next (&end
, 1);
669 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
670 btrace_set_insn_history (btinfo
, &begin
, &end
);
672 do_cleanups (uiout_cleanup
);
675 /* The to_insn_history_from method of target record-btrace. */
678 record_btrace_insn_history_from (struct target_ops
*self
,
679 ULONGEST from
, int size
, int flags
)
681 ULONGEST begin
, end
, context
;
683 context
= abs (size
);
685 error (_("Bad record instruction-history-size."));
694 begin
= from
- context
+ 1;
699 end
= from
+ context
- 1;
701 /* Check for wrap-around. */
706 record_btrace_insn_history_range (self
, begin
, end
, flags
);
709 /* Print the instruction number range for a function call history line. */
712 btrace_call_history_insn_range (struct ui_out
*uiout
,
713 const struct btrace_function
*bfun
)
715 unsigned int begin
, end
, size
;
717 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
718 gdb_assert (size
> 0);
720 begin
= bfun
->insn_offset
;
721 end
= begin
+ size
- 1;
723 ui_out_field_uint (uiout
, "insn begin", begin
);
724 ui_out_text (uiout
, ",");
725 ui_out_field_uint (uiout
, "insn end", end
);
728 /* Print the source line information for a function call history line. */
731 btrace_call_history_src_line (struct ui_out
*uiout
,
732 const struct btrace_function
*bfun
)
741 ui_out_field_string (uiout
, "file",
742 symtab_to_filename_for_display (symbol_symtab (sym
)));
744 begin
= bfun
->lbegin
;
750 ui_out_text (uiout
, ":");
751 ui_out_field_int (uiout
, "min line", begin
);
756 ui_out_text (uiout
, ",");
757 ui_out_field_int (uiout
, "max line", end
);
760 /* Get the name of a branch trace function. */
763 btrace_get_bfun_name (const struct btrace_function
*bfun
)
765 struct minimal_symbol
*msym
;
775 return SYMBOL_PRINT_NAME (sym
);
776 else if (msym
!= NULL
)
777 return MSYMBOL_PRINT_NAME (msym
);
782 /* Disassemble a section of the recorded function trace. */
785 btrace_call_history (struct ui_out
*uiout
,
786 const struct btrace_thread_info
*btinfo
,
787 const struct btrace_call_iterator
*begin
,
788 const struct btrace_call_iterator
*end
,
789 enum record_print_flag flags
)
791 struct btrace_call_iterator it
;
793 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
794 btrace_call_number (end
));
796 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
798 const struct btrace_function
*bfun
;
799 struct minimal_symbol
*msym
;
802 bfun
= btrace_call_get (&it
);
806 /* Print the function index. */
807 ui_out_field_uint (uiout
, "index", bfun
->number
);
808 ui_out_text (uiout
, "\t");
810 /* Indicate gaps in the trace. */
811 if (bfun
->errcode
!= 0)
813 const struct btrace_config
*conf
;
815 conf
= btrace_conf (btinfo
);
817 /* We have trace so we must have a configuration. */
818 gdb_assert (conf
!= NULL
);
820 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
825 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
827 int level
= bfun
->level
+ btinfo
->level
, i
;
829 for (i
= 0; i
< level
; ++i
)
830 ui_out_text (uiout
, " ");
834 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
835 else if (msym
!= NULL
)
836 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
837 else if (!ui_out_is_mi_like_p (uiout
))
838 ui_out_field_string (uiout
, "function", "??");
840 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
842 ui_out_text (uiout
, _("\tinst "));
843 btrace_call_history_insn_range (uiout
, bfun
);
846 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
848 ui_out_text (uiout
, _("\tat "));
849 btrace_call_history_src_line (uiout
, bfun
);
852 ui_out_text (uiout
, "\n");
856 /* The to_call_history method of target record-btrace. */
859 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
861 struct btrace_thread_info
*btinfo
;
862 struct btrace_call_history
*history
;
863 struct btrace_call_iterator begin
, end
;
864 struct cleanup
*uiout_cleanup
;
865 struct ui_out
*uiout
;
866 unsigned int context
, covered
;
868 uiout
= current_uiout
;
869 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
871 context
= abs (size
);
873 error (_("Bad record function-call-history-size."));
875 btinfo
= require_btrace ();
876 history
= btinfo
->call_history
;
879 struct btrace_insn_iterator
*replay
;
881 DEBUG ("call-history (0x%x): %d", flags
, size
);
883 /* If we're replaying, we start at the replay position. Otherwise, we
884 start at the tail of the trace. */
885 replay
= btinfo
->replay
;
888 begin
.function
= replay
->function
;
889 begin
.btinfo
= btinfo
;
892 btrace_call_end (&begin
, btinfo
);
894 /* We start from here and expand in the requested direction. Then we
895 expand in the other direction, as well, to fill up any remaining
900 /* We want the current position covered, as well. */
901 covered
= btrace_call_next (&end
, 1);
902 covered
+= btrace_call_prev (&begin
, context
- covered
);
903 covered
+= btrace_call_next (&end
, context
- covered
);
907 covered
= btrace_call_next (&end
, context
);
908 covered
+= btrace_call_prev (&begin
, context
- covered
);
913 begin
= history
->begin
;
916 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
917 btrace_call_number (&begin
), btrace_call_number (&end
));
922 covered
= btrace_call_prev (&begin
, context
);
927 covered
= btrace_call_next (&end
, context
);
932 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
936 printf_unfiltered (_("At the start of the branch trace record.\n"));
938 printf_unfiltered (_("At the end of the branch trace record.\n"));
941 btrace_set_call_history (btinfo
, &begin
, &end
);
942 do_cleanups (uiout_cleanup
);
945 /* The to_call_history_range method of target record-btrace. */
948 record_btrace_call_history_range (struct target_ops
*self
,
949 ULONGEST from
, ULONGEST to
, int flags
)
951 struct btrace_thread_info
*btinfo
;
952 struct btrace_call_history
*history
;
953 struct btrace_call_iterator begin
, end
;
954 struct cleanup
*uiout_cleanup
;
955 struct ui_out
*uiout
;
956 unsigned int low
, high
;
959 uiout
= current_uiout
;
960 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
965 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
967 /* Check for wrap-arounds. */
968 if (low
!= from
|| high
!= to
)
969 error (_("Bad range."));
972 error (_("Bad range."));
974 btinfo
= require_btrace ();
976 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
978 error (_("Range out of bounds."));
980 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
983 /* Silently truncate the range. */
984 btrace_call_end (&end
, btinfo
);
988 /* We want both begin and end to be inclusive. */
989 btrace_call_next (&end
, 1);
992 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
993 btrace_set_call_history (btinfo
, &begin
, &end
);
995 do_cleanups (uiout_cleanup
);
998 /* The to_call_history_from method of target record-btrace. */
1001 record_btrace_call_history_from (struct target_ops
*self
,
1002 ULONGEST from
, int size
, int flags
)
1004 ULONGEST begin
, end
, context
;
1006 context
= abs (size
);
1008 error (_("Bad record function-call-history-size."));
1017 begin
= from
- context
+ 1;
1022 end
= from
+ context
- 1;
1024 /* Check for wrap-around. */
1029 record_btrace_call_history_range (self
, begin
, end
, flags
);
1032 /* The to_record_is_replaying method of target record-btrace. */
1035 record_btrace_is_replaying (struct target_ops
*self
)
1037 struct thread_info
*tp
;
1039 ALL_NON_EXITED_THREADS (tp
)
1040 if (btrace_is_replaying (tp
))
1046 /* The to_xfer_partial method of target record-btrace. */
1048 static enum target_xfer_status
1049 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1050 const char *annex
, gdb_byte
*readbuf
,
1051 const gdb_byte
*writebuf
, ULONGEST offset
,
1052 ULONGEST len
, ULONGEST
*xfered_len
)
1054 struct target_ops
*t
;
1056 /* Filter out requests that don't make sense during replay. */
1057 if (replay_memory_access
== replay_memory_access_read_only
1058 && !record_btrace_generating_corefile
1059 && record_btrace_is_replaying (ops
))
1063 case TARGET_OBJECT_MEMORY
:
1065 struct target_section
*section
;
1067 /* We do not allow writing memory in general. */
1068 if (writebuf
!= NULL
)
1071 return TARGET_XFER_UNAVAILABLE
;
1074 /* We allow reading readonly memory. */
1075 section
= target_section_by_addr (ops
, offset
);
1076 if (section
!= NULL
)
1078 /* Check if the section we found is readonly. */
1079 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1080 section
->the_bfd_section
)
1081 & SEC_READONLY
) != 0)
1083 /* Truncate the request to fit into this section. */
1084 len
= min (len
, section
->endaddr
- offset
);
1090 return TARGET_XFER_UNAVAILABLE
;
1095 /* Forward the request. */
1097 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1098 offset
, len
, xfered_len
);
1101 /* The to_insert_breakpoint method of target record-btrace. */
1104 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1105 struct gdbarch
*gdbarch
,
1106 struct bp_target_info
*bp_tgt
)
1108 volatile struct gdb_exception except
;
1112 /* Inserting breakpoints requires accessing memory. Allow it for the
1113 duration of this function. */
1114 old
= replay_memory_access
;
1115 replay_memory_access
= replay_memory_access_read_write
;
1118 TRY_CATCH (except
, RETURN_MASK_ALL
)
1119 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1121 replay_memory_access
= old
;
1123 if (except
.reason
< 0)
1124 throw_exception (except
);
1129 /* The to_remove_breakpoint method of target record-btrace. */
1132 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1133 struct gdbarch
*gdbarch
,
1134 struct bp_target_info
*bp_tgt
)
1136 volatile struct gdb_exception except
;
1140 /* Removing breakpoints requires accessing memory. Allow it for the
1141 duration of this function. */
1142 old
= replay_memory_access
;
1143 replay_memory_access
= replay_memory_access_read_write
;
1146 TRY_CATCH (except
, RETURN_MASK_ALL
)
1147 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1149 replay_memory_access
= old
;
1151 if (except
.reason
< 0)
1152 throw_exception (except
);
1157 /* The to_fetch_registers method of target record-btrace. */
1160 record_btrace_fetch_registers (struct target_ops
*ops
,
1161 struct regcache
*regcache
, int regno
)
1163 struct btrace_insn_iterator
*replay
;
1164 struct thread_info
*tp
;
1166 tp
= find_thread_ptid (inferior_ptid
);
1167 gdb_assert (tp
!= NULL
);
1169 replay
= tp
->btrace
.replay
;
1170 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1172 const struct btrace_insn
*insn
;
1173 struct gdbarch
*gdbarch
;
1176 gdbarch
= get_regcache_arch (regcache
);
1177 pcreg
= gdbarch_pc_regnum (gdbarch
);
1181 /* We can only provide the PC register. */
1182 if (regno
>= 0 && regno
!= pcreg
)
1185 insn
= btrace_insn_get (replay
);
1186 gdb_assert (insn
!= NULL
);
1188 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1192 struct target_ops
*t
= ops
->beneath
;
1194 t
->to_fetch_registers (t
, regcache
, regno
);
1198 /* The to_store_registers method of target record-btrace. */
1201 record_btrace_store_registers (struct target_ops
*ops
,
1202 struct regcache
*regcache
, int regno
)
1204 struct target_ops
*t
;
1206 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1207 error (_("This record target does not allow writing registers."));
1209 gdb_assert (may_write_registers
!= 0);
1212 t
->to_store_registers (t
, regcache
, regno
);
1215 /* The to_prepare_to_store method of target record-btrace. */
1218 record_btrace_prepare_to_store (struct target_ops
*ops
,
1219 struct regcache
*regcache
)
1221 struct target_ops
*t
;
1223 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1227 t
->to_prepare_to_store (t
, regcache
);
1230 /* The branch trace frame cache. */
1232 struct btrace_frame_cache
1235 struct thread_info
*tp
;
1237 /* The frame info. */
1238 struct frame_info
*frame
;
1240 /* The branch trace function segment. */
1241 const struct btrace_function
*bfun
;
1244 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1246 static htab_t bfcache
;
1248 /* hash_f for htab_create_alloc of bfcache. */
1251 bfcache_hash (const void *arg
)
1253 const struct btrace_frame_cache
*cache
= arg
;
1255 return htab_hash_pointer (cache
->frame
);
1258 /* eq_f for htab_create_alloc of bfcache. */
1261 bfcache_eq (const void *arg1
, const void *arg2
)
1263 const struct btrace_frame_cache
*cache1
= arg1
;
1264 const struct btrace_frame_cache
*cache2
= arg2
;
1266 return cache1
->frame
== cache2
->frame
;
1269 /* Create a new btrace frame cache. */
1271 static struct btrace_frame_cache
*
1272 bfcache_new (struct frame_info
*frame
)
1274 struct btrace_frame_cache
*cache
;
1277 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1278 cache
->frame
= frame
;
1280 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1281 gdb_assert (*slot
== NULL
);
1287 /* Extract the branch trace function from a branch trace frame. */
1289 static const struct btrace_function
*
1290 btrace_get_frame_function (struct frame_info
*frame
)
1292 const struct btrace_frame_cache
*cache
;
1293 const struct btrace_function
*bfun
;
1294 struct btrace_frame_cache pattern
;
1297 pattern
.frame
= frame
;
1299 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1307 /* Implement stop_reason method for record_btrace_frame_unwind. */
1309 static enum unwind_stop_reason
1310 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1313 const struct btrace_frame_cache
*cache
;
1314 const struct btrace_function
*bfun
;
1316 cache
= *this_cache
;
1318 gdb_assert (bfun
!= NULL
);
1320 if (bfun
->up
== NULL
)
1321 return UNWIND_UNAVAILABLE
;
1323 return UNWIND_NO_REASON
;
1326 /* Implement this_id method for record_btrace_frame_unwind. */
1329 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1330 struct frame_id
*this_id
)
1332 const struct btrace_frame_cache
*cache
;
1333 const struct btrace_function
*bfun
;
1334 CORE_ADDR code
, special
;
1336 cache
= *this_cache
;
1339 gdb_assert (bfun
!= NULL
);
1341 while (bfun
->segment
.prev
!= NULL
)
1342 bfun
= bfun
->segment
.prev
;
1344 code
= get_frame_func (this_frame
);
1345 special
= bfun
->number
;
1347 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1349 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1350 btrace_get_bfun_name (cache
->bfun
),
1351 core_addr_to_string_nz (this_id
->code_addr
),
1352 core_addr_to_string_nz (this_id
->special_addr
));
1355 /* Implement prev_register method for record_btrace_frame_unwind. */
1357 static struct value
*
1358 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1362 const struct btrace_frame_cache
*cache
;
1363 const struct btrace_function
*bfun
, *caller
;
1364 const struct btrace_insn
*insn
;
1365 struct gdbarch
*gdbarch
;
1369 gdbarch
= get_frame_arch (this_frame
);
1370 pcreg
= gdbarch_pc_regnum (gdbarch
);
1371 if (pcreg
< 0 || regnum
!= pcreg
)
1372 throw_error (NOT_AVAILABLE_ERROR
,
1373 _("Registers are not available in btrace record history"));
1375 cache
= *this_cache
;
1377 gdb_assert (bfun
!= NULL
);
1381 throw_error (NOT_AVAILABLE_ERROR
,
1382 _("No caller in btrace record history"));
1384 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1386 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1391 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1394 pc
+= gdb_insn_length (gdbarch
, pc
);
1397 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1398 btrace_get_bfun_name (bfun
), bfun
->level
,
1399 core_addr_to_string_nz (pc
));
1401 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1404 /* Implement sniffer method for record_btrace_frame_unwind. */
1407 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1408 struct frame_info
*this_frame
,
1411 const struct btrace_function
*bfun
;
1412 struct btrace_frame_cache
*cache
;
1413 struct thread_info
*tp
;
1414 struct frame_info
*next
;
1416 /* THIS_FRAME does not contain a reference to its thread. */
1417 tp
= find_thread_ptid (inferior_ptid
);
1418 gdb_assert (tp
!= NULL
);
1421 next
= get_next_frame (this_frame
);
1424 const struct btrace_insn_iterator
*replay
;
1426 replay
= tp
->btrace
.replay
;
1428 bfun
= replay
->function
;
1432 const struct btrace_function
*callee
;
1434 callee
= btrace_get_frame_function (next
);
1435 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1442 DEBUG ("[frame] sniffed frame for %s on level %d",
1443 btrace_get_bfun_name (bfun
), bfun
->level
);
1445 /* This is our frame. Initialize the frame cache. */
1446 cache
= bfcache_new (this_frame
);
1450 *this_cache
= cache
;
1454 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1457 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1458 struct frame_info
*this_frame
,
1461 const struct btrace_function
*bfun
, *callee
;
1462 struct btrace_frame_cache
*cache
;
1463 struct frame_info
*next
;
1465 next
= get_next_frame (this_frame
);
1469 callee
= btrace_get_frame_function (next
);
1473 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1480 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1481 btrace_get_bfun_name (bfun
), bfun
->level
);
1483 /* This is our frame. Initialize the frame cache. */
1484 cache
= bfcache_new (this_frame
);
1485 cache
->tp
= find_thread_ptid (inferior_ptid
);
1488 *this_cache
= cache
;
1493 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1495 struct btrace_frame_cache
*cache
;
1500 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1501 gdb_assert (slot
!= NULL
);
1503 htab_remove_elt (bfcache
, cache
);
1506 /* btrace recording does not store previous memory content, neither the stack
1507 frames content. Any unwinding would return errorneous results as the stack
1508 contents no longer matches the changed PC value restored from history.
1509 Therefore this unwinder reports any possibly unwound registers as
1512 const struct frame_unwind record_btrace_frame_unwind
=
1515 record_btrace_frame_unwind_stop_reason
,
1516 record_btrace_frame_this_id
,
1517 record_btrace_frame_prev_register
,
1519 record_btrace_frame_sniffer
,
1520 record_btrace_frame_dealloc_cache
1523 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1526 record_btrace_frame_unwind_stop_reason
,
1527 record_btrace_frame_this_id
,
1528 record_btrace_frame_prev_register
,
1530 record_btrace_tailcall_frame_sniffer
,
1531 record_btrace_frame_dealloc_cache
1534 /* Implement the to_get_unwinder method. */
1536 static const struct frame_unwind
*
1537 record_btrace_to_get_unwinder (struct target_ops
*self
)
1539 return &record_btrace_frame_unwind
;
1542 /* Implement the to_get_tailcall_unwinder method. */
1544 static const struct frame_unwind
*
1545 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1547 return &record_btrace_tailcall_frame_unwind
;
1550 /* Indicate that TP should be resumed according to FLAG. */
1553 record_btrace_resume_thread (struct thread_info
*tp
,
1554 enum btrace_thread_flag flag
)
1556 struct btrace_thread_info
*btinfo
;
1558 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1560 btinfo
= &tp
->btrace
;
1562 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1563 error (_("Thread already moving."));
1565 /* Fetch the latest branch trace. */
1568 btinfo
->flags
|= flag
;
1571 /* Find the thread to resume given a PTID. */
1573 static struct thread_info
*
1574 record_btrace_find_resume_thread (ptid_t ptid
)
1576 struct thread_info
*tp
;
1578 /* When asked to resume everything, we pick the current thread. */
1579 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
))
1580 ptid
= inferior_ptid
;
1582 return find_thread_ptid (ptid
);
1585 /* Start replaying a thread. */
1587 static struct btrace_insn_iterator
*
1588 record_btrace_start_replaying (struct thread_info
*tp
)
1590 volatile struct gdb_exception except
;
1591 struct btrace_insn_iterator
*replay
;
1592 struct btrace_thread_info
*btinfo
;
1595 btinfo
= &tp
->btrace
;
1598 /* We can't start replaying without trace. */
1599 if (btinfo
->begin
== NULL
)
1602 /* Clear the executing flag to allow changes to the current frame.
1603 We are not actually running, yet. We just started a reverse execution
1604 command or a record goto command.
1605 For the latter, EXECUTING is false and this has no effect.
1606 For the former, EXECUTING is true and we're in to_wait, about to
1607 move the thread. Since we need to recompute the stack, we temporarily
1608 set EXECUTING to flase. */
1609 executing
= is_executing (tp
->ptid
);
1610 set_executing (tp
->ptid
, 0);
1612 /* GDB stores the current frame_id when stepping in order to detects steps
1614 Since frames are computed differently when we're replaying, we need to
1615 recompute those stored frames and fix them up so we can still detect
1616 subroutines after we started replaying. */
1617 TRY_CATCH (except
, RETURN_MASK_ALL
)
1619 struct frame_info
*frame
;
1620 struct frame_id frame_id
;
1621 int upd_step_frame_id
, upd_step_stack_frame_id
;
1623 /* The current frame without replaying - computed via normal unwind. */
1624 frame
= get_current_frame ();
1625 frame_id
= get_frame_id (frame
);
1627 /* Check if we need to update any stepping-related frame id's. */
1628 upd_step_frame_id
= frame_id_eq (frame_id
,
1629 tp
->control
.step_frame_id
);
1630 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1631 tp
->control
.step_stack_frame_id
);
1633 /* We start replaying at the end of the branch trace. This corresponds
1634 to the current instruction. */
1635 replay
= xmalloc (sizeof (*replay
));
1636 btrace_insn_end (replay
, btinfo
);
1638 /* Skip gaps at the end of the trace. */
1639 while (btrace_insn_get (replay
) == NULL
)
1643 steps
= btrace_insn_prev (replay
, 1);
1645 error (_("No trace."));
1648 /* We're not replaying, yet. */
1649 gdb_assert (btinfo
->replay
== NULL
);
1650 btinfo
->replay
= replay
;
1652 /* Make sure we're not using any stale registers. */
1653 registers_changed_ptid (tp
->ptid
);
1655 /* The current frame with replaying - computed via btrace unwind. */
1656 frame
= get_current_frame ();
1657 frame_id
= get_frame_id (frame
);
1659 /* Replace stepping related frames where necessary. */
1660 if (upd_step_frame_id
)
1661 tp
->control
.step_frame_id
= frame_id
;
1662 if (upd_step_stack_frame_id
)
1663 tp
->control
.step_stack_frame_id
= frame_id
;
1666 /* Restore the previous execution state. */
1667 set_executing (tp
->ptid
, executing
);
1669 if (except
.reason
< 0)
1671 xfree (btinfo
->replay
);
1672 btinfo
->replay
= NULL
;
1674 registers_changed_ptid (tp
->ptid
);
1676 throw_exception (except
);
1682 /* Stop replaying a thread. */
1685 record_btrace_stop_replaying (struct thread_info
*tp
)
1687 struct btrace_thread_info
*btinfo
;
1689 btinfo
= &tp
->btrace
;
1691 xfree (btinfo
->replay
);
1692 btinfo
->replay
= NULL
;
1694 /* Make sure we're not leaving any stale registers. */
1695 registers_changed_ptid (tp
->ptid
);
1698 /* The to_resume method of target record-btrace. */
1701 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1702 enum gdb_signal signal
)
1704 struct thread_info
*tp
, *other
;
1705 enum btrace_thread_flag flag
;
1707 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1709 /* Store the execution direction of the last resume. */
1710 record_btrace_resume_exec_dir
= execution_direction
;
1712 tp
= record_btrace_find_resume_thread (ptid
);
1714 error (_("Cannot find thread to resume."));
1716 /* Stop replaying other threads if the thread to resume is not replaying. */
1717 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1718 ALL_NON_EXITED_THREADS (other
)
1719 record_btrace_stop_replaying (other
);
1721 /* As long as we're not replaying, just forward the request. */
1722 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1725 return ops
->to_resume (ops
, ptid
, step
, signal
);
1728 /* Compute the btrace thread flag for the requested move. */
1730 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1732 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1734 /* At the moment, we only move a single thread. We could also move
1735 all threads in parallel by single-stepping each resumed thread
1736 until the first runs into an event.
1737 When we do that, we would want to continue all other threads.
1738 For now, just resume one thread to not confuse to_wait. */
1739 record_btrace_resume_thread (tp
, flag
);
1741 /* We just indicate the resume intent here. The actual stepping happens in
1742 record_btrace_wait below. */
1744 /* Async support. */
1745 if (target_can_async_p ())
1747 target_async (inferior_event_handler
, 0);
1748 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
1752 /* Find a thread to move. */
1754 static struct thread_info
*
1755 record_btrace_find_thread_to_move (ptid_t ptid
)
1757 struct thread_info
*tp
;
1759 /* First check the parameter thread. */
1760 tp
= find_thread_ptid (ptid
);
1761 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1764 /* Otherwise, find one other thread that has been resumed. */
1765 ALL_NON_EXITED_THREADS (tp
)
1766 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1772 /* Return a target_waitstatus indicating that we ran out of history. */
1774 static struct target_waitstatus
1775 btrace_step_no_history (void)
1777 struct target_waitstatus status
;
1779 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1784 /* Return a target_waitstatus indicating that a step finished. */
1786 static struct target_waitstatus
1787 btrace_step_stopped (void)
1789 struct target_waitstatus status
;
1791 status
.kind
= TARGET_WAITKIND_STOPPED
;
1792 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1797 /* Clear the record histories. */
1800 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1802 xfree (btinfo
->insn_history
);
1803 xfree (btinfo
->call_history
);
1805 btinfo
->insn_history
= NULL
;
1806 btinfo
->call_history
= NULL
;
1809 /* Step a single thread. */
1811 static struct target_waitstatus
1812 record_btrace_step_thread (struct thread_info
*tp
)
1814 struct btrace_insn_iterator
*replay
, end
;
1815 struct btrace_thread_info
*btinfo
;
1816 struct address_space
*aspace
;
1817 struct inferior
*inf
;
1818 enum btrace_thread_flag flags
;
1821 /* We can't step without an execution history. */
1822 if (btrace_is_empty (tp
))
1823 return btrace_step_no_history ();
1825 btinfo
= &tp
->btrace
;
1826 replay
= btinfo
->replay
;
1828 flags
= btinfo
->flags
& BTHR_MOVE
;
1829 btinfo
->flags
&= ~BTHR_MOVE
;
1831 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1836 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1839 /* We're done if we're not replaying. */
1841 return btrace_step_no_history ();
1843 /* Skip gaps during replay. */
1846 steps
= btrace_insn_next (replay
, 1);
1849 record_btrace_stop_replaying (tp
);
1850 return btrace_step_no_history ();
1853 while (btrace_insn_get (replay
) == NULL
);
1855 /* Determine the end of the instruction trace. */
1856 btrace_insn_end (&end
, btinfo
);
1858 /* We stop replaying if we reached the end of the trace. */
1859 if (btrace_insn_cmp (replay
, &end
) == 0)
1860 record_btrace_stop_replaying (tp
);
1862 return btrace_step_stopped ();
1865 /* Start replaying if we're not already doing so. */
1867 replay
= record_btrace_start_replaying (tp
);
1869 /* If we can't step any further, we reached the end of the history.
1870 Skip gaps during replay. */
1873 steps
= btrace_insn_prev (replay
, 1);
1875 return btrace_step_no_history ();
1878 while (btrace_insn_get (replay
) == NULL
);
1880 return btrace_step_stopped ();
1883 /* We're done if we're not replaying. */
1885 return btrace_step_no_history ();
1887 inf
= find_inferior_ptid (tp
->ptid
);
1888 aspace
= inf
->aspace
;
1890 /* Determine the end of the instruction trace. */
1891 btrace_insn_end (&end
, btinfo
);
1895 const struct btrace_insn
*insn
;
1897 /* Skip gaps during replay. */
1900 steps
= btrace_insn_next (replay
, 1);
1903 record_btrace_stop_replaying (tp
);
1904 return btrace_step_no_history ();
1907 insn
= btrace_insn_get (replay
);
1909 while (insn
== NULL
);
1911 /* We stop replaying if we reached the end of the trace. */
1912 if (btrace_insn_cmp (replay
, &end
) == 0)
1914 record_btrace_stop_replaying (tp
);
1915 return btrace_step_no_history ();
1918 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1919 target_pid_to_str (tp
->ptid
),
1920 core_addr_to_string_nz (insn
->pc
));
1922 if (breakpoint_here_p (aspace
, insn
->pc
))
1923 return btrace_step_stopped ();
1927 /* Start replaying if we're not already doing so. */
1929 replay
= record_btrace_start_replaying (tp
);
1931 inf
= find_inferior_ptid (tp
->ptid
);
1932 aspace
= inf
->aspace
;
1936 const struct btrace_insn
*insn
;
1938 /* If we can't step any further, we reached the end of the history.
1939 Skip gaps during replay. */
1942 steps
= btrace_insn_prev (replay
, 1);
1944 return btrace_step_no_history ();
1946 insn
= btrace_insn_get (replay
);
1948 while (insn
== NULL
);
1950 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1951 target_pid_to_str (tp
->ptid
),
1952 core_addr_to_string_nz (insn
->pc
));
1954 if (breakpoint_here_p (aspace
, insn
->pc
))
1955 return btrace_step_stopped ();
1960 /* The to_wait method of target record-btrace. */
1963 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
1964 struct target_waitstatus
*status
, int options
)
1966 struct thread_info
*tp
, *other
;
1968 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
1970 /* As long as we're not replaying, just forward the request. */
1971 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1974 return ops
->to_wait (ops
, ptid
, status
, options
);
1977 /* Let's find a thread to move. */
1978 tp
= record_btrace_find_thread_to_move (ptid
);
1981 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
1983 status
->kind
= TARGET_WAITKIND_IGNORE
;
1984 return minus_one_ptid
;
1987 /* We only move a single thread. We're not able to correlate threads. */
1988 *status
= record_btrace_step_thread (tp
);
1990 /* Stop all other threads. */
1992 ALL_NON_EXITED_THREADS (other
)
1993 other
->btrace
.flags
&= ~BTHR_MOVE
;
1995 /* Start record histories anew from the current position. */
1996 record_btrace_clear_histories (&tp
->btrace
);
1998 /* We moved the replay position but did not update registers. */
1999 registers_changed_ptid (tp
->ptid
);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2012 /* The to_decr_pc_after_break method of target record-btrace. */
2015 record_btrace_decr_pc_after_break (struct target_ops
*ops
,
2016 struct gdbarch
*gdbarch
)
2018 /* When replaying, we do not actually execute the breakpoint instruction
2019 so there is no need to adjust the PC after hitting a breakpoint. */
2020 if (record_btrace_is_replaying (ops
))
2023 return ops
->beneath
->to_decr_pc_after_break (ops
->beneath
, gdbarch
);
2026 /* The to_update_thread_list method of target record-btrace. */
2029 record_btrace_update_thread_list (struct target_ops
*ops
)
2031 /* We don't add or remove threads during replay. */
2032 if (record_btrace_is_replaying (ops
))
2035 /* Forward the request. */
2037 ops
->to_update_thread_list (ops
);
2040 /* The to_thread_alive method of target record-btrace. */
2043 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2045 /* We don't add or remove threads during replay. */
2046 if (record_btrace_is_replaying (ops
))
2047 return find_thread_ptid (ptid
) != NULL
;
2049 /* Forward the request. */
2051 return ops
->to_thread_alive (ops
, ptid
);
2054 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2058 record_btrace_set_replay (struct thread_info
*tp
,
2059 const struct btrace_insn_iterator
*it
)
2061 struct btrace_thread_info
*btinfo
;
2063 btinfo
= &tp
->btrace
;
2065 if (it
== NULL
|| it
->function
== NULL
)
2066 record_btrace_stop_replaying (tp
);
2069 if (btinfo
->replay
== NULL
)
2070 record_btrace_start_replaying (tp
);
2071 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2074 *btinfo
->replay
= *it
;
2075 registers_changed_ptid (tp
->ptid
);
2078 /* Start anew from the new replay position. */
2079 record_btrace_clear_histories (btinfo
);
2082 /* The to_goto_record_begin method of target record-btrace. */
2085 record_btrace_goto_begin (struct target_ops
*self
)
2087 struct thread_info
*tp
;
2088 struct btrace_insn_iterator begin
;
2090 tp
= require_btrace_thread ();
2092 btrace_insn_begin (&begin
, &tp
->btrace
);
2093 record_btrace_set_replay (tp
, &begin
);
2095 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2098 /* The to_goto_record_end method of target record-btrace. */
2101 record_btrace_goto_end (struct target_ops
*ops
)
2103 struct thread_info
*tp
;
2105 tp
= require_btrace_thread ();
2107 record_btrace_set_replay (tp
, NULL
);
2109 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2112 /* The to_goto_record method of target record-btrace. */
2115 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2117 struct thread_info
*tp
;
2118 struct btrace_insn_iterator it
;
2119 unsigned int number
;
2124 /* Check for wrap-arounds. */
2126 error (_("Instruction number out of range."));
2128 tp
= require_btrace_thread ();
2130 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2132 error (_("No such instruction."));
2134 record_btrace_set_replay (tp
, &it
);
2136 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2139 /* The to_execution_direction target method. */
2141 static enum exec_direction_kind
2142 record_btrace_execution_direction (struct target_ops
*self
)
2144 return record_btrace_resume_exec_dir
;
2147 /* The to_prepare_to_generate_core target method. */
2150 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2152 record_btrace_generating_corefile
= 1;
2155 /* The to_done_generating_core target method. */
2158 record_btrace_done_generating_core (struct target_ops
*self
)
2160 record_btrace_generating_corefile
= 0;
2163 /* Initialize the record-btrace target ops. */
2166 init_record_btrace_ops (void)
2168 struct target_ops
*ops
;
2170 ops
= &record_btrace_ops
;
2171 ops
->to_shortname
= "record-btrace";
2172 ops
->to_longname
= "Branch tracing target";
2173 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2174 ops
->to_open
= record_btrace_open
;
2175 ops
->to_close
= record_btrace_close
;
2176 ops
->to_async
= record_btrace_async
;
2177 ops
->to_detach
= record_detach
;
2178 ops
->to_disconnect
= record_disconnect
;
2179 ops
->to_mourn_inferior
= record_mourn_inferior
;
2180 ops
->to_kill
= record_kill
;
2181 ops
->to_stop_recording
= record_btrace_stop_recording
;
2182 ops
->to_info_record
= record_btrace_info
;
2183 ops
->to_insn_history
= record_btrace_insn_history
;
2184 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2185 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2186 ops
->to_call_history
= record_btrace_call_history
;
2187 ops
->to_call_history_from
= record_btrace_call_history_from
;
2188 ops
->to_call_history_range
= record_btrace_call_history_range
;
2189 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2190 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2191 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2192 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2193 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2194 ops
->to_store_registers
= record_btrace_store_registers
;
2195 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2196 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2197 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2198 ops
->to_resume
= record_btrace_resume
;
2199 ops
->to_wait
= record_btrace_wait
;
2200 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2201 ops
->to_thread_alive
= record_btrace_thread_alive
;
2202 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2203 ops
->to_goto_record_end
= record_btrace_goto_end
;
2204 ops
->to_goto_record
= record_btrace_goto
;
2205 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2206 ops
->to_decr_pc_after_break
= record_btrace_decr_pc_after_break
;
2207 ops
->to_execution_direction
= record_btrace_execution_direction
;
2208 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2209 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2210 ops
->to_stratum
= record_stratum
;
2211 ops
->to_magic
= OPS_MAGIC
;
2214 /* Start recording in BTS format. */
2217 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2219 volatile struct gdb_exception exception
;
2221 if (args
!= NULL
&& *args
!= 0)
2222 error (_("Invalid argument."));
2224 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2226 TRY_CATCH (exception
, RETURN_MASK_ALL
)
2227 execute_command ("target record-btrace", from_tty
);
2229 if (exception
.error
!= 0)
2231 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2232 throw_exception (exception
);
2236 /* Alias for "target record". */
2239 cmd_record_btrace_start (char *args
, int from_tty
)
2241 volatile struct gdb_exception exception
;
2243 if (args
!= NULL
&& *args
!= 0)
2244 error (_("Invalid argument."));
2246 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2248 TRY_CATCH (exception
, RETURN_MASK_ALL
)
2249 execute_command ("target record-btrace", from_tty
);
2251 if (exception
.error
== 0)
2254 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2255 throw_exception (exception
);
2258 /* The "set record btrace" command. */
2261 cmd_set_record_btrace (char *args
, int from_tty
)
2263 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2266 /* The "show record btrace" command. */
2269 cmd_show_record_btrace (char *args
, int from_tty
)
2271 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2274 /* The "show record btrace replay-memory-access" command. */
2277 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2278 struct cmd_list_element
*c
, const char *value
)
2280 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2281 replay_memory_access
);
2284 /* The "set record btrace bts" command. */
2287 cmd_set_record_btrace_bts (char *args
, int from_tty
)
2289 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2290 "by an apporpriate subcommand.\n"));
2291 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2292 all_commands
, gdb_stdout
);
2295 /* The "show record btrace bts" command. */
2298 cmd_show_record_btrace_bts (char *args
, int from_tty
)
2300 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2303 void _initialize_record_btrace (void);
2305 /* Initialize btrace commands. */
2308 _initialize_record_btrace (void)
2310 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
2311 _("Start branch trace recording."), &record_btrace_cmdlist
,
2312 "record btrace ", 0, &record_cmdlist
);
2313 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
2315 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
2317 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2318 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2319 This format may not be available on all processors."),
2320 &record_btrace_cmdlist
);
2321 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
2323 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
2324 _("Set record options"), &set_record_btrace_cmdlist
,
2325 "set record btrace ", 0, &set_record_cmdlist
);
2327 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
2328 _("Show record options"), &show_record_btrace_cmdlist
,
2329 "show record btrace ", 0, &show_record_cmdlist
);
2331 add_setshow_enum_cmd ("replay-memory-access", no_class
,
2332 replay_memory_access_types
, &replay_memory_access
, _("\
2333 Set what memory accesses are allowed during replay."), _("\
2334 Show what memory accesses are allowed during replay."),
2335 _("Default is READ-ONLY.\n\n\
2336 The btrace record target does not trace data.\n\
2337 The memory therefore corresponds to the live target and not \
2338 to the current replay position.\n\n\
2339 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2340 When READ-WRITE, allow accesses to read-only and read-write memory during \
2342 NULL
, cmd_show_replay_memory_access
,
2343 &set_record_btrace_cmdlist
,
2344 &show_record_btrace_cmdlist
);
2346 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
2347 _("Set record btrace bts options"),
2348 &set_record_btrace_bts_cmdlist
,
2349 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
2351 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
2352 _("Show record btrace bts options"),
2353 &show_record_btrace_bts_cmdlist
,
2354 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
2356 add_setshow_uinteger_cmd ("buffer-size", no_class
,
2357 &record_btrace_conf
.bts
.size
,
2358 _("Set the record/replay bts buffer size."),
2359 _("Show the record/replay bts buffer size."), _("\
2360 When starting recording request a trace buffer of this size. \
2361 The actual buffer size may differ from the requested size. \
2362 Use \"info record\" to see the actual buffer size.\n\n\
2363 Bigger buffers allow longer recording but also take more time to process \
2364 the recorded execution trace.\n\n\
2365 The trace buffer size may not be changed while recording."), NULL
, NULL
,
2366 &set_record_btrace_bts_cmdlist
,
2367 &show_record_btrace_bts_cmdlist
);
2369 init_record_btrace_ops ();
2370 add_target (&record_btrace_ops
);
2372 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
2375 record_btrace_conf
.bts
.size
= 64 * 1024;