1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops
;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer
*record_btrace_thread_observer
;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only
[] = "read-only";
49 static const char replay_memory_access_read_write
[] = "read-write";
50 static const char *const replay_memory_access_types
[] =
52 replay_memory_access_read_only
,
53 replay_memory_access_read_write
,
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access
= replay_memory_access_read_only
;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element
*set_record_btrace_cmdlist
;
62 static struct cmd_list_element
*show_record_btrace_cmdlist
;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile
;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf
;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element
*record_btrace_cmdlist
;
79 /* Command lists for "set/show record btrace bts". */
80 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
81 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
83 /* Print a record-btrace debug message. Use do ... while (0) to avoid
84 ambiguities when used in if statements. */
86 #define DEBUG(msg, args...) \
89 if (record_debug != 0) \
90 fprintf_unfiltered (gdb_stdlog, \
91 "[record-btrace] " msg "\n", ##args); \
96 /* Update the branch trace for the current thread and return a pointer to its
99 Throws an error if there is no thread or no trace. This function never
102 static struct thread_info
*
103 require_btrace_thread (void)
105 struct thread_info
*tp
;
109 tp
= find_thread_ptid (inferior_ptid
);
111 error (_("No thread."));
115 if (btrace_is_empty (tp
))
116 error (_("No trace."));
121 /* Update the branch trace for the current thread and return a pointer to its
122 branch trace information struct.
124 Throws an error if there is no thread or no trace. This function never
127 static struct btrace_thread_info
*
128 require_btrace (void)
130 struct thread_info
*tp
;
132 tp
= require_btrace_thread ();
137 /* Enable branch tracing for one thread. Warn on errors. */
140 record_btrace_enable_warn (struct thread_info
*tp
)
142 volatile struct gdb_exception error
;
144 TRY_CATCH (error
, RETURN_MASK_ERROR
)
145 btrace_enable (tp
, &record_btrace_conf
);
147 if (error
.message
!= NULL
)
148 warning ("%s", error
.message
);
151 /* Callback function to disable branch tracing for one thread. */
154 record_btrace_disable_callback (void *arg
)
156 struct thread_info
*tp
;
163 /* Enable automatic tracing of new threads. */
166 record_btrace_auto_enable (void)
168 DEBUG ("attach thread observer");
170 record_btrace_thread_observer
171 = observer_attach_new_thread (record_btrace_enable_warn
);
174 /* Disable automatic tracing of new threads. */
177 record_btrace_auto_disable (void)
179 /* The observer may have been detached, already. */
180 if (record_btrace_thread_observer
== NULL
)
183 DEBUG ("detach thread observer");
185 observer_detach_new_thread (record_btrace_thread_observer
);
186 record_btrace_thread_observer
= NULL
;
189 /* The record-btrace async event handler function. */
192 record_btrace_handle_async_inferior_event (gdb_client_data data
)
194 inferior_event_handler (INF_REG_EVENT
, NULL
);
197 /* The to_open method of target record-btrace. */
200 record_btrace_open (const char *args
, int from_tty
)
202 struct cleanup
*disable_chain
;
203 struct thread_info
*tp
;
209 if (!target_has_execution
)
210 error (_("The program is not being run."));
213 error (_("Record btrace can't debug inferior in non-stop mode."));
215 gdb_assert (record_btrace_thread_observer
== NULL
);
217 disable_chain
= make_cleanup (null_cleanup
, NULL
);
218 ALL_NON_EXITED_THREADS (tp
)
219 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
221 btrace_enable (tp
, &record_btrace_conf
);
223 make_cleanup (record_btrace_disable_callback
, tp
);
226 record_btrace_auto_enable ();
228 push_target (&record_btrace_ops
);
230 record_btrace_async_inferior_event_handler
231 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
233 record_btrace_generating_corefile
= 0;
235 observer_notify_record_changed (current_inferior (), 1);
237 discard_cleanups (disable_chain
);
240 /* The to_stop_recording method of target record-btrace. */
243 record_btrace_stop_recording (struct target_ops
*self
)
245 struct thread_info
*tp
;
247 DEBUG ("stop recording");
249 record_btrace_auto_disable ();
251 ALL_NON_EXITED_THREADS (tp
)
252 if (tp
->btrace
.target
!= NULL
)
256 /* The to_close method of target record-btrace. */
259 record_btrace_close (struct target_ops
*self
)
261 struct thread_info
*tp
;
263 if (record_btrace_async_inferior_event_handler
!= NULL
)
264 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
266 /* Make sure automatic recording gets disabled even if we did not stop
267 recording before closing the record-btrace target. */
268 record_btrace_auto_disable ();
270 /* We should have already stopped recording.
271 Tear down btrace in case we have not. */
272 ALL_NON_EXITED_THREADS (tp
)
273 btrace_teardown (tp
);
276 /* The to_async method of target record-btrace. */
279 record_btrace_async (struct target_ops
*ops
,
280 void (*callback
) (enum inferior_event_type event_type
,
284 if (callback
!= NULL
)
285 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
287 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
289 ops
->beneath
->to_async (ops
->beneath
, callback
, context
);
292 /* Adjusts the size and returns a human readable size suffix. */
295 record_btrace_adjust_size (unsigned int *size
)
301 if ((sz
& ((1u << 30) - 1)) == 0)
306 else if ((sz
& ((1u << 20) - 1)) == 0)
311 else if ((sz
& ((1u << 10) - 1)) == 0)
320 /* Print a BTS configuration. */
323 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
331 suffix
= record_btrace_adjust_size (&size
);
332 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
336 /* Print a branch tracing configuration. */
339 record_btrace_print_conf (const struct btrace_config
*conf
)
341 printf_unfiltered (_("Recording format: %s.\n"),
342 btrace_format_string (conf
->format
));
344 switch (conf
->format
)
346 case BTRACE_FORMAT_NONE
:
349 case BTRACE_FORMAT_BTS
:
350 record_btrace_print_bts_conf (&conf
->bts
);
354 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
357 /* The to_info_record method of target record-btrace. */
360 record_btrace_info (struct target_ops
*self
)
362 struct btrace_thread_info
*btinfo
;
363 const struct btrace_config
*conf
;
364 struct thread_info
*tp
;
365 unsigned int insns
, calls
, gaps
;
369 tp
= find_thread_ptid (inferior_ptid
);
371 error (_("No thread."));
373 btinfo
= &tp
->btrace
;
375 conf
= btrace_conf (btinfo
);
377 record_btrace_print_conf (conf
);
385 if (!btrace_is_empty (tp
))
387 struct btrace_call_iterator call
;
388 struct btrace_insn_iterator insn
;
390 btrace_call_end (&call
, btinfo
);
391 btrace_call_prev (&call
, 1);
392 calls
= btrace_call_number (&call
);
394 btrace_insn_end (&insn
, btinfo
);
396 insns
= btrace_insn_number (&insn
);
399 /* The last instruction does not really belong to the trace. */
406 /* Skip gaps at the end. */
409 steps
= btrace_insn_prev (&insn
, 1);
413 insns
= btrace_insn_number (&insn
);
418 gaps
= btinfo
->ngaps
;
421 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
422 "for thread %d (%s).\n"), insns
, calls
, gaps
,
423 tp
->num
, target_pid_to_str (tp
->ptid
));
425 if (btrace_is_replaying (tp
))
426 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
427 btrace_insn_number (btinfo
->replay
));
430 /* Print a decode error. */
433 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
434 enum btrace_format format
)
439 errstr
= _("unknown");
447 case BTRACE_FORMAT_BTS
:
453 case BDE_BTS_OVERFLOW
:
454 errstr
= _("instruction overflow");
457 case BDE_BTS_INSN_SIZE
:
458 errstr
= _("unknown instruction");
464 ui_out_text (uiout
, _("["));
467 ui_out_text (uiout
, _("decode error ("));
468 ui_out_field_int (uiout
, "errcode", errcode
);
469 ui_out_text (uiout
, _("): "));
471 ui_out_text (uiout
, errstr
);
472 ui_out_text (uiout
, _("]\n"));
475 /* Print an unsigned int. */
478 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
480 ui_out_field_fmt (uiout
, fld
, "%u", val
);
483 /* Disassemble a section of the recorded instruction trace. */
486 btrace_insn_history (struct ui_out
*uiout
,
487 const struct btrace_thread_info
*btinfo
,
488 const struct btrace_insn_iterator
*begin
,
489 const struct btrace_insn_iterator
*end
, int flags
)
491 struct gdbarch
*gdbarch
;
492 struct btrace_insn_iterator it
;
494 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
495 btrace_insn_number (end
));
497 gdbarch
= target_gdbarch ();
499 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
501 const struct btrace_insn
*insn
;
503 insn
= btrace_insn_get (&it
);
505 /* A NULL instruction indicates a gap in the trace. */
508 const struct btrace_config
*conf
;
510 conf
= btrace_conf (btinfo
);
512 /* We have trace so we must have a configuration. */
513 gdb_assert (conf
!= NULL
);
515 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
520 /* Print the instruction index. */
521 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
522 ui_out_text (uiout
, "\t");
524 /* Disassembly with '/m' flag may not produce the expected result.
526 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
,
532 /* The to_insn_history method of target record-btrace. */
535 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
537 struct btrace_thread_info
*btinfo
;
538 struct btrace_insn_history
*history
;
539 struct btrace_insn_iterator begin
, end
;
540 struct cleanup
*uiout_cleanup
;
541 struct ui_out
*uiout
;
542 unsigned int context
, covered
;
544 uiout
= current_uiout
;
545 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
547 context
= abs (size
);
549 error (_("Bad record instruction-history-size."));
551 btinfo
= require_btrace ();
552 history
= btinfo
->insn_history
;
555 struct btrace_insn_iterator
*replay
;
557 DEBUG ("insn-history (0x%x): %d", flags
, size
);
559 /* If we're replaying, we start at the replay position. Otherwise, we
560 start at the tail of the trace. */
561 replay
= btinfo
->replay
;
565 btrace_insn_end (&begin
, btinfo
);
567 /* We start from here and expand in the requested direction. Then we
568 expand in the other direction, as well, to fill up any remaining
573 /* We want the current position covered, as well. */
574 covered
= btrace_insn_next (&end
, 1);
575 covered
+= btrace_insn_prev (&begin
, context
- covered
);
576 covered
+= btrace_insn_next (&end
, context
- covered
);
580 covered
= btrace_insn_next (&end
, context
);
581 covered
+= btrace_insn_prev (&begin
, context
- covered
);
586 begin
= history
->begin
;
589 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
590 btrace_insn_number (&begin
), btrace_insn_number (&end
));
595 covered
= btrace_insn_prev (&begin
, context
);
600 covered
= btrace_insn_next (&end
, context
);
605 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
609 printf_unfiltered (_("At the start of the branch trace record.\n"));
611 printf_unfiltered (_("At the end of the branch trace record.\n"));
614 btrace_set_insn_history (btinfo
, &begin
, &end
);
615 do_cleanups (uiout_cleanup
);
618 /* The to_insn_history_range method of target record-btrace. */
621 record_btrace_insn_history_range (struct target_ops
*self
,
622 ULONGEST from
, ULONGEST to
, int flags
)
624 struct btrace_thread_info
*btinfo
;
625 struct btrace_insn_history
*history
;
626 struct btrace_insn_iterator begin
, end
;
627 struct cleanup
*uiout_cleanup
;
628 struct ui_out
*uiout
;
629 unsigned int low
, high
;
632 uiout
= current_uiout
;
633 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
638 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
640 /* Check for wrap-arounds. */
641 if (low
!= from
|| high
!= to
)
642 error (_("Bad range."));
645 error (_("Bad range."));
647 btinfo
= require_btrace ();
649 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
651 error (_("Range out of bounds."));
653 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
656 /* Silently truncate the range. */
657 btrace_insn_end (&end
, btinfo
);
661 /* We want both begin and end to be inclusive. */
662 btrace_insn_next (&end
, 1);
665 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
666 btrace_set_insn_history (btinfo
, &begin
, &end
);
668 do_cleanups (uiout_cleanup
);
671 /* The to_insn_history_from method of target record-btrace. */
674 record_btrace_insn_history_from (struct target_ops
*self
,
675 ULONGEST from
, int size
, int flags
)
677 ULONGEST begin
, end
, context
;
679 context
= abs (size
);
681 error (_("Bad record instruction-history-size."));
690 begin
= from
- context
+ 1;
695 end
= from
+ context
- 1;
697 /* Check for wrap-around. */
702 record_btrace_insn_history_range (self
, begin
, end
, flags
);
705 /* Print the instruction number range for a function call history line. */
708 btrace_call_history_insn_range (struct ui_out
*uiout
,
709 const struct btrace_function
*bfun
)
711 unsigned int begin
, end
, size
;
713 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
714 gdb_assert (size
> 0);
716 begin
= bfun
->insn_offset
;
717 end
= begin
+ size
- 1;
719 ui_out_field_uint (uiout
, "insn begin", begin
);
720 ui_out_text (uiout
, ",");
721 ui_out_field_uint (uiout
, "insn end", end
);
724 /* Print the source line information for a function call history line. */
727 btrace_call_history_src_line (struct ui_out
*uiout
,
728 const struct btrace_function
*bfun
)
737 ui_out_field_string (uiout
, "file",
738 symtab_to_filename_for_display (symbol_symtab (sym
)));
740 begin
= bfun
->lbegin
;
746 ui_out_text (uiout
, ":");
747 ui_out_field_int (uiout
, "min line", begin
);
752 ui_out_text (uiout
, ",");
753 ui_out_field_int (uiout
, "max line", end
);
756 /* Get the name of a branch trace function. */
759 btrace_get_bfun_name (const struct btrace_function
*bfun
)
761 struct minimal_symbol
*msym
;
771 return SYMBOL_PRINT_NAME (sym
);
772 else if (msym
!= NULL
)
773 return MSYMBOL_PRINT_NAME (msym
);
778 /* Disassemble a section of the recorded function trace. */
781 btrace_call_history (struct ui_out
*uiout
,
782 const struct btrace_thread_info
*btinfo
,
783 const struct btrace_call_iterator
*begin
,
784 const struct btrace_call_iterator
*end
,
785 enum record_print_flag flags
)
787 struct btrace_call_iterator it
;
789 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
790 btrace_call_number (end
));
792 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
794 const struct btrace_function
*bfun
;
795 struct minimal_symbol
*msym
;
798 bfun
= btrace_call_get (&it
);
802 /* Print the function index. */
803 ui_out_field_uint (uiout
, "index", bfun
->number
);
804 ui_out_text (uiout
, "\t");
806 /* Indicate gaps in the trace. */
807 if (bfun
->errcode
!= 0)
809 const struct btrace_config
*conf
;
811 conf
= btrace_conf (btinfo
);
813 /* We have trace so we must have a configuration. */
814 gdb_assert (conf
!= NULL
);
816 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
821 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
823 int level
= bfun
->level
+ btinfo
->level
, i
;
825 for (i
= 0; i
< level
; ++i
)
826 ui_out_text (uiout
, " ");
830 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
831 else if (msym
!= NULL
)
832 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
833 else if (!ui_out_is_mi_like_p (uiout
))
834 ui_out_field_string (uiout
, "function", "??");
836 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
838 ui_out_text (uiout
, _("\tinst "));
839 btrace_call_history_insn_range (uiout
, bfun
);
842 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
844 ui_out_text (uiout
, _("\tat "));
845 btrace_call_history_src_line (uiout
, bfun
);
848 ui_out_text (uiout
, "\n");
852 /* The to_call_history method of target record-btrace. */
855 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
857 struct btrace_thread_info
*btinfo
;
858 struct btrace_call_history
*history
;
859 struct btrace_call_iterator begin
, end
;
860 struct cleanup
*uiout_cleanup
;
861 struct ui_out
*uiout
;
862 unsigned int context
, covered
;
864 uiout
= current_uiout
;
865 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
867 context
= abs (size
);
869 error (_("Bad record function-call-history-size."));
871 btinfo
= require_btrace ();
872 history
= btinfo
->call_history
;
875 struct btrace_insn_iterator
*replay
;
877 DEBUG ("call-history (0x%x): %d", flags
, size
);
879 /* If we're replaying, we start at the replay position. Otherwise, we
880 start at the tail of the trace. */
881 replay
= btinfo
->replay
;
884 begin
.function
= replay
->function
;
885 begin
.btinfo
= btinfo
;
888 btrace_call_end (&begin
, btinfo
);
890 /* We start from here and expand in the requested direction. Then we
891 expand in the other direction, as well, to fill up any remaining
896 /* We want the current position covered, as well. */
897 covered
= btrace_call_next (&end
, 1);
898 covered
+= btrace_call_prev (&begin
, context
- covered
);
899 covered
+= btrace_call_next (&end
, context
- covered
);
903 covered
= btrace_call_next (&end
, context
);
904 covered
+= btrace_call_prev (&begin
, context
- covered
);
909 begin
= history
->begin
;
912 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
913 btrace_call_number (&begin
), btrace_call_number (&end
));
918 covered
= btrace_call_prev (&begin
, context
);
923 covered
= btrace_call_next (&end
, context
);
928 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
932 printf_unfiltered (_("At the start of the branch trace record.\n"));
934 printf_unfiltered (_("At the end of the branch trace record.\n"));
937 btrace_set_call_history (btinfo
, &begin
, &end
);
938 do_cleanups (uiout_cleanup
);
941 /* The to_call_history_range method of target record-btrace. */
944 record_btrace_call_history_range (struct target_ops
*self
,
945 ULONGEST from
, ULONGEST to
, int flags
)
947 struct btrace_thread_info
*btinfo
;
948 struct btrace_call_history
*history
;
949 struct btrace_call_iterator begin
, end
;
950 struct cleanup
*uiout_cleanup
;
951 struct ui_out
*uiout
;
952 unsigned int low
, high
;
955 uiout
= current_uiout
;
956 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
961 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
963 /* Check for wrap-arounds. */
964 if (low
!= from
|| high
!= to
)
965 error (_("Bad range."));
968 error (_("Bad range."));
970 btinfo
= require_btrace ();
972 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
974 error (_("Range out of bounds."));
976 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
979 /* Silently truncate the range. */
980 btrace_call_end (&end
, btinfo
);
984 /* We want both begin and end to be inclusive. */
985 btrace_call_next (&end
, 1);
988 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
989 btrace_set_call_history (btinfo
, &begin
, &end
);
991 do_cleanups (uiout_cleanup
);
994 /* The to_call_history_from method of target record-btrace. */
997 record_btrace_call_history_from (struct target_ops
*self
,
998 ULONGEST from
, int size
, int flags
)
1000 ULONGEST begin
, end
, context
;
1002 context
= abs (size
);
1004 error (_("Bad record function-call-history-size."));
1013 begin
= from
- context
+ 1;
1018 end
= from
+ context
- 1;
1020 /* Check for wrap-around. */
1025 record_btrace_call_history_range (self
, begin
, end
, flags
);
1028 /* The to_record_is_replaying method of target record-btrace. */
1031 record_btrace_is_replaying (struct target_ops
*self
)
1033 struct thread_info
*tp
;
1035 ALL_NON_EXITED_THREADS (tp
)
1036 if (btrace_is_replaying (tp
))
1042 /* The to_xfer_partial method of target record-btrace. */
1044 static enum target_xfer_status
1045 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1046 const char *annex
, gdb_byte
*readbuf
,
1047 const gdb_byte
*writebuf
, ULONGEST offset
,
1048 ULONGEST len
, ULONGEST
*xfered_len
)
1050 struct target_ops
*t
;
1052 /* Filter out requests that don't make sense during replay. */
1053 if (replay_memory_access
== replay_memory_access_read_only
1054 && !record_btrace_generating_corefile
1055 && record_btrace_is_replaying (ops
))
1059 case TARGET_OBJECT_MEMORY
:
1061 struct target_section
*section
;
1063 /* We do not allow writing memory in general. */
1064 if (writebuf
!= NULL
)
1067 return TARGET_XFER_UNAVAILABLE
;
1070 /* We allow reading readonly memory. */
1071 section
= target_section_by_addr (ops
, offset
);
1072 if (section
!= NULL
)
1074 /* Check if the section we found is readonly. */
1075 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1076 section
->the_bfd_section
)
1077 & SEC_READONLY
) != 0)
1079 /* Truncate the request to fit into this section. */
1080 len
= min (len
, section
->endaddr
- offset
);
1086 return TARGET_XFER_UNAVAILABLE
;
1091 /* Forward the request. */
1093 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1094 offset
, len
, xfered_len
);
1097 /* The to_insert_breakpoint method of target record-btrace. */
1100 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1101 struct gdbarch
*gdbarch
,
1102 struct bp_target_info
*bp_tgt
)
1104 volatile struct gdb_exception except
;
1108 /* Inserting breakpoints requires accessing memory. Allow it for the
1109 duration of this function. */
1110 old
= replay_memory_access
;
1111 replay_memory_access
= replay_memory_access_read_write
;
1114 TRY_CATCH (except
, RETURN_MASK_ALL
)
1115 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1117 replay_memory_access
= old
;
1119 if (except
.reason
< 0)
1120 throw_exception (except
);
1125 /* The to_remove_breakpoint method of target record-btrace. */
1128 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1129 struct gdbarch
*gdbarch
,
1130 struct bp_target_info
*bp_tgt
)
1132 volatile struct gdb_exception except
;
1136 /* Removing breakpoints requires accessing memory. Allow it for the
1137 duration of this function. */
1138 old
= replay_memory_access
;
1139 replay_memory_access
= replay_memory_access_read_write
;
1142 TRY_CATCH (except
, RETURN_MASK_ALL
)
1143 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1145 replay_memory_access
= old
;
1147 if (except
.reason
< 0)
1148 throw_exception (except
);
1153 /* The to_fetch_registers method of target record-btrace. */
1156 record_btrace_fetch_registers (struct target_ops
*ops
,
1157 struct regcache
*regcache
, int regno
)
1159 struct btrace_insn_iterator
*replay
;
1160 struct thread_info
*tp
;
1162 tp
= find_thread_ptid (inferior_ptid
);
1163 gdb_assert (tp
!= NULL
);
1165 replay
= tp
->btrace
.replay
;
1166 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1168 const struct btrace_insn
*insn
;
1169 struct gdbarch
*gdbarch
;
1172 gdbarch
= get_regcache_arch (regcache
);
1173 pcreg
= gdbarch_pc_regnum (gdbarch
);
1177 /* We can only provide the PC register. */
1178 if (regno
>= 0 && regno
!= pcreg
)
1181 insn
= btrace_insn_get (replay
);
1182 gdb_assert (insn
!= NULL
);
1184 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1188 struct target_ops
*t
= ops
->beneath
;
1190 t
->to_fetch_registers (t
, regcache
, regno
);
1194 /* The to_store_registers method of target record-btrace. */
1197 record_btrace_store_registers (struct target_ops
*ops
,
1198 struct regcache
*regcache
, int regno
)
1200 struct target_ops
*t
;
1202 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1203 error (_("This record target does not allow writing registers."));
1205 gdb_assert (may_write_registers
!= 0);
1208 t
->to_store_registers (t
, regcache
, regno
);
1211 /* The to_prepare_to_store method of target record-btrace. */
1214 record_btrace_prepare_to_store (struct target_ops
*ops
,
1215 struct regcache
*regcache
)
1217 struct target_ops
*t
;
1219 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1223 t
->to_prepare_to_store (t
, regcache
);
1226 /* The branch trace frame cache. */
1228 struct btrace_frame_cache
1231 struct thread_info
*tp
;
1233 /* The frame info. */
1234 struct frame_info
*frame
;
1236 /* The branch trace function segment. */
1237 const struct btrace_function
*bfun
;
1240 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1242 static htab_t bfcache
;
1244 /* hash_f for htab_create_alloc of bfcache. */
1247 bfcache_hash (const void *arg
)
1249 const struct btrace_frame_cache
*cache
= arg
;
1251 return htab_hash_pointer (cache
->frame
);
1254 /* eq_f for htab_create_alloc of bfcache. */
1257 bfcache_eq (const void *arg1
, const void *arg2
)
1259 const struct btrace_frame_cache
*cache1
= arg1
;
1260 const struct btrace_frame_cache
*cache2
= arg2
;
1262 return cache1
->frame
== cache2
->frame
;
1265 /* Create a new btrace frame cache. */
1267 static struct btrace_frame_cache
*
1268 bfcache_new (struct frame_info
*frame
)
1270 struct btrace_frame_cache
*cache
;
1273 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1274 cache
->frame
= frame
;
1276 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1277 gdb_assert (*slot
== NULL
);
1283 /* Extract the branch trace function from a branch trace frame. */
1285 static const struct btrace_function
*
1286 btrace_get_frame_function (struct frame_info
*frame
)
1288 const struct btrace_frame_cache
*cache
;
1289 const struct btrace_function
*bfun
;
1290 struct btrace_frame_cache pattern
;
1293 pattern
.frame
= frame
;
1295 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1303 /* Implement stop_reason method for record_btrace_frame_unwind. */
1305 static enum unwind_stop_reason
1306 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1309 const struct btrace_frame_cache
*cache
;
1310 const struct btrace_function
*bfun
;
1312 cache
= *this_cache
;
1314 gdb_assert (bfun
!= NULL
);
1316 if (bfun
->up
== NULL
)
1317 return UNWIND_UNAVAILABLE
;
1319 return UNWIND_NO_REASON
;
1322 /* Implement this_id method for record_btrace_frame_unwind. */
1325 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1326 struct frame_id
*this_id
)
1328 const struct btrace_frame_cache
*cache
;
1329 const struct btrace_function
*bfun
;
1330 CORE_ADDR code
, special
;
1332 cache
= *this_cache
;
1335 gdb_assert (bfun
!= NULL
);
1337 while (bfun
->segment
.prev
!= NULL
)
1338 bfun
= bfun
->segment
.prev
;
1340 code
= get_frame_func (this_frame
);
1341 special
= bfun
->number
;
1343 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1345 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1346 btrace_get_bfun_name (cache
->bfun
),
1347 core_addr_to_string_nz (this_id
->code_addr
),
1348 core_addr_to_string_nz (this_id
->special_addr
));
1351 /* Implement prev_register method for record_btrace_frame_unwind. */
1353 static struct value
*
1354 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1358 const struct btrace_frame_cache
*cache
;
1359 const struct btrace_function
*bfun
, *caller
;
1360 const struct btrace_insn
*insn
;
1361 struct gdbarch
*gdbarch
;
1365 gdbarch
= get_frame_arch (this_frame
);
1366 pcreg
= gdbarch_pc_regnum (gdbarch
);
1367 if (pcreg
< 0 || regnum
!= pcreg
)
1368 throw_error (NOT_AVAILABLE_ERROR
,
1369 _("Registers are not available in btrace record history"));
1371 cache
= *this_cache
;
1373 gdb_assert (bfun
!= NULL
);
1377 throw_error (NOT_AVAILABLE_ERROR
,
1378 _("No caller in btrace record history"));
1380 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1382 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1387 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1390 pc
+= gdb_insn_length (gdbarch
, pc
);
1393 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1394 btrace_get_bfun_name (bfun
), bfun
->level
,
1395 core_addr_to_string_nz (pc
));
1397 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1400 /* Implement sniffer method for record_btrace_frame_unwind. */
1403 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1404 struct frame_info
*this_frame
,
1407 const struct btrace_function
*bfun
;
1408 struct btrace_frame_cache
*cache
;
1409 struct thread_info
*tp
;
1410 struct frame_info
*next
;
1412 /* THIS_FRAME does not contain a reference to its thread. */
1413 tp
= find_thread_ptid (inferior_ptid
);
1414 gdb_assert (tp
!= NULL
);
1417 next
= get_next_frame (this_frame
);
1420 const struct btrace_insn_iterator
*replay
;
1422 replay
= tp
->btrace
.replay
;
1424 bfun
= replay
->function
;
1428 const struct btrace_function
*callee
;
1430 callee
= btrace_get_frame_function (next
);
1431 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1438 DEBUG ("[frame] sniffed frame for %s on level %d",
1439 btrace_get_bfun_name (bfun
), bfun
->level
);
1441 /* This is our frame. Initialize the frame cache. */
1442 cache
= bfcache_new (this_frame
);
1446 *this_cache
= cache
;
1450 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1453 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1454 struct frame_info
*this_frame
,
1457 const struct btrace_function
*bfun
, *callee
;
1458 struct btrace_frame_cache
*cache
;
1459 struct frame_info
*next
;
1461 next
= get_next_frame (this_frame
);
1465 callee
= btrace_get_frame_function (next
);
1469 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1476 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1477 btrace_get_bfun_name (bfun
), bfun
->level
);
1479 /* This is our frame. Initialize the frame cache. */
1480 cache
= bfcache_new (this_frame
);
1481 cache
->tp
= find_thread_ptid (inferior_ptid
);
1484 *this_cache
= cache
;
1489 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1491 struct btrace_frame_cache
*cache
;
1496 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1497 gdb_assert (slot
!= NULL
);
1499 htab_remove_elt (bfcache
, cache
);
1502 /* btrace recording does not store previous memory content, neither the stack
1503 frames content. Any unwinding would return errorneous results as the stack
1504 contents no longer matches the changed PC value restored from history.
1505 Therefore this unwinder reports any possibly unwound registers as
1508 const struct frame_unwind record_btrace_frame_unwind
=
1511 record_btrace_frame_unwind_stop_reason
,
1512 record_btrace_frame_this_id
,
1513 record_btrace_frame_prev_register
,
1515 record_btrace_frame_sniffer
,
1516 record_btrace_frame_dealloc_cache
1519 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1522 record_btrace_frame_unwind_stop_reason
,
1523 record_btrace_frame_this_id
,
1524 record_btrace_frame_prev_register
,
1526 record_btrace_tailcall_frame_sniffer
,
1527 record_btrace_frame_dealloc_cache
1530 /* Implement the to_get_unwinder method. */
1532 static const struct frame_unwind
*
1533 record_btrace_to_get_unwinder (struct target_ops
*self
)
1535 return &record_btrace_frame_unwind
;
1538 /* Implement the to_get_tailcall_unwinder method. */
1540 static const struct frame_unwind
*
1541 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1543 return &record_btrace_tailcall_frame_unwind
;
1546 /* Indicate that TP should be resumed according to FLAG. */
1549 record_btrace_resume_thread (struct thread_info
*tp
,
1550 enum btrace_thread_flag flag
)
1552 struct btrace_thread_info
*btinfo
;
1554 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1556 btinfo
= &tp
->btrace
;
1558 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1559 error (_("Thread already moving."));
1561 /* Fetch the latest branch trace. */
1564 btinfo
->flags
|= flag
;
1567 /* Find the thread to resume given a PTID. */
1569 static struct thread_info
*
1570 record_btrace_find_resume_thread (ptid_t ptid
)
1572 struct thread_info
*tp
;
1574 /* When asked to resume everything, we pick the current thread. */
1575 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
))
1576 ptid
= inferior_ptid
;
1578 return find_thread_ptid (ptid
);
1581 /* Start replaying a thread. */
1583 static struct btrace_insn_iterator
*
1584 record_btrace_start_replaying (struct thread_info
*tp
)
1586 volatile struct gdb_exception except
;
1587 struct btrace_insn_iterator
*replay
;
1588 struct btrace_thread_info
*btinfo
;
1591 btinfo
= &tp
->btrace
;
1594 /* We can't start replaying without trace. */
1595 if (btinfo
->begin
== NULL
)
1598 /* Clear the executing flag to allow changes to the current frame.
1599 We are not actually running, yet. We just started a reverse execution
1600 command or a record goto command.
1601 For the latter, EXECUTING is false and this has no effect.
1602 For the former, EXECUTING is true and we're in to_wait, about to
1603 move the thread. Since we need to recompute the stack, we temporarily
1604 set EXECUTING to flase. */
1605 executing
= is_executing (tp
->ptid
);
1606 set_executing (tp
->ptid
, 0);
1608 /* GDB stores the current frame_id when stepping in order to detects steps
1610 Since frames are computed differently when we're replaying, we need to
1611 recompute those stored frames and fix them up so we can still detect
1612 subroutines after we started replaying. */
1613 TRY_CATCH (except
, RETURN_MASK_ALL
)
1615 struct frame_info
*frame
;
1616 struct frame_id frame_id
;
1617 int upd_step_frame_id
, upd_step_stack_frame_id
;
1619 /* The current frame without replaying - computed via normal unwind. */
1620 frame
= get_current_frame ();
1621 frame_id
= get_frame_id (frame
);
1623 /* Check if we need to update any stepping-related frame id's. */
1624 upd_step_frame_id
= frame_id_eq (frame_id
,
1625 tp
->control
.step_frame_id
);
1626 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1627 tp
->control
.step_stack_frame_id
);
1629 /* We start replaying at the end of the branch trace. This corresponds
1630 to the current instruction. */
1631 replay
= xmalloc (sizeof (*replay
));
1632 btrace_insn_end (replay
, btinfo
);
1634 /* Skip gaps at the end of the trace. */
1635 while (btrace_insn_get (replay
) == NULL
)
1639 steps
= btrace_insn_prev (replay
, 1);
1641 error (_("No trace."));
1644 /* We're not replaying, yet. */
1645 gdb_assert (btinfo
->replay
== NULL
);
1646 btinfo
->replay
= replay
;
1648 /* Make sure we're not using any stale registers. */
1649 registers_changed_ptid (tp
->ptid
);
1651 /* The current frame with replaying - computed via btrace unwind. */
1652 frame
= get_current_frame ();
1653 frame_id
= get_frame_id (frame
);
1655 /* Replace stepping related frames where necessary. */
1656 if (upd_step_frame_id
)
1657 tp
->control
.step_frame_id
= frame_id
;
1658 if (upd_step_stack_frame_id
)
1659 tp
->control
.step_stack_frame_id
= frame_id
;
1662 /* Restore the previous execution state. */
1663 set_executing (tp
->ptid
, executing
);
1665 if (except
.reason
< 0)
1667 xfree (btinfo
->replay
);
1668 btinfo
->replay
= NULL
;
1670 registers_changed_ptid (tp
->ptid
);
1672 throw_exception (except
);
1678 /* Stop replaying a thread. */
1681 record_btrace_stop_replaying (struct thread_info
*tp
)
1683 struct btrace_thread_info
*btinfo
;
1685 btinfo
= &tp
->btrace
;
1687 xfree (btinfo
->replay
);
1688 btinfo
->replay
= NULL
;
1690 /* Make sure we're not leaving any stale registers. */
1691 registers_changed_ptid (tp
->ptid
);
1694 /* The to_resume method of target record-btrace. */
1697 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1698 enum gdb_signal signal
)
1700 struct thread_info
*tp
, *other
;
1701 enum btrace_thread_flag flag
;
1703 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1705 /* Store the execution direction of the last resume. */
1706 record_btrace_resume_exec_dir
= execution_direction
;
1708 tp
= record_btrace_find_resume_thread (ptid
);
1710 error (_("Cannot find thread to resume."));
1712 /* Stop replaying other threads if the thread to resume is not replaying. */
1713 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1714 ALL_NON_EXITED_THREADS (other
)
1715 record_btrace_stop_replaying (other
);
1717 /* As long as we're not replaying, just forward the request. */
1718 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1721 return ops
->to_resume (ops
, ptid
, step
, signal
);
1724 /* Compute the btrace thread flag for the requested move. */
1726 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1728 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1730 /* At the moment, we only move a single thread. We could also move
1731 all threads in parallel by single-stepping each resumed thread
1732 until the first runs into an event.
1733 When we do that, we would want to continue all other threads.
1734 For now, just resume one thread to not confuse to_wait. */
1735 record_btrace_resume_thread (tp
, flag
);
1737 /* We just indicate the resume intent here. The actual stepping happens in
1738 record_btrace_wait below. */
1740 /* Async support. */
1741 if (target_can_async_p ())
1743 target_async (inferior_event_handler
, 0);
1744 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
1748 /* Find a thread to move. */
1750 static struct thread_info
*
1751 record_btrace_find_thread_to_move (ptid_t ptid
)
1753 struct thread_info
*tp
;
1755 /* First check the parameter thread. */
1756 tp
= find_thread_ptid (ptid
);
1757 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1760 /* Otherwise, find one other thread that has been resumed. */
1761 ALL_NON_EXITED_THREADS (tp
)
1762 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1768 /* Return a target_waitstatus indicating that we ran out of history. */
1770 static struct target_waitstatus
1771 btrace_step_no_history (void)
1773 struct target_waitstatus status
;
1775 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1780 /* Return a target_waitstatus indicating that a step finished. */
1782 static struct target_waitstatus
1783 btrace_step_stopped (void)
1785 struct target_waitstatus status
;
1787 status
.kind
= TARGET_WAITKIND_STOPPED
;
1788 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1793 /* Clear the record histories. */
1796 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1798 xfree (btinfo
->insn_history
);
1799 xfree (btinfo
->call_history
);
1801 btinfo
->insn_history
= NULL
;
1802 btinfo
->call_history
= NULL
;
1805 /* Step a single thread. */
1807 static struct target_waitstatus
1808 record_btrace_step_thread (struct thread_info
*tp
)
1810 struct btrace_insn_iterator
*replay
, end
;
1811 struct btrace_thread_info
*btinfo
;
1812 struct address_space
*aspace
;
1813 struct inferior
*inf
;
1814 enum btrace_thread_flag flags
;
1817 /* We can't step without an execution history. */
1818 if (btrace_is_empty (tp
))
1819 return btrace_step_no_history ();
1821 btinfo
= &tp
->btrace
;
1822 replay
= btinfo
->replay
;
1824 flags
= btinfo
->flags
& BTHR_MOVE
;
1825 btinfo
->flags
&= ~BTHR_MOVE
;
1827 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1832 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1835 /* We're done if we're not replaying. */
1837 return btrace_step_no_history ();
1839 /* Skip gaps during replay. */
1842 steps
= btrace_insn_next (replay
, 1);
1845 record_btrace_stop_replaying (tp
);
1846 return btrace_step_no_history ();
1849 while (btrace_insn_get (replay
) == NULL
);
1851 /* Determine the end of the instruction trace. */
1852 btrace_insn_end (&end
, btinfo
);
1854 /* We stop replaying if we reached the end of the trace. */
1855 if (btrace_insn_cmp (replay
, &end
) == 0)
1856 record_btrace_stop_replaying (tp
);
1858 return btrace_step_stopped ();
1861 /* Start replaying if we're not already doing so. */
1863 replay
= record_btrace_start_replaying (tp
);
1865 /* If we can't step any further, we reached the end of the history.
1866 Skip gaps during replay. */
1869 steps
= btrace_insn_prev (replay
, 1);
1871 return btrace_step_no_history ();
1874 while (btrace_insn_get (replay
) == NULL
);
1876 return btrace_step_stopped ();
1879 /* We're done if we're not replaying. */
1881 return btrace_step_no_history ();
1883 inf
= find_inferior_ptid (tp
->ptid
);
1884 aspace
= inf
->aspace
;
1886 /* Determine the end of the instruction trace. */
1887 btrace_insn_end (&end
, btinfo
);
1891 const struct btrace_insn
*insn
;
1893 /* Skip gaps during replay. */
1896 steps
= btrace_insn_next (replay
, 1);
1899 record_btrace_stop_replaying (tp
);
1900 return btrace_step_no_history ();
1903 insn
= btrace_insn_get (replay
);
1905 while (insn
== NULL
);
1907 /* We stop replaying if we reached the end of the trace. */
1908 if (btrace_insn_cmp (replay
, &end
) == 0)
1910 record_btrace_stop_replaying (tp
);
1911 return btrace_step_no_history ();
1914 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1915 target_pid_to_str (tp
->ptid
),
1916 core_addr_to_string_nz (insn
->pc
));
1918 if (breakpoint_here_p (aspace
, insn
->pc
))
1919 return btrace_step_stopped ();
1923 /* Start replaying if we're not already doing so. */
1925 replay
= record_btrace_start_replaying (tp
);
1927 inf
= find_inferior_ptid (tp
->ptid
);
1928 aspace
= inf
->aspace
;
1932 const struct btrace_insn
*insn
;
1934 /* If we can't step any further, we reached the end of the history.
1935 Skip gaps during replay. */
1938 steps
= btrace_insn_prev (replay
, 1);
1940 return btrace_step_no_history ();
1942 insn
= btrace_insn_get (replay
);
1944 while (insn
== NULL
);
1946 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1947 target_pid_to_str (tp
->ptid
),
1948 core_addr_to_string_nz (insn
->pc
));
1950 if (breakpoint_here_p (aspace
, insn
->pc
))
1951 return btrace_step_stopped ();
1956 /* The to_wait method of target record-btrace. */
1959 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
1960 struct target_waitstatus
*status
, int options
)
1962 struct thread_info
*tp
, *other
;
1964 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
1966 /* As long as we're not replaying, just forward the request. */
1967 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1970 return ops
->to_wait (ops
, ptid
, status
, options
);
1973 /* Let's find a thread to move. */
1974 tp
= record_btrace_find_thread_to_move (ptid
);
1977 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
1979 status
->kind
= TARGET_WAITKIND_IGNORE
;
1980 return minus_one_ptid
;
1983 /* We only move a single thread. We're not able to correlate threads. */
1984 *status
= record_btrace_step_thread (tp
);
1986 /* Stop all other threads. */
1988 ALL_NON_EXITED_THREADS (other
)
1989 other
->btrace
.flags
&= ~BTHR_MOVE
;
1991 /* Start record histories anew from the current position. */
1992 record_btrace_clear_histories (&tp
->btrace
);
1994 /* We moved the replay position but did not update registers. */
1995 registers_changed_ptid (tp
->ptid
);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Reverse execution is the point of the btrace record target.  */
  return 1;
}
2008 /* The to_decr_pc_after_break method of target record-btrace. */
2011 record_btrace_decr_pc_after_break (struct target_ops
*ops
,
2012 struct gdbarch
*gdbarch
)
2014 /* When replaying, we do not actually execute the breakpoint instruction
2015 so there is no need to adjust the PC after hitting a breakpoint. */
2016 if (record_btrace_is_replaying (ops
))
2019 return ops
->beneath
->to_decr_pc_after_break (ops
->beneath
, gdbarch
);
2022 /* The to_update_thread_list method of target record-btrace. */
2025 record_btrace_update_thread_list (struct target_ops
*ops
)
2027 /* We don't add or remove threads during replay. */
2028 if (record_btrace_is_replaying (ops
))
2031 /* Forward the request. */
2033 ops
->to_update_thread_list (ops
);
2036 /* The to_thread_alive method of target record-btrace. */
2039 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2041 /* We don't add or remove threads during replay. */
2042 if (record_btrace_is_replaying (ops
))
2043 return find_thread_ptid (ptid
) != NULL
;
2045 /* Forward the request. */
2047 return ops
->to_thread_alive (ops
, ptid
);
2050 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2054 record_btrace_set_replay (struct thread_info
*tp
,
2055 const struct btrace_insn_iterator
*it
)
2057 struct btrace_thread_info
*btinfo
;
2059 btinfo
= &tp
->btrace
;
2061 if (it
== NULL
|| it
->function
== NULL
)
2062 record_btrace_stop_replaying (tp
);
2065 if (btinfo
->replay
== NULL
)
2066 record_btrace_start_replaying (tp
);
2067 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2070 *btinfo
->replay
= *it
;
2071 registers_changed_ptid (tp
->ptid
);
2074 /* Start anew from the new replay position. */
2075 record_btrace_clear_histories (btinfo
);
2078 /* The to_goto_record_begin method of target record-btrace. */
2081 record_btrace_goto_begin (struct target_ops
*self
)
2083 struct thread_info
*tp
;
2084 struct btrace_insn_iterator begin
;
2086 tp
= require_btrace_thread ();
2088 btrace_insn_begin (&begin
, &tp
->btrace
);
2089 record_btrace_set_replay (tp
, &begin
);
2091 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2094 /* The to_goto_record_end method of target record-btrace. */
2097 record_btrace_goto_end (struct target_ops
*ops
)
2099 struct thread_info
*tp
;
2101 tp
= require_btrace_thread ();
2103 record_btrace_set_replay (tp
, NULL
);
2105 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2108 /* The to_goto_record method of target record-btrace. */
2111 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2113 struct thread_info
*tp
;
2114 struct btrace_insn_iterator it
;
2115 unsigned int number
;
2120 /* Check for wrap-arounds. */
2122 error (_("Instruction number out of range."));
2124 tp
= require_btrace_thread ();
2126 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2128 error (_("No such instruction."));
2130 record_btrace_set_replay (tp
, &it
);
2132 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2135 /* The to_execution_direction target method. */
2137 static enum exec_direction_kind
2138 record_btrace_execution_direction (struct target_ops
*self
)
2140 return record_btrace_resume_exec_dir
;
2143 /* The to_prepare_to_generate_core target method. */
2146 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2148 record_btrace_generating_corefile
= 1;
2151 /* The to_done_generating_core target method. */
2154 record_btrace_done_generating_core (struct target_ops
*self
)
2156 record_btrace_generating_corefile
= 0;
2159 /* Initialize the record-btrace target ops. */
2162 init_record_btrace_ops (void)
2164 struct target_ops
*ops
;
2166 ops
= &record_btrace_ops
;
2167 ops
->to_shortname
= "record-btrace";
2168 ops
->to_longname
= "Branch tracing target";
2169 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2170 ops
->to_open
= record_btrace_open
;
2171 ops
->to_close
= record_btrace_close
;
2172 ops
->to_async
= record_btrace_async
;
2173 ops
->to_detach
= record_detach
;
2174 ops
->to_disconnect
= record_disconnect
;
2175 ops
->to_mourn_inferior
= record_mourn_inferior
;
2176 ops
->to_kill
= record_kill
;
2177 ops
->to_stop_recording
= record_btrace_stop_recording
;
2178 ops
->to_info_record
= record_btrace_info
;
2179 ops
->to_insn_history
= record_btrace_insn_history
;
2180 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2181 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2182 ops
->to_call_history
= record_btrace_call_history
;
2183 ops
->to_call_history_from
= record_btrace_call_history_from
;
2184 ops
->to_call_history_range
= record_btrace_call_history_range
;
2185 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2186 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2187 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2188 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2189 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2190 ops
->to_store_registers
= record_btrace_store_registers
;
2191 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2192 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2193 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2194 ops
->to_resume
= record_btrace_resume
;
2195 ops
->to_wait
= record_btrace_wait
;
2196 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2197 ops
->to_thread_alive
= record_btrace_thread_alive
;
2198 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2199 ops
->to_goto_record_end
= record_btrace_goto_end
;
2200 ops
->to_goto_record
= record_btrace_goto
;
2201 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2202 ops
->to_decr_pc_after_break
= record_btrace_decr_pc_after_break
;
2203 ops
->to_execution_direction
= record_btrace_execution_direction
;
2204 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2205 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2206 ops
->to_stratum
= record_stratum
;
2207 ops
->to_magic
= OPS_MAGIC
;
2210 /* Start recording in BTS format. */
2213 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2215 volatile struct gdb_exception exception
;
2217 if (args
!= NULL
&& *args
!= 0)
2218 error (_("Invalid argument."));
2220 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2222 TRY_CATCH (exception
, RETURN_MASK_ALL
)
2223 execute_command ("target record-btrace", from_tty
);
2225 if (exception
.error
!= 0)
2227 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2228 throw_exception (exception
);
2232 /* Alias for "target record". */
2235 cmd_record_btrace_start (char *args
, int from_tty
)
2237 volatile struct gdb_exception exception
;
2239 if (args
!= NULL
&& *args
!= 0)
2240 error (_("Invalid argument."));
2242 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2244 TRY_CATCH (exception
, RETURN_MASK_ALL
)
2245 execute_command ("target record-btrace", from_tty
);
2247 if (exception
.error
== 0)
2250 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2251 throw_exception (exception
);
2254 /* The "set record btrace" command. */
2257 cmd_set_record_btrace (char *args
, int from_tty
)
2259 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2262 /* The "show record btrace" command. */
2265 cmd_show_record_btrace (char *args
, int from_tty
)
2267 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2270 /* The "show record btrace replay-memory-access" command. */
2273 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2274 struct cmd_list_element
*c
, const char *value
)
2276 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2277 replay_memory_access
);
2280 /* The "set record btrace bts" command. */
2283 cmd_set_record_btrace_bts (char *args
, int from_tty
)
2285 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2286 "by an apporpriate subcommand.\n"));
2287 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2288 all_commands
, gdb_stdout
);
2291 /* The "show record btrace bts" command. */
2294 cmd_show_record_btrace_bts (char *args
, int from_tty
)
2296 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2299 void _initialize_record_btrace (void);
2301 /* Initialize btrace commands. */
2304 _initialize_record_btrace (void)
2306 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
2307 _("Start branch trace recording."), &record_btrace_cmdlist
,
2308 "record btrace ", 0, &record_cmdlist
);
2309 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
2311 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
2313 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2314 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2315 This format may not be available on all processors."),
2316 &record_btrace_cmdlist
);
2317 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
2319 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
2320 _("Set record options"), &set_record_btrace_cmdlist
,
2321 "set record btrace ", 0, &set_record_cmdlist
);
2323 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
2324 _("Show record options"), &show_record_btrace_cmdlist
,
2325 "show record btrace ", 0, &show_record_cmdlist
);
2327 add_setshow_enum_cmd ("replay-memory-access", no_class
,
2328 replay_memory_access_types
, &replay_memory_access
, _("\
2329 Set what memory accesses are allowed during replay."), _("\
2330 Show what memory accesses are allowed during replay."),
2331 _("Default is READ-ONLY.\n\n\
2332 The btrace record target does not trace data.\n\
2333 The memory therefore corresponds to the live target and not \
2334 to the current replay position.\n\n\
2335 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2336 When READ-WRITE, allow accesses to read-only and read-write memory during \
2338 NULL
, cmd_show_replay_memory_access
,
2339 &set_record_btrace_cmdlist
,
2340 &show_record_btrace_cmdlist
);
2342 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
2343 _("Set record btrace bts options"),
2344 &set_record_btrace_bts_cmdlist
,
2345 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
2347 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
2348 _("Show record btrace bts options"),
2349 &show_record_btrace_bts_cmdlist
,
2350 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
2352 add_setshow_uinteger_cmd ("buffer-size", no_class
,
2353 &record_btrace_conf
.bts
.size
,
2354 _("Set the record/replay bts buffer size."),
2355 _("Show the record/replay bts buffer size."), _("\
2356 When starting recording request a trace buffer of this size. \
2357 The actual buffer size may differ from the requested size. \
2358 Use \"info record\" to see the actual buffer size.\n\n\
2359 Bigger buffers allow longer recording but also take more time to process \
2360 the recorded execution trace.\n\n\
2361 The trace buffer size may not be changed while recording."), NULL
, NULL
,
2362 &set_record_btrace_bts_cmdlist
,
2363 &show_record_btrace_bts_cmdlist
);
2365 init_record_btrace_ops ();
2366 add_target (&record_btrace_ops
);
2368 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
2371 record_btrace_conf
.bts
.size
= 64 * 1024;