1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops
;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer
*record_btrace_thread_observer
;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only
[] = "read-only";
52 static const char replay_memory_access_read_write
[] = "read-write";
53 static const char *const replay_memory_access_types
[] =
55 replay_memory_access_read_only
,
56 replay_memory_access_read_write
,
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access
= replay_memory_access_read_only
;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element
*set_record_btrace_cmdlist
;
65 static struct cmd_list_element
*show_record_btrace_cmdlist
;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile
;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf
;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element
*record_btrace_cmdlist
;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
84 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
88 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
93 #define DEBUG(msg, args...) \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
109 static struct thread_info
*
110 require_btrace_thread (void)
112 struct thread_info
*tp
;
116 tp
= find_thread_ptid (inferior_ptid
);
118 error (_("No thread."));
120 validate_registers_access ();
124 if (btrace_is_empty (tp
))
125 error (_("No trace."));
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace. This function never
136 static struct btrace_thread_info
*
137 require_btrace (void)
139 struct thread_info
*tp
;
141 tp
= require_btrace_thread ();
146 /* Enable branch tracing for one thread. Warn on errors. */
149 record_btrace_enable_warn (struct thread_info
*tp
)
153 btrace_enable (tp
, &record_btrace_conf
);
155 CATCH (error
, RETURN_MASK_ERROR
)
157 warning ("%s", error
.message
);
162 /* Callback function to disable branch tracing for one thread. */
165 record_btrace_disable_callback (void *arg
)
167 struct thread_info
*tp
= (struct thread_info
*) arg
;
172 /* Enable automatic tracing of new threads. */
175 record_btrace_auto_enable (void)
177 DEBUG ("attach thread observer");
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn
);
183 /* Disable automatic tracing of new threads. */
186 record_btrace_auto_disable (void)
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer
== NULL
)
192 DEBUG ("detach thread observer");
194 observer_detach_new_thread (record_btrace_thread_observer
);
195 record_btrace_thread_observer
= NULL
;
198 /* The record-btrace async event handler function. */
201 record_btrace_handle_async_inferior_event (gdb_client_data data
)
203 inferior_event_handler (INF_REG_EVENT
, NULL
);
206 /* See record-btrace.h. */
209 record_btrace_push_target (void)
213 record_btrace_auto_enable ();
215 push_target (&record_btrace_ops
);
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
220 record_btrace_generating_corefile
= 0;
222 format
= btrace_format_short_string (record_btrace_conf
.format
);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format
);
226 /* The to_open method of target record-btrace. */
229 record_btrace_open (const char *args
, int from_tty
)
231 struct cleanup
*disable_chain
;
232 struct thread_info
*tp
;
238 if (!target_has_execution
)
239 error (_("The program is not being run."));
241 gdb_assert (record_btrace_thread_observer
== NULL
);
243 disable_chain
= make_cleanup (null_cleanup
, NULL
);
244 ALL_NON_EXITED_THREADS (tp
)
245 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
247 btrace_enable (tp
, &record_btrace_conf
);
249 make_cleanup (record_btrace_disable_callback
, tp
);
252 record_btrace_push_target ();
254 discard_cleanups (disable_chain
);
257 /* The to_stop_recording method of target record-btrace. */
260 record_btrace_stop_recording (struct target_ops
*self
)
262 struct thread_info
*tp
;
264 DEBUG ("stop recording");
266 record_btrace_auto_disable ();
268 ALL_NON_EXITED_THREADS (tp
)
269 if (tp
->btrace
.target
!= NULL
)
273 /* The to_disconnect method of target record-btrace. */
276 record_btrace_disconnect (struct target_ops
*self
, const char *args
,
279 struct target_ops
*beneath
= self
->beneath
;
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self
);
284 /* Forward disconnect. */
285 beneath
->to_disconnect (beneath
, args
, from_tty
);
288 /* The to_close method of target record-btrace. */
291 record_btrace_close (struct target_ops
*self
)
293 struct thread_info
*tp
;
295 if (record_btrace_async_inferior_event_handler
!= NULL
)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp
)
305 btrace_teardown (tp
);
308 /* The to_async method of target record-btrace. */
311 record_btrace_async (struct target_ops
*ops
, int enable
)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
316 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
318 ops
->beneath
->to_async (ops
->beneath
, enable
);
321 /* Adjusts the size and returns a human readable size suffix. */
324 record_btrace_adjust_size (unsigned int *size
)
330 if ((sz
& ((1u << 30) - 1)) == 0)
335 else if ((sz
& ((1u << 20) - 1)) == 0)
340 else if ((sz
& ((1u << 10) - 1)) == 0)
349 /* Print a BTS configuration. */
352 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
360 suffix
= record_btrace_adjust_size (&size
);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
365 /* Print an Intel Processor Trace configuration. */
368 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
376 suffix
= record_btrace_adjust_size (&size
);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
381 /* Print a branch tracing configuration. */
384 record_btrace_print_conf (const struct btrace_config
*conf
)
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf
->format
));
389 switch (conf
->format
)
391 case BTRACE_FORMAT_NONE
:
394 case BTRACE_FORMAT_BTS
:
395 record_btrace_print_bts_conf (&conf
->bts
);
398 case BTRACE_FORMAT_PT
:
399 record_btrace_print_pt_conf (&conf
->pt
);
403 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
406 /* The to_info_record method of target record-btrace. */
409 record_btrace_info (struct target_ops
*self
)
411 struct btrace_thread_info
*btinfo
;
412 const struct btrace_config
*conf
;
413 struct thread_info
*tp
;
414 unsigned int insns
, calls
, gaps
;
418 tp
= find_thread_ptid (inferior_ptid
);
420 error (_("No thread."));
422 validate_registers_access ();
424 btinfo
= &tp
->btrace
;
426 conf
= btrace_conf (btinfo
);
428 record_btrace_print_conf (conf
);
436 if (!btrace_is_empty (tp
))
438 struct btrace_call_iterator call
;
439 struct btrace_insn_iterator insn
;
441 btrace_call_end (&call
, btinfo
);
442 btrace_call_prev (&call
, 1);
443 calls
= btrace_call_number (&call
);
445 btrace_insn_end (&insn
, btinfo
);
447 insns
= btrace_insn_number (&insn
);
450 /* The last instruction does not really belong to the trace. */
457 /* Skip gaps at the end. */
460 steps
= btrace_insn_prev (&insn
, 1);
464 insns
= btrace_insn_number (&insn
);
469 gaps
= btinfo
->ngaps
;
472 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
473 "for thread %s (%s).\n"), insns
, calls
, gaps
,
474 print_thread_id (tp
), target_pid_to_str (tp
->ptid
));
476 if (btrace_is_replaying (tp
))
477 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
478 btrace_insn_number (btinfo
->replay
));
481 /* Print a decode error. */
484 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
485 enum btrace_format format
)
490 errstr
= _("unknown");
498 case BTRACE_FORMAT_BTS
:
504 case BDE_BTS_OVERFLOW
:
505 errstr
= _("instruction overflow");
508 case BDE_BTS_INSN_SIZE
:
509 errstr
= _("unknown instruction");
514 #if defined (HAVE_LIBIPT)
515 case BTRACE_FORMAT_PT
:
518 case BDE_PT_USER_QUIT
:
520 errstr
= _("trace decode cancelled");
523 case BDE_PT_DISABLED
:
525 errstr
= _("disabled");
528 case BDE_PT_OVERFLOW
:
530 errstr
= _("overflow");
535 errstr
= pt_errstr (pt_errcode (errcode
));
539 #endif /* defined (HAVE_LIBIPT) */
542 uiout
->text (_("["));
545 uiout
->text (_("decode error ("));
546 uiout
->field_int ("errcode", errcode
);
547 uiout
->text (_("): "));
549 uiout
->text (errstr
);
550 uiout
->text (_("]\n"));
553 /* Print an unsigned int. */
556 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
558 uiout
->field_fmt (fld
, "%u", val
);
561 /* A range of source lines. */
563 struct btrace_line_range
565 /* The symtab this line is from. */
566 struct symtab
*symtab
;
568 /* The first line (inclusive). */
571 /* The last line (exclusive). */
575 /* Construct a line range. */
577 static struct btrace_line_range
578 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
580 struct btrace_line_range range
;
582 range
.symtab
= symtab
;
589 /* Add a line to a line range. */
591 static struct btrace_line_range
592 btrace_line_range_add (struct btrace_line_range range
, int line
)
594 if (range
.end
<= range
.begin
)
596 /* This is the first entry. */
598 range
.end
= line
+ 1;
600 else if (line
< range
.begin
)
602 else if (range
.end
< line
)
608 /* Return non-zero if RANGE is empty, zero otherwise. */
611 btrace_line_range_is_empty (struct btrace_line_range range
)
613 return range
.end
<= range
.begin
;
616 /* Return non-zero if LHS contains RHS, zero otherwise. */
619 btrace_line_range_contains_range (struct btrace_line_range lhs
,
620 struct btrace_line_range rhs
)
622 return ((lhs
.symtab
== rhs
.symtab
)
623 && (lhs
.begin
<= rhs
.begin
)
624 && (rhs
.end
<= lhs
.end
));
627 /* Find the line range associated with PC. */
629 static struct btrace_line_range
630 btrace_find_line_range (CORE_ADDR pc
)
632 struct btrace_line_range range
;
633 struct linetable_entry
*lines
;
634 struct linetable
*ltable
;
635 struct symtab
*symtab
;
638 symtab
= find_pc_line_symtab (pc
);
640 return btrace_mk_line_range (NULL
, 0, 0);
642 ltable
= SYMTAB_LINETABLE (symtab
);
644 return btrace_mk_line_range (symtab
, 0, 0);
646 nlines
= ltable
->nitems
;
647 lines
= ltable
->item
;
649 return btrace_mk_line_range (symtab
, 0, 0);
651 range
= btrace_mk_line_range (symtab
, 0, 0);
652 for (i
= 0; i
< nlines
- 1; i
++)
654 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0))
655 range
= btrace_line_range_add (range
, lines
[i
].line
);
661 /* Print source lines in LINES to UIOUT.
663 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
664 instructions corresponding to that source line. When printing a new source
665 line, we do the cleanups for the open chain and open a new cleanup chain for
666 the new source line. If the source line range in LINES is not empty, this
667 function will leave the cleanup chain for the last printed source line open
668 so instructions can be added to it. */
671 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
672 struct cleanup
**ui_item_chain
, int flags
)
674 print_source_lines_flags psl_flags
;
678 if (flags
& DISASSEMBLY_FILENAME
)
679 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
681 for (line
= lines
.begin
; line
< lines
.end
; ++line
)
683 if (*ui_item_chain
!= NULL
)
684 do_cleanups (*ui_item_chain
);
687 = make_cleanup_ui_out_tuple_begin_end (uiout
, "src_and_asm_line");
689 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
691 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
695 /* Disassemble a section of the recorded instruction trace. */
698 btrace_insn_history (struct ui_out
*uiout
,
699 const struct btrace_thread_info
*btinfo
,
700 const struct btrace_insn_iterator
*begin
,
701 const struct btrace_insn_iterator
*end
, int flags
)
703 struct cleanup
*cleanups
, *ui_item_chain
;
704 struct gdbarch
*gdbarch
;
705 struct btrace_insn_iterator it
;
706 struct btrace_line_range last_lines
;
708 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
709 btrace_insn_number (end
));
711 flags
|= DISASSEMBLY_SPECULATIVE
;
713 gdbarch
= target_gdbarch ();
714 last_lines
= btrace_mk_line_range (NULL
, 0, 0);
716 cleanups
= make_cleanup_ui_out_list_begin_end (uiout
, "asm_insns");
718 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
719 instructions corresponding to that line. */
720 ui_item_chain
= NULL
;
722 gdb_pretty_print_disassembler
disasm (gdbarch
);
724 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
726 const struct btrace_insn
*insn
;
728 insn
= btrace_insn_get (&it
);
730 /* A NULL instruction indicates a gap in the trace. */
733 const struct btrace_config
*conf
;
735 conf
= btrace_conf (btinfo
);
737 /* We have trace so we must have a configuration. */
738 gdb_assert (conf
!= NULL
);
740 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
745 struct disasm_insn dinsn
;
747 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
749 struct btrace_line_range lines
;
751 lines
= btrace_find_line_range (insn
->pc
);
752 if (!btrace_line_range_is_empty (lines
)
753 && !btrace_line_range_contains_range (last_lines
, lines
))
755 btrace_print_lines (lines
, uiout
, &ui_item_chain
, flags
);
758 else if (ui_item_chain
== NULL
)
761 = make_cleanup_ui_out_tuple_begin_end (uiout
,
763 /* No source information. */
764 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
767 gdb_assert (ui_item_chain
!= NULL
);
770 memset (&dinsn
, 0, sizeof (dinsn
));
771 dinsn
.number
= btrace_insn_number (&it
);
772 dinsn
.addr
= insn
->pc
;
774 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
775 dinsn
.is_speculative
= 1;
777 disasm
.pretty_print_insn (uiout
, &dinsn
, flags
);
781 do_cleanups (cleanups
);
784 /* The to_insn_history method of target record-btrace. */
787 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
789 struct btrace_thread_info
*btinfo
;
790 struct btrace_insn_history
*history
;
791 struct btrace_insn_iterator begin
, end
;
792 struct cleanup
*uiout_cleanup
;
793 struct ui_out
*uiout
;
794 unsigned int context
, covered
;
796 uiout
= current_uiout
;
797 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
799 context
= abs (size
);
801 error (_("Bad record instruction-history-size."));
803 btinfo
= require_btrace ();
804 history
= btinfo
->insn_history
;
807 struct btrace_insn_iterator
*replay
;
809 DEBUG ("insn-history (0x%x): %d", flags
, size
);
811 /* If we're replaying, we start at the replay position. Otherwise, we
812 start at the tail of the trace. */
813 replay
= btinfo
->replay
;
817 btrace_insn_end (&begin
, btinfo
);
819 /* We start from here and expand in the requested direction. Then we
820 expand in the other direction, as well, to fill up any remaining
825 /* We want the current position covered, as well. */
826 covered
= btrace_insn_next (&end
, 1);
827 covered
+= btrace_insn_prev (&begin
, context
- covered
);
828 covered
+= btrace_insn_next (&end
, context
- covered
);
832 covered
= btrace_insn_next (&end
, context
);
833 covered
+= btrace_insn_prev (&begin
, context
- covered
);
838 begin
= history
->begin
;
841 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
842 btrace_insn_number (&begin
), btrace_insn_number (&end
));
847 covered
= btrace_insn_prev (&begin
, context
);
852 covered
= btrace_insn_next (&end
, context
);
857 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
861 printf_unfiltered (_("At the start of the branch trace record.\n"));
863 printf_unfiltered (_("At the end of the branch trace record.\n"));
866 btrace_set_insn_history (btinfo
, &begin
, &end
);
867 do_cleanups (uiout_cleanup
);
870 /* The to_insn_history_range method of target record-btrace. */
873 record_btrace_insn_history_range (struct target_ops
*self
,
874 ULONGEST from
, ULONGEST to
, int flags
)
876 struct btrace_thread_info
*btinfo
;
877 struct btrace_insn_history
*history
;
878 struct btrace_insn_iterator begin
, end
;
879 struct cleanup
*uiout_cleanup
;
880 struct ui_out
*uiout
;
881 unsigned int low
, high
;
884 uiout
= current_uiout
;
885 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
890 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
892 /* Check for wrap-arounds. */
893 if (low
!= from
|| high
!= to
)
894 error (_("Bad range."));
897 error (_("Bad range."));
899 btinfo
= require_btrace ();
901 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
903 error (_("Range out of bounds."));
905 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
908 /* Silently truncate the range. */
909 btrace_insn_end (&end
, btinfo
);
913 /* We want both begin and end to be inclusive. */
914 btrace_insn_next (&end
, 1);
917 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
918 btrace_set_insn_history (btinfo
, &begin
, &end
);
920 do_cleanups (uiout_cleanup
);
923 /* The to_insn_history_from method of target record-btrace. */
926 record_btrace_insn_history_from (struct target_ops
*self
,
927 ULONGEST from
, int size
, int flags
)
929 ULONGEST begin
, end
, context
;
931 context
= abs (size
);
933 error (_("Bad record instruction-history-size."));
942 begin
= from
- context
+ 1;
947 end
= from
+ context
- 1;
949 /* Check for wrap-around. */
954 record_btrace_insn_history_range (self
, begin
, end
, flags
);
957 /* Print the instruction number range for a function call history line. */
960 btrace_call_history_insn_range (struct ui_out
*uiout
,
961 const struct btrace_function
*bfun
)
963 unsigned int begin
, end
, size
;
965 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
966 gdb_assert (size
> 0);
968 begin
= bfun
->insn_offset
;
969 end
= begin
+ size
- 1;
971 ui_out_field_uint (uiout
, "insn begin", begin
);
973 ui_out_field_uint (uiout
, "insn end", end
);
976 /* Compute the lowest and highest source line for the instructions in BFUN
977 and return them in PBEGIN and PEND.
978 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
979 result from inlining or macro expansion. */
982 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
983 int *pbegin
, int *pend
)
985 struct btrace_insn
*insn
;
986 struct symtab
*symtab
;
998 symtab
= symbol_symtab (sym
);
1000 for (idx
= 0; VEC_iterate (btrace_insn_s
, bfun
->insn
, idx
, insn
); ++idx
)
1002 struct symtab_and_line sal
;
1004 sal
= find_pc_line (insn
->pc
, 0);
1005 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
1008 begin
= std::min (begin
, sal
.line
);
1009 end
= std::max (end
, sal
.line
);
1017 /* Print the source line information for a function call history line. */
1020 btrace_call_history_src_line (struct ui_out
*uiout
,
1021 const struct btrace_function
*bfun
)
1030 uiout
->field_string ("file",
1031 symtab_to_filename_for_display (symbol_symtab (sym
)));
1033 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1038 uiout
->field_int ("min line", begin
);
1044 uiout
->field_int ("max line", end
);
1047 /* Get the name of a branch trace function. */
1050 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1052 struct minimal_symbol
*msym
;
1062 return SYMBOL_PRINT_NAME (sym
);
1063 else if (msym
!= NULL
)
1064 return MSYMBOL_PRINT_NAME (msym
);
1069 /* Disassemble a section of the recorded function trace. */
1072 btrace_call_history (struct ui_out
*uiout
,
1073 const struct btrace_thread_info
*btinfo
,
1074 const struct btrace_call_iterator
*begin
,
1075 const struct btrace_call_iterator
*end
,
1078 struct btrace_call_iterator it
;
1079 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1081 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1082 btrace_call_number (end
));
1084 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1086 const struct btrace_function
*bfun
;
1087 struct minimal_symbol
*msym
;
1090 bfun
= btrace_call_get (&it
);
1094 /* Print the function index. */
1095 ui_out_field_uint (uiout
, "index", bfun
->number
);
1098 /* Indicate gaps in the trace. */
1099 if (bfun
->errcode
!= 0)
1101 const struct btrace_config
*conf
;
1103 conf
= btrace_conf (btinfo
);
1105 /* We have trace so we must have a configuration. */
1106 gdb_assert (conf
!= NULL
);
1108 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1113 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1115 int level
= bfun
->level
+ btinfo
->level
, i
;
1117 for (i
= 0; i
< level
; ++i
)
1122 uiout
->field_string ("function", SYMBOL_PRINT_NAME (sym
));
1123 else if (msym
!= NULL
)
1124 uiout
->field_string ("function", MSYMBOL_PRINT_NAME (msym
));
1125 else if (!uiout
->is_mi_like_p ())
1126 uiout
->field_string ("function", "??");
1128 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1130 uiout
->text (_("\tinst "));
1131 btrace_call_history_insn_range (uiout
, bfun
);
1134 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1136 uiout
->text (_("\tat "));
1137 btrace_call_history_src_line (uiout
, bfun
);
1144 /* The to_call_history method of target record-btrace. */
1147 record_btrace_call_history (struct target_ops
*self
, int size
, int int_flags
)
1149 struct btrace_thread_info
*btinfo
;
1150 struct btrace_call_history
*history
;
1151 struct btrace_call_iterator begin
, end
;
1152 struct cleanup
*uiout_cleanup
;
1153 struct ui_out
*uiout
;
1154 unsigned int context
, covered
;
1155 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1157 uiout
= current_uiout
;
1158 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1160 context
= abs (size
);
1162 error (_("Bad record function-call-history-size."));
1164 btinfo
= require_btrace ();
1165 history
= btinfo
->call_history
;
1166 if (history
== NULL
)
1168 struct btrace_insn_iterator
*replay
;
1170 DEBUG ("call-history (0x%x): %d", int_flags
, size
);
1172 /* If we're replaying, we start at the replay position. Otherwise, we
1173 start at the tail of the trace. */
1174 replay
= btinfo
->replay
;
1177 begin
.function
= replay
->function
;
1178 begin
.btinfo
= btinfo
;
1181 btrace_call_end (&begin
, btinfo
);
1183 /* We start from here and expand in the requested direction. Then we
1184 expand in the other direction, as well, to fill up any remaining
1189 /* We want the current position covered, as well. */
1190 covered
= btrace_call_next (&end
, 1);
1191 covered
+= btrace_call_prev (&begin
, context
- covered
);
1192 covered
+= btrace_call_next (&end
, context
- covered
);
1196 covered
= btrace_call_next (&end
, context
);
1197 covered
+= btrace_call_prev (&begin
, context
- covered
);
1202 begin
= history
->begin
;
1205 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags
, size
,
1206 btrace_call_number (&begin
), btrace_call_number (&end
));
1211 covered
= btrace_call_prev (&begin
, context
);
1216 covered
= btrace_call_next (&end
, context
);
1221 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1225 printf_unfiltered (_("At the start of the branch trace record.\n"));
1227 printf_unfiltered (_("At the end of the branch trace record.\n"));
1230 btrace_set_call_history (btinfo
, &begin
, &end
);
1231 do_cleanups (uiout_cleanup
);
1234 /* The to_call_history_range method of target record-btrace. */
1237 record_btrace_call_history_range (struct target_ops
*self
,
1238 ULONGEST from
, ULONGEST to
,
1241 struct btrace_thread_info
*btinfo
;
1242 struct btrace_call_history
*history
;
1243 struct btrace_call_iterator begin
, end
;
1244 struct cleanup
*uiout_cleanup
;
1245 struct ui_out
*uiout
;
1246 unsigned int low
, high
;
1248 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1250 uiout
= current_uiout
;
1251 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1256 DEBUG ("call-history (0x%x): [%u; %u)", int_flags
, low
, high
);
1258 /* Check for wrap-arounds. */
1259 if (low
!= from
|| high
!= to
)
1260 error (_("Bad range."));
1263 error (_("Bad range."));
1265 btinfo
= require_btrace ();
1267 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1269 error (_("Range out of bounds."));
1271 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1274 /* Silently truncate the range. */
1275 btrace_call_end (&end
, btinfo
);
1279 /* We want both begin and end to be inclusive. */
1280 btrace_call_next (&end
, 1);
1283 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1284 btrace_set_call_history (btinfo
, &begin
, &end
);
1286 do_cleanups (uiout_cleanup
);
1289 /* The to_call_history_from method of target record-btrace. */
1292 record_btrace_call_history_from (struct target_ops
*self
,
1293 ULONGEST from
, int size
,
1296 ULONGEST begin
, end
, context
;
1297 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1299 context
= abs (size
);
1301 error (_("Bad record function-call-history-size."));
1310 begin
= from
- context
+ 1;
1315 end
= from
+ context
- 1;
1317 /* Check for wrap-around. */
1322 record_btrace_call_history_range (self
, begin
, end
, flags
);
1325 /* The to_record_is_replaying method of target record-btrace. */
1328 record_btrace_is_replaying (struct target_ops
*self
, ptid_t ptid
)
1330 struct thread_info
*tp
;
1332 ALL_NON_EXITED_THREADS (tp
)
1333 if (ptid_match (tp
->ptid
, ptid
) && btrace_is_replaying (tp
))
1339 /* The to_record_will_replay method of target record-btrace. */
1342 record_btrace_will_replay (struct target_ops
*self
, ptid_t ptid
, int dir
)
1344 return dir
== EXEC_REVERSE
|| record_btrace_is_replaying (self
, ptid
);
1347 /* The to_xfer_partial method of target record-btrace. */
1349 static enum target_xfer_status
1350 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1351 const char *annex
, gdb_byte
*readbuf
,
1352 const gdb_byte
*writebuf
, ULONGEST offset
,
1353 ULONGEST len
, ULONGEST
*xfered_len
)
1355 struct target_ops
*t
;
1357 /* Filter out requests that don't make sense during replay. */
1358 if (replay_memory_access
== replay_memory_access_read_only
1359 && !record_btrace_generating_corefile
1360 && record_btrace_is_replaying (ops
, inferior_ptid
))
1364 case TARGET_OBJECT_MEMORY
:
1366 struct target_section
*section
;
1368 /* We do not allow writing memory in general. */
1369 if (writebuf
!= NULL
)
1372 return TARGET_XFER_UNAVAILABLE
;
1375 /* We allow reading readonly memory. */
1376 section
= target_section_by_addr (ops
, offset
);
1377 if (section
!= NULL
)
1379 /* Check if the section we found is readonly. */
1380 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1381 section
->the_bfd_section
)
1382 & SEC_READONLY
) != 0)
1384 /* Truncate the request to fit into this section. */
1385 len
= std::min (len
, section
->endaddr
- offset
);
1391 return TARGET_XFER_UNAVAILABLE
;
1396 /* Forward the request. */
1398 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1399 offset
, len
, xfered_len
);
1402 /* The to_insert_breakpoint method of target record-btrace. */
1405 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1406 struct gdbarch
*gdbarch
,
1407 struct bp_target_info
*bp_tgt
)
1412 /* Inserting breakpoints requires accessing memory. Allow it for the
1413 duration of this function. */
1414 old
= replay_memory_access
;
1415 replay_memory_access
= replay_memory_access_read_write
;
1420 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1422 CATCH (except
, RETURN_MASK_ALL
)
1424 replay_memory_access
= old
;
1425 throw_exception (except
);
1428 replay_memory_access
= old
;
1433 /* The to_remove_breakpoint method of target record-btrace. */
1436 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1437 struct gdbarch
*gdbarch
,
1438 struct bp_target_info
*bp_tgt
,
1439 enum remove_bp_reason reason
)
1444 /* Removing breakpoints requires accessing memory. Allow it for the
1445 duration of this function. */
1446 old
= replay_memory_access
;
1447 replay_memory_access
= replay_memory_access_read_write
;
1452 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
,
1455 CATCH (except
, RETURN_MASK_ALL
)
1457 replay_memory_access
= old
;
1458 throw_exception (except
);
1461 replay_memory_access
= old
;
1466 /* The to_fetch_registers method of target record-btrace. */
1469 record_btrace_fetch_registers (struct target_ops
*ops
,
1470 struct regcache
*regcache
, int regno
)
1472 struct btrace_insn_iterator
*replay
;
1473 struct thread_info
*tp
;
1475 tp
= find_thread_ptid (inferior_ptid
);
1476 gdb_assert (tp
!= NULL
);
1478 replay
= tp
->btrace
.replay
;
1479 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1481 const struct btrace_insn
*insn
;
1482 struct gdbarch
*gdbarch
;
1485 gdbarch
= get_regcache_arch (regcache
);
1486 pcreg
= gdbarch_pc_regnum (gdbarch
);
1490 /* We can only provide the PC register. */
1491 if (regno
>= 0 && regno
!= pcreg
)
1494 insn
= btrace_insn_get (replay
);
1495 gdb_assert (insn
!= NULL
);
1497 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1501 struct target_ops
*t
= ops
->beneath
;
1503 t
->to_fetch_registers (t
, regcache
, regno
);
1507 /* The to_store_registers method of target record-btrace. */
1510 record_btrace_store_registers (struct target_ops
*ops
,
1511 struct regcache
*regcache
, int regno
)
1513 struct target_ops
*t
;
1515 if (!record_btrace_generating_corefile
1516 && record_btrace_is_replaying (ops
, inferior_ptid
))
1517 error (_("Cannot write registers while replaying."));
1519 gdb_assert (may_write_registers
!= 0);
1522 t
->to_store_registers (t
, regcache
, regno
);
1525 /* The to_prepare_to_store method of target record-btrace. */
1528 record_btrace_prepare_to_store (struct target_ops
*ops
,
1529 struct regcache
*regcache
)
1531 struct target_ops
*t
;
1533 if (!record_btrace_generating_corefile
1534 && record_btrace_is_replaying (ops
, inferior_ptid
))
1538 t
->to_prepare_to_store (t
, regcache
);
/* The branch trace frame cache.  Associates a frame with the btrace
   function segment it was built from.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1555 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1557 static htab_t bfcache
;
1559 /* hash_f for htab_create_alloc of bfcache. */
1562 bfcache_hash (const void *arg
)
1564 const struct btrace_frame_cache
*cache
1565 = (const struct btrace_frame_cache
*) arg
;
1567 return htab_hash_pointer (cache
->frame
);
1570 /* eq_f for htab_create_alloc of bfcache. */
1573 bfcache_eq (const void *arg1
, const void *arg2
)
1575 const struct btrace_frame_cache
*cache1
1576 = (const struct btrace_frame_cache
*) arg1
;
1577 const struct btrace_frame_cache
*cache2
1578 = (const struct btrace_frame_cache
*) arg2
;
1580 return cache1
->frame
== cache2
->frame
;
1583 /* Create a new btrace frame cache. */
1585 static struct btrace_frame_cache
*
1586 bfcache_new (struct frame_info
*frame
)
1588 struct btrace_frame_cache
*cache
;
1591 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1592 cache
->frame
= frame
;
1594 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1595 gdb_assert (*slot
== NULL
);
1601 /* Extract the branch trace function from a branch trace frame. */
1603 static const struct btrace_function
*
1604 btrace_get_frame_function (struct frame_info
*frame
)
1606 const struct btrace_frame_cache
*cache
;
1607 const struct btrace_function
*bfun
;
1608 struct btrace_frame_cache pattern
;
1611 pattern
.frame
= frame
;
1613 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1617 cache
= (const struct btrace_frame_cache
*) *slot
;
1621 /* Implement stop_reason method for record_btrace_frame_unwind. */
1623 static enum unwind_stop_reason
1624 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1627 const struct btrace_frame_cache
*cache
;
1628 const struct btrace_function
*bfun
;
1630 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1632 gdb_assert (bfun
!= NULL
);
1634 if (bfun
->up
== NULL
)
1635 return UNWIND_UNAVAILABLE
;
1637 return UNWIND_NO_REASON
;
1640 /* Implement this_id method for record_btrace_frame_unwind. */
1643 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1644 struct frame_id
*this_id
)
1646 const struct btrace_frame_cache
*cache
;
1647 const struct btrace_function
*bfun
;
1648 CORE_ADDR code
, special
;
1650 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1653 gdb_assert (bfun
!= NULL
);
1655 while (bfun
->segment
.prev
!= NULL
)
1656 bfun
= bfun
->segment
.prev
;
1658 code
= get_frame_func (this_frame
);
1659 special
= bfun
->number
;
1661 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1663 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1664 btrace_get_bfun_name (cache
->bfun
),
1665 core_addr_to_string_nz (this_id
->code_addr
),
1666 core_addr_to_string_nz (this_id
->special_addr
));
1669 /* Implement prev_register method for record_btrace_frame_unwind. */
1671 static struct value
*
1672 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1676 const struct btrace_frame_cache
*cache
;
1677 const struct btrace_function
*bfun
, *caller
;
1678 const struct btrace_insn
*insn
;
1679 struct gdbarch
*gdbarch
;
1683 gdbarch
= get_frame_arch (this_frame
);
1684 pcreg
= gdbarch_pc_regnum (gdbarch
);
1685 if (pcreg
< 0 || regnum
!= pcreg
)
1686 throw_error (NOT_AVAILABLE_ERROR
,
1687 _("Registers are not available in btrace record history"));
1689 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1691 gdb_assert (bfun
!= NULL
);
1695 throw_error (NOT_AVAILABLE_ERROR
,
1696 _("No caller in btrace record history"));
1698 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1700 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1705 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1708 pc
+= gdb_insn_length (gdbarch
, pc
);
1711 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1712 btrace_get_bfun_name (bfun
), bfun
->level
,
1713 core_addr_to_string_nz (pc
));
1715 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1718 /* Implement sniffer method for record_btrace_frame_unwind. */
1721 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1722 struct frame_info
*this_frame
,
1725 const struct btrace_function
*bfun
;
1726 struct btrace_frame_cache
*cache
;
1727 struct thread_info
*tp
;
1728 struct frame_info
*next
;
1730 /* THIS_FRAME does not contain a reference to its thread. */
1731 tp
= find_thread_ptid (inferior_ptid
);
1732 gdb_assert (tp
!= NULL
);
1735 next
= get_next_frame (this_frame
);
1738 const struct btrace_insn_iterator
*replay
;
1740 replay
= tp
->btrace
.replay
;
1742 bfun
= replay
->function
;
1746 const struct btrace_function
*callee
;
1748 callee
= btrace_get_frame_function (next
);
1749 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1756 DEBUG ("[frame] sniffed frame for %s on level %d",
1757 btrace_get_bfun_name (bfun
), bfun
->level
);
1759 /* This is our frame. Initialize the frame cache. */
1760 cache
= bfcache_new (this_frame
);
1764 *this_cache
= cache
;
1768 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1771 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1772 struct frame_info
*this_frame
,
1775 const struct btrace_function
*bfun
, *callee
;
1776 struct btrace_frame_cache
*cache
;
1777 struct frame_info
*next
;
1779 next
= get_next_frame (this_frame
);
1783 callee
= btrace_get_frame_function (next
);
1787 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1794 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1795 btrace_get_bfun_name (bfun
), bfun
->level
);
1797 /* This is our frame. Initialize the frame cache. */
1798 cache
= bfcache_new (this_frame
);
1799 cache
->tp
= find_thread_ptid (inferior_ptid
);
1802 *this_cache
= cache
;
1807 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1809 struct btrace_frame_cache
*cache
;
1812 cache
= (struct btrace_frame_cache
*) this_cache
;
1814 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1815 gdb_assert (slot
!= NULL
);
1817 htab_remove_elt (bfcache
, cache
);
1820 /* btrace recording does not store previous memory content, neither the stack
1821 frames content. Any unwinding would return errorneous results as the stack
1822 contents no longer matches the changed PC value restored from history.
1823 Therefore this unwinder reports any possibly unwound registers as
1826 const struct frame_unwind record_btrace_frame_unwind
=
1829 record_btrace_frame_unwind_stop_reason
,
1830 record_btrace_frame_this_id
,
1831 record_btrace_frame_prev_register
,
1833 record_btrace_frame_sniffer
,
1834 record_btrace_frame_dealloc_cache
1837 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1840 record_btrace_frame_unwind_stop_reason
,
1841 record_btrace_frame_this_id
,
1842 record_btrace_frame_prev_register
,
1844 record_btrace_tailcall_frame_sniffer
,
1845 record_btrace_frame_dealloc_cache
1848 /* Implement the to_get_unwinder method. */
1850 static const struct frame_unwind
*
1851 record_btrace_to_get_unwinder (struct target_ops
*self
)
1853 return &record_btrace_frame_unwind
;
1856 /* Implement the to_get_tailcall_unwinder method. */
1858 static const struct frame_unwind
*
1859 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1861 return &record_btrace_tailcall_frame_unwind
;
1864 /* Return a human-readable string for FLAG. */
1867 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1875 return "reverse-step";
1881 return "reverse-cont";
1890 /* Indicate that TP should be resumed according to FLAG. */
1893 record_btrace_resume_thread (struct thread_info
*tp
,
1894 enum btrace_thread_flag flag
)
1896 struct btrace_thread_info
*btinfo
;
1898 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1899 target_pid_to_str (tp
->ptid
), flag
, btrace_thread_flag_to_str (flag
));
1901 btinfo
= &tp
->btrace
;
1903 /* Fetch the latest branch trace. */
1906 /* A resume request overwrites a preceding resume or stop request. */
1907 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1908 btinfo
->flags
|= flag
;
1911 /* Get the current frame for TP. */
1913 static struct frame_info
*
1914 get_thread_current_frame (struct thread_info
*tp
)
1916 struct frame_info
*frame
;
1917 ptid_t old_inferior_ptid
;
1920 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1921 old_inferior_ptid
= inferior_ptid
;
1922 inferior_ptid
= tp
->ptid
;
1924 /* Clear the executing flag to allow changes to the current frame.
1925 We are not actually running, yet. We just started a reverse execution
1926 command or a record goto command.
1927 For the latter, EXECUTING is false and this has no effect.
1928 For the former, EXECUTING is true and we're in to_wait, about to
1929 move the thread. Since we need to recompute the stack, we temporarily
1930 set EXECUTING to flase. */
1931 executing
= is_executing (inferior_ptid
);
1932 set_executing (inferior_ptid
, 0);
1937 frame
= get_current_frame ();
1939 CATCH (except
, RETURN_MASK_ALL
)
1941 /* Restore the previous execution state. */
1942 set_executing (inferior_ptid
, executing
);
1944 /* Restore the previous inferior_ptid. */
1945 inferior_ptid
= old_inferior_ptid
;
1947 throw_exception (except
);
1951 /* Restore the previous execution state. */
1952 set_executing (inferior_ptid
, executing
);
1954 /* Restore the previous inferior_ptid. */
1955 inferior_ptid
= old_inferior_ptid
;
1960 /* Start replaying a thread. */
1962 static struct btrace_insn_iterator
*
1963 record_btrace_start_replaying (struct thread_info
*tp
)
1965 struct btrace_insn_iterator
*replay
;
1966 struct btrace_thread_info
*btinfo
;
1968 btinfo
= &tp
->btrace
;
1971 /* We can't start replaying without trace. */
1972 if (btinfo
->begin
== NULL
)
1975 /* GDB stores the current frame_id when stepping in order to detects steps
1977 Since frames are computed differently when we're replaying, we need to
1978 recompute those stored frames and fix them up so we can still detect
1979 subroutines after we started replaying. */
1982 struct frame_info
*frame
;
1983 struct frame_id frame_id
;
1984 int upd_step_frame_id
, upd_step_stack_frame_id
;
1986 /* The current frame without replaying - computed via normal unwind. */
1987 frame
= get_thread_current_frame (tp
);
1988 frame_id
= get_frame_id (frame
);
1990 /* Check if we need to update any stepping-related frame id's. */
1991 upd_step_frame_id
= frame_id_eq (frame_id
,
1992 tp
->control
.step_frame_id
);
1993 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1994 tp
->control
.step_stack_frame_id
);
1996 /* We start replaying at the end of the branch trace. This corresponds
1997 to the current instruction. */
1998 replay
= XNEW (struct btrace_insn_iterator
);
1999 btrace_insn_end (replay
, btinfo
);
2001 /* Skip gaps at the end of the trace. */
2002 while (btrace_insn_get (replay
) == NULL
)
2006 steps
= btrace_insn_prev (replay
, 1);
2008 error (_("No trace."));
2011 /* We're not replaying, yet. */
2012 gdb_assert (btinfo
->replay
== NULL
);
2013 btinfo
->replay
= replay
;
2015 /* Make sure we're not using any stale registers. */
2016 registers_changed_ptid (tp
->ptid
);
2018 /* The current frame with replaying - computed via btrace unwind. */
2019 frame
= get_thread_current_frame (tp
);
2020 frame_id
= get_frame_id (frame
);
2022 /* Replace stepping related frames where necessary. */
2023 if (upd_step_frame_id
)
2024 tp
->control
.step_frame_id
= frame_id
;
2025 if (upd_step_stack_frame_id
)
2026 tp
->control
.step_stack_frame_id
= frame_id
;
2028 CATCH (except
, RETURN_MASK_ALL
)
2030 xfree (btinfo
->replay
);
2031 btinfo
->replay
= NULL
;
2033 registers_changed_ptid (tp
->ptid
);
2035 throw_exception (except
);
2042 /* Stop replaying a thread. */
2045 record_btrace_stop_replaying (struct thread_info
*tp
)
2047 struct btrace_thread_info
*btinfo
;
2049 btinfo
= &tp
->btrace
;
2051 xfree (btinfo
->replay
);
2052 btinfo
->replay
= NULL
;
2054 /* Make sure we're not leaving any stale registers. */
2055 registers_changed_ptid (tp
->ptid
);
2058 /* Stop replaying TP if it is at the end of its execution history. */
2061 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2063 struct btrace_insn_iterator
*replay
, end
;
2064 struct btrace_thread_info
*btinfo
;
2066 btinfo
= &tp
->btrace
;
2067 replay
= btinfo
->replay
;
2072 btrace_insn_end (&end
, btinfo
);
2074 if (btrace_insn_cmp (replay
, &end
) == 0)
2075 record_btrace_stop_replaying (tp
);
2078 /* The to_resume method of target record-btrace. */
2081 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
2082 enum gdb_signal signal
)
2084 struct thread_info
*tp
;
2085 enum btrace_thread_flag flag
, cflag
;
2087 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
),
2088 execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2089 step
? "step" : "cont");
2091 /* Store the execution direction of the last resume.
2093 If there is more than one to_resume call, we have to rely on infrun
2094 to not change the execution direction in-between. */
2095 record_btrace_resume_exec_dir
= execution_direction
;
2097 /* As long as we're not replaying, just forward the request.
2099 For non-stop targets this means that no thread is replaying. In order to
2100 make progress, we may need to explicitly move replaying threads to the end
2101 of their execution history. */
2102 if ((execution_direction
!= EXEC_REVERSE
)
2103 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2106 ops
->to_resume (ops
, ptid
, step
, signal
);
2110 /* Compute the btrace thread flag for the requested move. */
2111 if (execution_direction
== EXEC_REVERSE
)
2113 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2118 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2122 /* We just indicate the resume intent here. The actual stepping happens in
2123 record_btrace_wait below.
2125 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2126 if (!target_is_non_stop_p ())
2128 gdb_assert (ptid_match (inferior_ptid
, ptid
));
2130 ALL_NON_EXITED_THREADS (tp
)
2131 if (ptid_match (tp
->ptid
, ptid
))
2133 if (ptid_match (tp
->ptid
, inferior_ptid
))
2134 record_btrace_resume_thread (tp
, flag
);
2136 record_btrace_resume_thread (tp
, cflag
);
2141 ALL_NON_EXITED_THREADS (tp
)
2142 if (ptid_match (tp
->ptid
, ptid
))
2143 record_btrace_resume_thread (tp
, flag
);
2146 /* Async support. */
2147 if (target_can_async_p ())
2150 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2154 /* The to_commit_resume method of target record-btrace. */
2157 record_btrace_commit_resume (struct target_ops
*ops
)
2159 if ((execution_direction
!= EXEC_REVERSE
)
2160 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2161 ops
->beneath
->to_commit_resume (ops
->beneath
);
2164 /* Cancel resuming TP. */
2167 record_btrace_cancel_resume (struct thread_info
*tp
)
2169 enum btrace_thread_flag flags
;
2171 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2175 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2176 print_thread_id (tp
),
2177 target_pid_to_str (tp
->ptid
), flags
,
2178 btrace_thread_flag_to_str (flags
));
2180 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2181 record_btrace_stop_replaying_at_end (tp
);
2184 /* Return a target_waitstatus indicating that we ran out of history. */
2186 static struct target_waitstatus
2187 btrace_step_no_history (void)
2189 struct target_waitstatus status
;
2191 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2196 /* Return a target_waitstatus indicating that a step finished. */
2198 static struct target_waitstatus
2199 btrace_step_stopped (void)
2201 struct target_waitstatus status
;
2203 status
.kind
= TARGET_WAITKIND_STOPPED
;
2204 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2209 /* Return a target_waitstatus indicating that a thread was stopped as
2212 static struct target_waitstatus
2213 btrace_step_stopped_on_request (void)
2215 struct target_waitstatus status
;
2217 status
.kind
= TARGET_WAITKIND_STOPPED
;
2218 status
.value
.sig
= GDB_SIGNAL_0
;
2223 /* Return a target_waitstatus indicating a spurious stop. */
2225 static struct target_waitstatus
2226 btrace_step_spurious (void)
2228 struct target_waitstatus status
;
2230 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2235 /* Return a target_waitstatus indicating that the thread was not resumed. */
2237 static struct target_waitstatus
2238 btrace_step_no_resumed (void)
2240 struct target_waitstatus status
;
2242 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2247 /* Return a target_waitstatus indicating that we should wait again. */
2249 static struct target_waitstatus
2250 btrace_step_again (void)
2252 struct target_waitstatus status
;
2254 status
.kind
= TARGET_WAITKIND_IGNORE
;
2259 /* Clear the record histories. */
2262 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2264 xfree (btinfo
->insn_history
);
2265 xfree (btinfo
->call_history
);
2267 btinfo
->insn_history
= NULL
;
2268 btinfo
->call_history
= NULL
;
2271 /* Check whether TP's current replay position is at a breakpoint. */
2274 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2276 struct btrace_insn_iterator
*replay
;
2277 struct btrace_thread_info
*btinfo
;
2278 const struct btrace_insn
*insn
;
2279 struct inferior
*inf
;
2281 btinfo
= &tp
->btrace
;
2282 replay
= btinfo
->replay
;
2287 insn
= btrace_insn_get (replay
);
2291 inf
= find_inferior_ptid (tp
->ptid
);
2295 return record_check_stopped_by_breakpoint (inf
->aspace
, insn
->pc
,
2296 &btinfo
->stop_reason
);
2299 /* Step one instruction in forward direction. */
2301 static struct target_waitstatus
2302 record_btrace_single_step_forward (struct thread_info
*tp
)
2304 struct btrace_insn_iterator
*replay
, end
, start
;
2305 struct btrace_thread_info
*btinfo
;
2307 btinfo
= &tp
->btrace
;
2308 replay
= btinfo
->replay
;
2310 /* We're done if we're not replaying. */
2312 return btrace_step_no_history ();
2314 /* Check if we're stepping a breakpoint. */
2315 if (record_btrace_replay_at_breakpoint (tp
))
2316 return btrace_step_stopped ();
2318 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2319 jump back to the instruction at which we started. */
2325 /* We will bail out here if we continue stepping after reaching the end
2326 of the execution history. */
2327 steps
= btrace_insn_next (replay
, 1);
2331 return btrace_step_no_history ();
2334 while (btrace_insn_get (replay
) == NULL
);
2336 /* Determine the end of the instruction trace. */
2337 btrace_insn_end (&end
, btinfo
);
2339 /* The execution trace contains (and ends with) the current instruction.
2340 This instruction has not been executed, yet, so the trace really ends
2341 one instruction earlier. */
2342 if (btrace_insn_cmp (replay
, &end
) == 0)
2343 return btrace_step_no_history ();
2345 return btrace_step_spurious ();
2348 /* Step one instruction in backward direction. */
2350 static struct target_waitstatus
2351 record_btrace_single_step_backward (struct thread_info
*tp
)
2353 struct btrace_insn_iterator
*replay
, start
;
2354 struct btrace_thread_info
*btinfo
;
2356 btinfo
= &tp
->btrace
;
2357 replay
= btinfo
->replay
;
2359 /* Start replaying if we're not already doing so. */
2361 replay
= record_btrace_start_replaying (tp
);
2363 /* If we can't step any further, we reached the end of the history.
2364 Skip gaps during replay. If we end up at a gap (at the beginning of
2365 the trace), jump back to the instruction at which we started. */
2371 steps
= btrace_insn_prev (replay
, 1);
2375 return btrace_step_no_history ();
2378 while (btrace_insn_get (replay
) == NULL
);
2380 /* Check if we're stepping a breakpoint.
2382 For reverse-stepping, this check is after the step. There is logic in
2383 infrun.c that handles reverse-stepping separately. See, for example,
2384 proceed and adjust_pc_after_break.
2386 This code assumes that for reverse-stepping, PC points to the last
2387 de-executed instruction, whereas for forward-stepping PC points to the
2388 next to-be-executed instruction. */
2389 if (record_btrace_replay_at_breakpoint (tp
))
2390 return btrace_step_stopped ();
2392 return btrace_step_spurious ();
2395 /* Step a single thread. */
2397 static struct target_waitstatus
2398 record_btrace_step_thread (struct thread_info
*tp
)
2400 struct btrace_thread_info
*btinfo
;
2401 struct target_waitstatus status
;
2402 enum btrace_thread_flag flags
;
2404 btinfo
= &tp
->btrace
;
2406 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2407 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2409 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2410 target_pid_to_str (tp
->ptid
), flags
,
2411 btrace_thread_flag_to_str (flags
));
2413 /* We can't step without an execution history. */
2414 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2415 return btrace_step_no_history ();
2420 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2423 return btrace_step_stopped_on_request ();
2426 status
= record_btrace_single_step_forward (tp
);
2427 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2430 return btrace_step_stopped ();
2433 status
= record_btrace_single_step_backward (tp
);
2434 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2437 return btrace_step_stopped ();
2440 status
= record_btrace_single_step_forward (tp
);
2441 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2444 btinfo
->flags
|= flags
;
2445 return btrace_step_again ();
2448 status
= record_btrace_single_step_backward (tp
);
2449 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2452 btinfo
->flags
|= flags
;
2453 return btrace_step_again ();
2456 /* We keep threads moving at the end of their execution history. The to_wait
2457 method will stop the thread for whom the event is reported. */
2458 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2459 btinfo
->flags
|= flags
;
/* A vector of threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2469 /* Announce further events if necessary. */
2472 record_btrace_maybe_mark_async_event (const VEC (tp_t
) *moving
,
2473 const VEC (tp_t
) *no_history
)
2475 int more_moving
, more_no_history
;
2477 more_moving
= !VEC_empty (tp_t
, moving
);
2478 more_no_history
= !VEC_empty (tp_t
, no_history
);
2480 if (!more_moving
&& !more_no_history
)
2484 DEBUG ("movers pending");
2486 if (more_no_history
)
2487 DEBUG ("no-history pending");
2489 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2492 /* The to_wait method of target record-btrace. */
2495 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2496 struct target_waitstatus
*status
, int options
)
2498 VEC (tp_t
) *moving
, *no_history
;
2499 struct thread_info
*tp
, *eventing
;
2500 struct cleanup
*cleanups
= make_cleanup (null_cleanup
, NULL
);
2502 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2504 /* As long as we're not replaying, just forward the request. */
2505 if ((execution_direction
!= EXEC_REVERSE
)
2506 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2509 return ops
->to_wait (ops
, ptid
, status
, options
);
2515 make_cleanup (VEC_cleanup (tp_t
), &moving
);
2516 make_cleanup (VEC_cleanup (tp_t
), &no_history
);
2518 /* Keep a work list of moving threads. */
2519 ALL_NON_EXITED_THREADS (tp
)
2520 if (ptid_match (tp
->ptid
, ptid
)
2521 && ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0))
2522 VEC_safe_push (tp_t
, moving
, tp
);
2524 if (VEC_empty (tp_t
, moving
))
2526 *status
= btrace_step_no_resumed ();
2528 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
),
2529 target_waitstatus_to_string (status
));
2531 do_cleanups (cleanups
);
2535 /* Step moving threads one by one, one step each, until either one thread
2536 reports an event or we run out of threads to step.
2538 When stepping more than one thread, chances are that some threads reach
2539 the end of their execution history earlier than others. If we reported
2540 this immediately, all-stop on top of non-stop would stop all threads and
2541 resume the same threads next time. And we would report the same thread
2542 having reached the end of its execution history again.
2544 In the worst case, this would starve the other threads. But even if other
2545 threads would be allowed to make progress, this would result in far too
2546 many intermediate stops.
2548 We therefore delay the reporting of "no execution history" until we have
2549 nothing else to report. By this time, all threads should have moved to
2550 either the beginning or the end of their execution history. There will
2551 be a single user-visible stop. */
2553 while ((eventing
== NULL
) && !VEC_empty (tp_t
, moving
))
2558 while ((eventing
== NULL
) && VEC_iterate (tp_t
, moving
, ix
, tp
))
2560 *status
= record_btrace_step_thread (tp
);
2562 switch (status
->kind
)
2564 case TARGET_WAITKIND_IGNORE
:
2568 case TARGET_WAITKIND_NO_HISTORY
:
2569 VEC_safe_push (tp_t
, no_history
,
2570 VEC_ordered_remove (tp_t
, moving
, ix
));
2574 eventing
= VEC_unordered_remove (tp_t
, moving
, ix
);
2580 if (eventing
== NULL
)
2582 /* We started with at least one moving thread. This thread must have
2583 either stopped or reached the end of its execution history.
2585 In the former case, EVENTING must not be NULL.
2586 In the latter case, NO_HISTORY must not be empty. */
2587 gdb_assert (!VEC_empty (tp_t
, no_history
));
2589 /* We kept threads moving at the end of their execution history. Stop
2590 EVENTING now that we are going to report its stop. */
2591 eventing
= VEC_unordered_remove (tp_t
, no_history
, 0);
2592 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2594 *status
= btrace_step_no_history ();
2597 gdb_assert (eventing
!= NULL
);
2599 /* We kept threads replaying at the end of their execution history. Stop
2600 replaying EVENTING now that we are going to report its stop. */
2601 record_btrace_stop_replaying_at_end (eventing
);
2603 /* Stop all other threads. */
2604 if (!target_is_non_stop_p ())
2605 ALL_NON_EXITED_THREADS (tp
)
2606 record_btrace_cancel_resume (tp
);
2608 /* In async mode, we need to announce further events. */
2609 if (target_is_async_p ())
2610 record_btrace_maybe_mark_async_event (moving
, no_history
);
2612 /* Start record histories anew from the current position. */
2613 record_btrace_clear_histories (&eventing
->btrace
);
2615 /* We moved the replay position but did not update registers. */
2616 registers_changed_ptid (eventing
->ptid
);
2618 DEBUG ("wait ended by thread %s (%s): %s",
2619 print_thread_id (eventing
),
2620 target_pid_to_str (eventing
->ptid
),
2621 target_waitstatus_to_string (status
));
2623 do_cleanups (cleanups
);
2624 return eventing
->ptid
;
2627 /* The to_stop method of target record-btrace. */
2630 record_btrace_stop (struct target_ops
*ops
, ptid_t ptid
)
2632 DEBUG ("stop %s", target_pid_to_str (ptid
));
2634 /* As long as we're not replaying, just forward the request. */
2635 if ((execution_direction
!= EXEC_REVERSE
)
2636 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2639 ops
->to_stop (ops
, ptid
);
2643 struct thread_info
*tp
;
2645 ALL_NON_EXITED_THREADS (tp
)
2646 if (ptid_match (tp
->ptid
, ptid
))
2648 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2649 tp
->btrace
.flags
|= BTHR_STOP
;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2662 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2665 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2667 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2669 struct thread_info
*tp
= inferior_thread ();
2671 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2674 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2677 /* The to_supports_stopped_by_sw_breakpoint method of target
2681 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2683 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2686 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2689 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2692 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2694 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2696 struct thread_info
*tp
= inferior_thread ();
2698 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2701 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2704 /* The to_supports_stopped_by_hw_breakpoint method of target
2708 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2710 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2713 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2716 /* The to_update_thread_list method of target record-btrace. */
2719 record_btrace_update_thread_list (struct target_ops
*ops
)
2721 /* We don't add or remove threads during replay. */
2722 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2725 /* Forward the request. */
2727 ops
->to_update_thread_list (ops
);
2730 /* The to_thread_alive method of target record-btrace. */
2733 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2735 /* We don't add or remove threads during replay. */
2736 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2737 return find_thread_ptid (ptid
) != NULL
;
2739 /* Forward the request. */
2741 return ops
->to_thread_alive (ops
, ptid
);
2744 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2748 record_btrace_set_replay (struct thread_info
*tp
,
2749 const struct btrace_insn_iterator
*it
)
2751 struct btrace_thread_info
*btinfo
;
2753 btinfo
= &tp
->btrace
;
2755 if (it
== NULL
|| it
->function
== NULL
)
2756 record_btrace_stop_replaying (tp
);
2759 if (btinfo
->replay
== NULL
)
2760 record_btrace_start_replaying (tp
);
2761 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2764 *btinfo
->replay
= *it
;
2765 registers_changed_ptid (tp
->ptid
);
2768 /* Start anew from the new replay position. */
2769 record_btrace_clear_histories (btinfo
);
2771 stop_pc
= regcache_read_pc (get_current_regcache ());
2772 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2775 /* The to_goto_record_begin method of target record-btrace. */
2778 record_btrace_goto_begin (struct target_ops
*self
)
2780 struct thread_info
*tp
;
2781 struct btrace_insn_iterator begin
;
2783 tp
= require_btrace_thread ();
2785 btrace_insn_begin (&begin
, &tp
->btrace
);
2787 /* Skip gaps at the beginning of the trace. */
2788 while (btrace_insn_get (&begin
) == NULL
)
2792 steps
= btrace_insn_next (&begin
, 1);
2794 error (_("No trace."));
2797 record_btrace_set_replay (tp
, &begin
);
2800 /* The to_goto_record_end method of target record-btrace. */
2803 record_btrace_goto_end (struct target_ops
*ops
)
2805 struct thread_info
*tp
;
2807 tp
= require_btrace_thread ();
2809 record_btrace_set_replay (tp
, NULL
);
2812 /* The to_goto_record method of target record-btrace. */
2815 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2817 struct thread_info
*tp
;
2818 struct btrace_insn_iterator it
;
2819 unsigned int number
;
2824 /* Check for wrap-arounds. */
2826 error (_("Instruction number out of range."));
2828 tp
= require_btrace_thread ();
2830 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2832 error (_("No such instruction."));
2834 record_btrace_set_replay (tp
, &it
);
2837 /* The to_record_stop_replaying method of target record-btrace. */
2840 record_btrace_stop_replaying_all (struct target_ops
*self
)
2842 struct thread_info
*tp
;
2844 ALL_NON_EXITED_THREADS (tp
)
2845 record_btrace_stop_replaying (tp
);
2848 /* The to_execution_direction target method. */
2850 static enum exec_direction_kind
2851 record_btrace_execution_direction (struct target_ops
*self
)
2853 return record_btrace_resume_exec_dir
;
2856 /* The to_prepare_to_generate_core target method. */
2859 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2861 record_btrace_generating_corefile
= 1;
2864 /* The to_done_generating_core target method. */
2867 record_btrace_done_generating_core (struct target_ops
*self
)
2869 record_btrace_generating_corefile
= 0;
2872 /* Initialize the record-btrace target ops. */
2875 init_record_btrace_ops (void)
2877 struct target_ops
*ops
;
2879 ops
= &record_btrace_ops
;
2880 ops
->to_shortname
= "record-btrace";
2881 ops
->to_longname
= "Branch tracing target";
2882 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2883 ops
->to_open
= record_btrace_open
;
2884 ops
->to_close
= record_btrace_close
;
2885 ops
->to_async
= record_btrace_async
;
2886 ops
->to_detach
= record_detach
;
2887 ops
->to_disconnect
= record_btrace_disconnect
;
2888 ops
->to_mourn_inferior
= record_mourn_inferior
;
2889 ops
->to_kill
= record_kill
;
2890 ops
->to_stop_recording
= record_btrace_stop_recording
;
2891 ops
->to_info_record
= record_btrace_info
;
2892 ops
->to_insn_history
= record_btrace_insn_history
;
2893 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2894 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2895 ops
->to_call_history
= record_btrace_call_history
;
2896 ops
->to_call_history_from
= record_btrace_call_history_from
;
2897 ops
->to_call_history_range
= record_btrace_call_history_range
;
2898 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2899 ops
->to_record_will_replay
= record_btrace_will_replay
;
2900 ops
->to_record_stop_replaying
= record_btrace_stop_replaying_all
;
2901 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2902 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2903 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2904 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2905 ops
->to_store_registers
= record_btrace_store_registers
;
2906 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2907 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2908 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2909 ops
->to_resume
= record_btrace_resume
;
2910 ops
->to_commit_resume
= record_btrace_commit_resume
;
2911 ops
->to_wait
= record_btrace_wait
;
2912 ops
->to_stop
= record_btrace_stop
;
2913 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2914 ops
->to_thread_alive
= record_btrace_thread_alive
;
2915 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2916 ops
->to_goto_record_end
= record_btrace_goto_end
;
2917 ops
->to_goto_record
= record_btrace_goto
;
2918 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2919 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2920 ops
->to_supports_stopped_by_sw_breakpoint
2921 = record_btrace_supports_stopped_by_sw_breakpoint
;
2922 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2923 ops
->to_supports_stopped_by_hw_breakpoint
2924 = record_btrace_supports_stopped_by_hw_breakpoint
;
2925 ops
->to_execution_direction
= record_btrace_execution_direction
;
2926 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2927 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2928 ops
->to_stratum
= record_stratum
;
2929 ops
->to_magic
= OPS_MAGIC
;
2932 /* Start recording in BTS format. */
2935 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2937 if (args
!= NULL
&& *args
!= 0)
2938 error (_("Invalid argument."));
2940 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2944 execute_command ("target record-btrace", from_tty
);
2946 CATCH (exception
, RETURN_MASK_ALL
)
2948 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2949 throw_exception (exception
);
2954 /* Start recording in Intel Processor Trace format. */
2957 cmd_record_btrace_pt_start (char *args
, int from_tty
)
2959 if (args
!= NULL
&& *args
!= 0)
2960 error (_("Invalid argument."));
2962 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2966 execute_command ("target record-btrace", from_tty
);
2968 CATCH (exception
, RETURN_MASK_ALL
)
2970 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2971 throw_exception (exception
);
2976 /* Alias for "target record". */
2979 cmd_record_btrace_start (char *args
, int from_tty
)
2981 if (args
!= NULL
&& *args
!= 0)
2982 error (_("Invalid argument."));
2984 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2988 execute_command ("target record-btrace", from_tty
);
2990 CATCH (exception
, RETURN_MASK_ALL
)
2992 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2996 execute_command ("target record-btrace", from_tty
);
2998 CATCH (exception
, RETURN_MASK_ALL
)
3000 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
3001 throw_exception (exception
);
3008 /* The "set record btrace" command. */
3011 cmd_set_record_btrace (char *args
, int from_tty
)
3013 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
3016 /* The "show record btrace" command. */
3019 cmd_show_record_btrace (char *args
, int from_tty
)
3021 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
3024 /* The "show record btrace replay-memory-access" command. */
3027 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
3028 struct cmd_list_element
*c
, const char *value
)
3030 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
3031 replay_memory_access
);
3034 /* The "set record btrace bts" command. */
3037 cmd_set_record_btrace_bts (char *args
, int from_tty
)
3039 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3040 "by an appropriate subcommand.\n"));
3041 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
3042 all_commands
, gdb_stdout
);
3045 /* The "show record btrace bts" command. */
3048 cmd_show_record_btrace_bts (char *args
, int from_tty
)
3050 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
3053 /* The "set record btrace pt" command. */
3056 cmd_set_record_btrace_pt (char *args
, int from_tty
)
3058 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3059 "by an appropriate subcommand.\n"));
3060 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
3061 all_commands
, gdb_stdout
);
3064 /* The "show record btrace pt" command. */
3067 cmd_show_record_btrace_pt (char *args
, int from_tty
)
3069 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3094 void _initialize_record_btrace (void);
3096 /* Initialize btrace commands. */
3099 _initialize_record_btrace (void)
3101 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3102 _("Start branch trace recording."), &record_btrace_cmdlist
,
3103 "record btrace ", 0, &record_cmdlist
);
3104 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3106 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3108 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3109 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3110 This format may not be available on all processors."),
3111 &record_btrace_cmdlist
);
3112 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3114 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3116 Start branch trace recording in Intel Processor Trace format.\n\n\
3117 This format may not be available on all processors."),
3118 &record_btrace_cmdlist
);
3119 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3121 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
3122 _("Set record options"), &set_record_btrace_cmdlist
,
3123 "set record btrace ", 0, &set_record_cmdlist
);
3125 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
3126 _("Show record options"), &show_record_btrace_cmdlist
,
3127 "show record btrace ", 0, &show_record_cmdlist
);
3129 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3130 replay_memory_access_types
, &replay_memory_access
, _("\
3131 Set what memory accesses are allowed during replay."), _("\
3132 Show what memory accesses are allowed during replay."),
3133 _("Default is READ-ONLY.\n\n\
3134 The btrace record target does not trace data.\n\
3135 The memory therefore corresponds to the live target and not \
3136 to the current replay position.\n\n\
3137 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3138 When READ-WRITE, allow accesses to read-only and read-write memory during \
3140 NULL
, cmd_show_replay_memory_access
,
3141 &set_record_btrace_cmdlist
,
3142 &show_record_btrace_cmdlist
);
3144 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
3145 _("Set record btrace bts options"),
3146 &set_record_btrace_bts_cmdlist
,
3147 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
3149 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
3150 _("Show record btrace bts options"),
3151 &show_record_btrace_bts_cmdlist
,
3152 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
3154 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3155 &record_btrace_conf
.bts
.size
,
3156 _("Set the record/replay bts buffer size."),
3157 _("Show the record/replay bts buffer size."), _("\
3158 When starting recording request a trace buffer of this size. \
3159 The actual buffer size may differ from the requested size. \
3160 Use \"info record\" to see the actual buffer size.\n\n\
3161 Bigger buffers allow longer recording but also take more time to process \
3162 the recorded execution trace.\n\n\
3163 The trace buffer size may not be changed while recording."), NULL
,
3164 show_record_bts_buffer_size_value
,
3165 &set_record_btrace_bts_cmdlist
,
3166 &show_record_btrace_bts_cmdlist
);
3168 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
3169 _("Set record btrace pt options"),
3170 &set_record_btrace_pt_cmdlist
,
3171 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
3173 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
3174 _("Show record btrace pt options"),
3175 &show_record_btrace_pt_cmdlist
,
3176 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
3178 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3179 &record_btrace_conf
.pt
.size
,
3180 _("Set the record/replay pt buffer size."),
3181 _("Show the record/replay pt buffer size."), _("\
3182 Bigger buffers allow longer recording but also take more time to process \
3183 the recorded execution.\n\
3184 The actual buffer size may differ from the requested size. Use \"info record\" \
3185 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3186 &set_record_btrace_pt_cmdlist
,
3187 &show_record_btrace_pt_cmdlist
);
3189 init_record_btrace_ops ();
3190 add_target (&record_btrace_ops
);
3192 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3195 record_btrace_conf
.bts
.size
= 64 * 1024;
3196 record_btrace_conf
.pt
.size
= 16 * 1024;