1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
/* NOTE(review): this region is garbled by extraction -- original source lines
   are elided and declarations are split mid-statement (the leading numbers are
   remnants of the original line numbering).  The replay_memory_access_types
   initializer is visibly truncated (missing terminator and closing brace).
   Restore from upstream gdb/record-btrace.c before further edits.  */
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops
;
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer
*record_btrace_thread_observer
;
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only
[] = "read-only";
50 static const char replay_memory_access_read_write
[] = "read-write";
51 static const char *const replay_memory_access_types
[] =
53 replay_memory_access_read_only
,
54 replay_memory_access_read_write
,
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access
= replay_memory_access_read_only
;
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element
*set_record_btrace_cmdlist
;
63 static struct cmd_list_element
*show_record_btrace_cmdlist
;
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile
;
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf
;
77 /* Command list for "record btrace". */
78 static struct cmd_list_element
*record_btrace_cmdlist
;
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
82 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
86 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
/* NOTE(review): the DEBUG macro below is missing its do/while(0) wrapper
   lines referred to by its own comment -- confirm against upstream.  */
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
91 #define DEBUG(msg, args...) \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
/* NOTE(review): garbled fragment covering require_btrace_thread,
   require_btrace, record_btrace_enable_warn and
   record_btrace_disable_callback.  Error-check conditions, TRY blocks,
   return statements and closing braces are elided; restore from upstream
   gdb/record-btrace.c before further edits.  */
101 /* Update the branch trace for the current thread and return a pointer to its
104 Throws an error if there is no thread or no trace. This function never
107 static struct thread_info
*
108 require_btrace_thread (void)
110 struct thread_info
*tp
;
114 tp
= find_thread_ptid (inferior_ptid
);
116 error (_("No thread."));
120 if (btrace_is_empty (tp
))
121 error (_("No trace."));
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
129 Throws an error if there is no thread or no trace. This function never
132 static struct btrace_thread_info
*
133 require_btrace (void)
135 struct thread_info
*tp
;
137 tp
= require_btrace_thread ();
142 /* Enable branch tracing for one thread. Warn on errors. */
145 record_btrace_enable_warn (struct thread_info
*tp
)
149 btrace_enable (tp
, &record_btrace_conf
);
151 CATCH (error
, RETURN_MASK_ERROR
)
153 warning ("%s", error
.message
);
158 /* Callback function to disable branch tracing for one thread. */
161 record_btrace_disable_callback (void *arg
)
163 struct thread_info
*tp
= (struct thread_info
*) arg
;
168 /* Enable automatic tracing of new threads. */
171 record_btrace_auto_enable (void)
173 DEBUG ("attach thread observer");
175 record_btrace_thread_observer
176 = observer_attach_new_thread (record_btrace_enable_warn
);
179 /* Disable automatic tracing of new threads. */
182 record_btrace_auto_disable (void)
184 /* The observer may have been detached, already. */
185 if (record_btrace_thread_observer
== NULL
)
188 DEBUG ("detach thread observer");
190 observer_detach_new_thread (record_btrace_thread_observer
);
191 record_btrace_thread_observer
= NULL
;
194 /* The record-btrace async event handler function. */
197 record_btrace_handle_async_inferior_event (gdb_client_data data
)
199 inferior_event_handler (INF_REG_EVENT
, NULL
);
/* NOTE(review): garbled fragment covering record_btrace_open,
   record_btrace_stop_recording, record_btrace_close and
   record_btrace_async.  Function bodies are split mid-statement and the
   enclosing braces, TRY blocks, `if (enable)` condition of to_async, and
   the btrace_disable call of stop_recording are elided.  Restore from
   upstream gdb/record-btrace.c before further edits.  */
202 /* The to_open method of target record-btrace. */
205 record_btrace_open (const char *args
, int from_tty
)
207 struct cleanup
*disable_chain
;
208 struct thread_info
*tp
;
215 if (!target_has_execution
)
216 error (_("The program is not being run."));
218 gdb_assert (record_btrace_thread_observer
== NULL
);
220 disable_chain
= make_cleanup (null_cleanup
, NULL
);
221 ALL_NON_EXITED_THREADS (tp
)
222 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
224 btrace_enable (tp
, &record_btrace_conf
);
226 make_cleanup (record_btrace_disable_callback
, tp
);
229 record_btrace_auto_enable ();
231 push_target (&record_btrace_ops
);
233 record_btrace_async_inferior_event_handler
234 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
236 record_btrace_generating_corefile
= 0;
238 format
= btrace_format_short_string (record_btrace_conf
.format
);
239 observer_notify_record_changed (current_inferior (), 1, "btrace", format
);
241 discard_cleanups (disable_chain
);
244 /* The to_stop_recording method of target record-btrace. */
247 record_btrace_stop_recording (struct target_ops
*self
)
249 struct thread_info
*tp
;
251 DEBUG ("stop recording");
253 record_btrace_auto_disable ();
255 ALL_NON_EXITED_THREADS (tp
)
256 if (tp
->btrace
.target
!= NULL
)
260 /* The to_close method of target record-btrace. */
263 record_btrace_close (struct target_ops
*self
)
265 struct thread_info
*tp
;
267 if (record_btrace_async_inferior_event_handler
!= NULL
)
268 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
270 /* Make sure automatic recording gets disabled even if we did not stop
271 recording before closing the record-btrace target. */
272 record_btrace_auto_disable ();
274 /* We should have already stopped recording.
275 Tear down btrace in case we have not. */
276 ALL_NON_EXITED_THREADS (tp
)
277 btrace_teardown (tp
);
280 /* The to_async method of target record-btrace. */
283 record_btrace_async (struct target_ops
*ops
, int enable
)
286 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
288 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
290 ops
->beneath
->to_async (ops
->beneath
, enable
);
/* NOTE(review): garbled fragment covering record_btrace_adjust_size,
   record_btrace_print_bts_conf and record_btrace_print_pt_conf.  The
   size-shifting statements and suffix return values ("GB"/"MB"/"KB")
   appear elided, as do the `size = conf->size;` initializations.
   Restore from upstream gdb/record-btrace.c before further edits.  */
293 /* Adjusts the size and returns a human readable size suffix. */
296 record_btrace_adjust_size (unsigned int *size
)
302 if ((sz
& ((1u << 30) - 1)) == 0)
307 else if ((sz
& ((1u << 20) - 1)) == 0)
312 else if ((sz
& ((1u << 10) - 1)) == 0)
321 /* Print a BTS configuration. */
324 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
332 suffix
= record_btrace_adjust_size (&size
);
333 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
337 /* Print an Intel Processor Trace configuration. */
340 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
348 suffix
= record_btrace_adjust_size (&size
);
349 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
353 /* Print a branch tracing configuration. */
356 record_btrace_print_conf (const struct btrace_config
*conf
)
358 printf_unfiltered (_("Recording format: %s.\n"),
359 btrace_format_string (conf
->format
));
361 switch (conf
->format
)
363 case BTRACE_FORMAT_NONE
:
366 case BTRACE_FORMAT_BTS
:
367 record_btrace_print_bts_conf (&conf
->bts
);
370 case BTRACE_FORMAT_PT
:
371 record_btrace_print_pt_conf (&conf
->pt
);
375 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
/* NOTE(review): garbled fragment covering record_btrace_info and
   btrace_ui_out_decode_error.  Null checks, the switch statements on
   errcode/format, break statements and closing braces are elided.
   Restore from upstream gdb/record-btrace.c before further edits.  */
378 /* The to_info_record method of target record-btrace. */
381 record_btrace_info (struct target_ops
*self
)
383 struct btrace_thread_info
*btinfo
;
384 const struct btrace_config
*conf
;
385 struct thread_info
*tp
;
386 unsigned int insns
, calls
, gaps
;
390 tp
= find_thread_ptid (inferior_ptid
);
392 error (_("No thread."));
394 btinfo
= &tp
->btrace
;
396 conf
= btrace_conf (btinfo
);
398 record_btrace_print_conf (conf
);
406 if (!btrace_is_empty (tp
))
408 struct btrace_call_iterator call
;
409 struct btrace_insn_iterator insn
;
411 btrace_call_end (&call
, btinfo
);
412 btrace_call_prev (&call
, 1);
413 calls
= btrace_call_number (&call
);
415 btrace_insn_end (&insn
, btinfo
);
417 insns
= btrace_insn_number (&insn
);
420 /* The last instruction does not really belong to the trace. */
427 /* Skip gaps at the end. */
430 steps
= btrace_insn_prev (&insn
, 1);
434 insns
= btrace_insn_number (&insn
);
439 gaps
= btinfo
->ngaps
;
442 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
443 "for thread %s (%s).\n"), insns
, calls
, gaps
,
444 print_thread_id (tp
), target_pid_to_str (tp
->ptid
));
446 if (btrace_is_replaying (tp
))
447 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
448 btrace_insn_number (btinfo
->replay
));
451 /* Print a decode error. */
454 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
455 enum btrace_format format
)
460 errstr
= _("unknown");
468 case BTRACE_FORMAT_BTS
:
474 case BDE_BTS_OVERFLOW
:
475 errstr
= _("instruction overflow");
478 case BDE_BTS_INSN_SIZE
:
479 errstr
= _("unknown instruction");
484 #if defined (HAVE_LIBIPT)
485 case BTRACE_FORMAT_PT
:
488 case BDE_PT_USER_QUIT
:
490 errstr
= _("trace decode cancelled");
493 case BDE_PT_DISABLED
:
495 errstr
= _("disabled");
498 case BDE_PT_OVERFLOW
:
500 errstr
= _("overflow");
505 errstr
= pt_errstr (pt_errcode (errcode
));
509 #endif /* defined (HAVE_LIBIPT) */
512 ui_out_text (uiout
, _("["));
515 ui_out_text (uiout
, _("decode error ("));
516 ui_out_field_int (uiout
, "errcode", errcode
);
517 ui_out_text (uiout
, _("): "));
519 ui_out_text (uiout
, errstr
);
520 ui_out_text (uiout
, _("]\n"));
/* Print an unsigned int as a ui-out field named FLD, using "%u".  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
545 /* Construct a line range. */
547 static struct btrace_line_range
548 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
550 struct btrace_line_range range
;
552 range
.symtab
= symtab
;
/* NOTE(review): garbled fragment of btrace_line_range_add.  The bodies of
   the `else if` branches and the final `return range;` are elided, so the
   exact update of range.begin/range.end cannot be confirmed here; restore
   from upstream gdb/record-btrace.c before further edits.  */
559 /* Add a line to a line range. */
561 static struct btrace_line_range
562 btrace_line_range_add (struct btrace_line_range range
, int line
)
564 if (range
.end
<= range
.begin
)
566 /* This is the first entry. */
568 range
.end
= line
+ 1;
570 else if (line
< range
.begin
)
572 else if (range
.end
< line
)
578 /* Return non-zero if RANGE is empty, zero otherwise. */
581 btrace_line_range_is_empty (struct btrace_line_range range
)
583 return range
.end
<= range
.begin
;
586 /* Return non-zero if LHS contains RHS, zero otherwise. */
589 btrace_line_range_contains_range (struct btrace_line_range lhs
,
590 struct btrace_line_range rhs
)
592 return ((lhs
.symtab
== rhs
.symtab
)
593 && (lhs
.begin
<= rhs
.begin
)
594 && (rhs
.end
<= lhs
.end
));
/* NOTE(review): garbled fragment covering btrace_find_line_range and
   btrace_print_lines.  NULL checks, loop-variable declarations, the
   assignment target of the make_cleanup_ui_out_tuple_begin_end call and
   closing braces are elided.  Restore from upstream gdb/record-btrace.c
   before further edits.  */
597 /* Find the line range associated with PC. */
599 static struct btrace_line_range
600 btrace_find_line_range (CORE_ADDR pc
)
602 struct btrace_line_range range
;
603 struct linetable_entry
*lines
;
604 struct linetable
*ltable
;
605 struct symtab
*symtab
;
608 symtab
= find_pc_line_symtab (pc
);
610 return btrace_mk_line_range (NULL
, 0, 0);
612 ltable
= SYMTAB_LINETABLE (symtab
);
614 return btrace_mk_line_range (symtab
, 0, 0);
616 nlines
= ltable
->nitems
;
617 lines
= ltable
->item
;
619 return btrace_mk_line_range (symtab
, 0, 0);
621 range
= btrace_mk_line_range (symtab
, 0, 0);
622 for (i
= 0; i
< nlines
- 1; i
++)
624 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0))
625 range
= btrace_line_range_add (range
, lines
[i
].line
);
631 /* Print source lines in LINES to UIOUT.
633 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
634 instructions corresponding to that source line. When printing a new source
635 line, we do the cleanups for the open chain and open a new cleanup chain for
636 the new source line. If the source line range in LINES is not empty, this
637 function will leave the cleanup chain for the last printed source line open
638 so instructions can be added to it. */
641 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
642 struct cleanup
**ui_item_chain
, int flags
)
644 print_source_lines_flags psl_flags
;
648 if (flags
& DISASSEMBLY_FILENAME
)
649 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
651 for (line
= lines
.begin
; line
< lines
.end
; ++line
)
653 if (*ui_item_chain
!= NULL
)
654 do_cleanups (*ui_item_chain
);
657 = make_cleanup_ui_out_tuple_begin_end (uiout
, "src_and_asm_line");
659 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
661 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
/* NOTE(review): garbled fragment covering btrace_insn_history,
   record_btrace_insn_history, record_btrace_insn_history_range and
   record_btrace_insn_history_from.  Branch conditions (gap handling,
   replay checks, direction checks), several declarations (stb, found,
   low/high truncation) and all closing braces are elided.  Restore from
   upstream gdb/record-btrace.c before further edits.  */
665 /* Disassemble a section of the recorded instruction trace. */
668 btrace_insn_history (struct ui_out
*uiout
,
669 const struct btrace_thread_info
*btinfo
,
670 const struct btrace_insn_iterator
*begin
,
671 const struct btrace_insn_iterator
*end
, int flags
)
674 struct cleanup
*cleanups
, *ui_item_chain
;
675 struct disassemble_info di
;
676 struct gdbarch
*gdbarch
;
677 struct btrace_insn_iterator it
;
678 struct btrace_line_range last_lines
;
680 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
681 btrace_insn_number (end
));
683 flags
|= DISASSEMBLY_SPECULATIVE
;
685 gdbarch
= target_gdbarch ();
686 stb
= mem_fileopen ();
687 cleanups
= make_cleanup_ui_file_delete (stb
);
688 di
= gdb_disassemble_info (gdbarch
, stb
);
689 last_lines
= btrace_mk_line_range (NULL
, 0, 0);
691 make_cleanup_ui_out_list_begin_end (uiout
, "asm_insns");
693 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
694 instructions corresponding to that line. */
695 ui_item_chain
= NULL
;
697 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
699 const struct btrace_insn
*insn
;
701 insn
= btrace_insn_get (&it
);
703 /* A NULL instruction indicates a gap in the trace. */
706 const struct btrace_config
*conf
;
708 conf
= btrace_conf (btinfo
);
710 /* We have trace so we must have a configuration. */
711 gdb_assert (conf
!= NULL
);
713 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
718 struct disasm_insn dinsn
;
720 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
722 struct btrace_line_range lines
;
724 lines
= btrace_find_line_range (insn
->pc
);
725 if (!btrace_line_range_is_empty (lines
)
726 && !btrace_line_range_contains_range (last_lines
, lines
))
728 btrace_print_lines (lines
, uiout
, &ui_item_chain
, flags
);
731 else if (ui_item_chain
== NULL
)
734 = make_cleanup_ui_out_tuple_begin_end (uiout
,
736 /* No source information. */
737 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
740 gdb_assert (ui_item_chain
!= NULL
);
743 memset (&dinsn
, 0, sizeof (dinsn
));
744 dinsn
.number
= btrace_insn_number (&it
);
745 dinsn
.addr
= insn
->pc
;
747 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
748 dinsn
.is_speculative
= 1;
750 gdb_pretty_print_insn (gdbarch
, uiout
, &di
, &dinsn
, flags
, stb
);
754 do_cleanups (cleanups
);
757 /* The to_insn_history method of target record-btrace. */
760 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
762 struct btrace_thread_info
*btinfo
;
763 struct btrace_insn_history
*history
;
764 struct btrace_insn_iterator begin
, end
;
765 struct cleanup
*uiout_cleanup
;
766 struct ui_out
*uiout
;
767 unsigned int context
, covered
;
769 uiout
= current_uiout
;
770 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
772 context
= abs (size
);
774 error (_("Bad record instruction-history-size."));
776 btinfo
= require_btrace ();
777 history
= btinfo
->insn_history
;
780 struct btrace_insn_iterator
*replay
;
782 DEBUG ("insn-history (0x%x): %d", flags
, size
);
784 /* If we're replaying, we start at the replay position. Otherwise, we
785 start at the tail of the trace. */
786 replay
= btinfo
->replay
;
790 btrace_insn_end (&begin
, btinfo
);
792 /* We start from here and expand in the requested direction. Then we
793 expand in the other direction, as well, to fill up any remaining
798 /* We want the current position covered, as well. */
799 covered
= btrace_insn_next (&end
, 1);
800 covered
+= btrace_insn_prev (&begin
, context
- covered
);
801 covered
+= btrace_insn_next (&end
, context
- covered
);
805 covered
= btrace_insn_next (&end
, context
);
806 covered
+= btrace_insn_prev (&begin
, context
- covered
);
811 begin
= history
->begin
;
814 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
815 btrace_insn_number (&begin
), btrace_insn_number (&end
));
820 covered
= btrace_insn_prev (&begin
, context
);
825 covered
= btrace_insn_next (&end
, context
);
830 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
834 printf_unfiltered (_("At the start of the branch trace record.\n"));
836 printf_unfiltered (_("At the end of the branch trace record.\n"));
839 btrace_set_insn_history (btinfo
, &begin
, &end
);
840 do_cleanups (uiout_cleanup
);
843 /* The to_insn_history_range method of target record-btrace. */
846 record_btrace_insn_history_range (struct target_ops
*self
,
847 ULONGEST from
, ULONGEST to
, int flags
)
849 struct btrace_thread_info
*btinfo
;
850 struct btrace_insn_history
*history
;
851 struct btrace_insn_iterator begin
, end
;
852 struct cleanup
*uiout_cleanup
;
853 struct ui_out
*uiout
;
854 unsigned int low
, high
;
857 uiout
= current_uiout
;
858 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
863 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
865 /* Check for wrap-arounds. */
866 if (low
!= from
|| high
!= to
)
867 error (_("Bad range."));
870 error (_("Bad range."));
872 btinfo
= require_btrace ();
874 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
876 error (_("Range out of bounds."));
878 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
881 /* Silently truncate the range. */
882 btrace_insn_end (&end
, btinfo
);
886 /* We want both begin and end to be inclusive. */
887 btrace_insn_next (&end
, 1);
890 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
891 btrace_set_insn_history (btinfo
, &begin
, &end
);
893 do_cleanups (uiout_cleanup
);
896 /* The to_insn_history_from method of target record-btrace. */
899 record_btrace_insn_history_from (struct target_ops
*self
,
900 ULONGEST from
, int size
, int flags
)
902 ULONGEST begin
, end
, context
;
904 context
= abs (size
);
906 error (_("Bad record instruction-history-size."));
915 begin
= from
- context
+ 1;
920 end
= from
+ context
- 1;
922 /* Check for wrap-around. */
927 record_btrace_insn_history_range (self
, begin
, end
, flags
);
930 /* Print the instruction number range for a function call history line. */
933 btrace_call_history_insn_range (struct ui_out
*uiout
,
934 const struct btrace_function
*bfun
)
936 unsigned int begin
, end
, size
;
938 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
939 gdb_assert (size
> 0);
941 begin
= bfun
->insn_offset
;
942 end
= begin
+ size
- 1;
944 ui_out_field_uint (uiout
, "insn begin", begin
);
945 ui_out_text (uiout
, ",");
946 ui_out_field_uint (uiout
, "insn end", end
);
/* NOTE(review): garbled fragment covering btrace_compute_src_line_range,
   btrace_call_history_src_line and btrace_get_bfun_name.  The sym/msym
   extraction from BFUN, begin/end initialization, the *pbegin/*pend
   stores, and several early returns are elided.  Restore from upstream
   gdb/record-btrace.c before further edits.  */
949 /* Compute the lowest and highest source line for the instructions in BFUN
950 and return them in PBEGIN and PEND.
951 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
952 result from inlining or macro expansion. */
955 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
956 int *pbegin
, int *pend
)
958 struct btrace_insn
*insn
;
959 struct symtab
*symtab
;
971 symtab
= symbol_symtab (sym
);
973 for (idx
= 0; VEC_iterate (btrace_insn_s
, bfun
->insn
, idx
, insn
); ++idx
)
975 struct symtab_and_line sal
;
977 sal
= find_pc_line (insn
->pc
, 0);
978 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
981 begin
= min (begin
, sal
.line
);
982 end
= max (end
, sal
.line
);
990 /* Print the source line information for a function call history line. */
993 btrace_call_history_src_line (struct ui_out
*uiout
,
994 const struct btrace_function
*bfun
)
1003 ui_out_field_string (uiout
, "file",
1004 symtab_to_filename_for_display (symbol_symtab (sym
)));
1006 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1010 ui_out_text (uiout
, ":");
1011 ui_out_field_int (uiout
, "min line", begin
);
1016 ui_out_text (uiout
, ",");
1017 ui_out_field_int (uiout
, "max line", end
);
1020 /* Get the name of a branch trace function. */
1023 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1025 struct minimal_symbol
*msym
;
1035 return SYMBOL_PRINT_NAME (sym
);
1036 else if (msym
!= NULL
)
1037 return MSYMBOL_PRINT_NAME (msym
);
/* NOTE(review): garbled fragment covering btrace_call_history,
   record_btrace_call_history, record_btrace_call_history_range and
   record_btrace_call_history_from.  Parameter lists are truncated
   (int_flags missing from several signatures), sym/msym extraction,
   direction checks and all closing braces are elided.  Restore from
   upstream gdb/record-btrace.c before further edits.  */
1042 /* Disassemble a section of the recorded function trace. */
1045 btrace_call_history (struct ui_out
*uiout
,
1046 const struct btrace_thread_info
*btinfo
,
1047 const struct btrace_call_iterator
*begin
,
1048 const struct btrace_call_iterator
*end
,
1051 struct btrace_call_iterator it
;
1052 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1054 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1055 btrace_call_number (end
));
1057 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1059 const struct btrace_function
*bfun
;
1060 struct minimal_symbol
*msym
;
1063 bfun
= btrace_call_get (&it
);
1067 /* Print the function index. */
1068 ui_out_field_uint (uiout
, "index", bfun
->number
);
1069 ui_out_text (uiout
, "\t");
1071 /* Indicate gaps in the trace. */
1072 if (bfun
->errcode
!= 0)
1074 const struct btrace_config
*conf
;
1076 conf
= btrace_conf (btinfo
);
1078 /* We have trace so we must have a configuration. */
1079 gdb_assert (conf
!= NULL
);
1081 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1086 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1088 int level
= bfun
->level
+ btinfo
->level
, i
;
1090 for (i
= 0; i
< level
; ++i
)
1091 ui_out_text (uiout
, " ");
1095 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
1096 else if (msym
!= NULL
)
1097 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
1098 else if (!ui_out_is_mi_like_p (uiout
))
1099 ui_out_field_string (uiout
, "function", "??");
1101 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1103 ui_out_text (uiout
, _("\tinst "));
1104 btrace_call_history_insn_range (uiout
, bfun
);
1107 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1109 ui_out_text (uiout
, _("\tat "));
1110 btrace_call_history_src_line (uiout
, bfun
);
1113 ui_out_text (uiout
, "\n");
1117 /* The to_call_history method of target record-btrace. */
1120 record_btrace_call_history (struct target_ops
*self
, int size
, int int_flags
)
1122 struct btrace_thread_info
*btinfo
;
1123 struct btrace_call_history
*history
;
1124 struct btrace_call_iterator begin
, end
;
1125 struct cleanup
*uiout_cleanup
;
1126 struct ui_out
*uiout
;
1127 unsigned int context
, covered
;
1128 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1130 uiout
= current_uiout
;
1131 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1133 context
= abs (size
);
1135 error (_("Bad record function-call-history-size."));
1137 btinfo
= require_btrace ();
1138 history
= btinfo
->call_history
;
1139 if (history
== NULL
)
1141 struct btrace_insn_iterator
*replay
;
1143 DEBUG ("call-history (0x%x): %d", int_flags
, size
);
1145 /* If we're replaying, we start at the replay position. Otherwise, we
1146 start at the tail of the trace. */
1147 replay
= btinfo
->replay
;
1150 begin
.function
= replay
->function
;
1151 begin
.btinfo
= btinfo
;
1154 btrace_call_end (&begin
, btinfo
);
1156 /* We start from here and expand in the requested direction. Then we
1157 expand in the other direction, as well, to fill up any remaining
1162 /* We want the current position covered, as well. */
1163 covered
= btrace_call_next (&end
, 1);
1164 covered
+= btrace_call_prev (&begin
, context
- covered
);
1165 covered
+= btrace_call_next (&end
, context
- covered
);
1169 covered
= btrace_call_next (&end
, context
);
1170 covered
+= btrace_call_prev (&begin
, context
- covered
);
1175 begin
= history
->begin
;
1178 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags
, size
,
1179 btrace_call_number (&begin
), btrace_call_number (&end
));
1184 covered
= btrace_call_prev (&begin
, context
);
1189 covered
= btrace_call_next (&end
, context
);
1194 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1198 printf_unfiltered (_("At the start of the branch trace record.\n"));
1200 printf_unfiltered (_("At the end of the branch trace record.\n"));
1203 btrace_set_call_history (btinfo
, &begin
, &end
);
1204 do_cleanups (uiout_cleanup
);
1207 /* The to_call_history_range method of target record-btrace. */
1210 record_btrace_call_history_range (struct target_ops
*self
,
1211 ULONGEST from
, ULONGEST to
,
1214 struct btrace_thread_info
*btinfo
;
1215 struct btrace_call_history
*history
;
1216 struct btrace_call_iterator begin
, end
;
1217 struct cleanup
*uiout_cleanup
;
1218 struct ui_out
*uiout
;
1219 unsigned int low
, high
;
1221 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1223 uiout
= current_uiout
;
1224 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1229 DEBUG ("call-history (0x%x): [%u; %u)", int_flags
, low
, high
);
1231 /* Check for wrap-arounds. */
1232 if (low
!= from
|| high
!= to
)
1233 error (_("Bad range."));
1236 error (_("Bad range."));
1238 btinfo
= require_btrace ();
1240 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1242 error (_("Range out of bounds."));
1244 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1247 /* Silently truncate the range. */
1248 btrace_call_end (&end
, btinfo
);
1252 /* We want both begin and end to be inclusive. */
1253 btrace_call_next (&end
, 1);
1256 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1257 btrace_set_call_history (btinfo
, &begin
, &end
);
1259 do_cleanups (uiout_cleanup
);
1262 /* The to_call_history_from method of target record-btrace. */
1265 record_btrace_call_history_from (struct target_ops
*self
,
1266 ULONGEST from
, int size
,
1269 ULONGEST begin
, end
, context
;
1270 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1272 context
= abs (size
);
1274 error (_("Bad record function-call-history-size."));
1283 begin
= from
- context
+ 1;
1288 end
= from
+ context
- 1;
1290 /* Check for wrap-around. */
1295 record_btrace_call_history_range (self
, begin
, end
, flags
);
1298 /* The to_record_is_replaying method of target record-btrace. */
1301 record_btrace_is_replaying (struct target_ops
*self
, ptid_t ptid
)
1303 struct thread_info
*tp
;
1305 ALL_NON_EXITED_THREADS (tp
)
1306 if (ptid_match (tp
->ptid
, ptid
) && btrace_is_replaying (tp
))
1312 /* The to_record_will_replay method of target record-btrace. */
1315 record_btrace_will_replay (struct target_ops
*self
, ptid_t ptid
, int dir
)
1317 return dir
== EXEC_REVERSE
|| record_btrace_is_replaying (self
, ptid
);
/* NOTE(review): garbled fragment covering record_btrace_xfer_partial,
   record_btrace_insert_breakpoint, record_btrace_remove_breakpoint,
   record_btrace_fetch_registers, record_btrace_store_registers and
   record_btrace_prepare_to_store.  The switch on OBJECT, TRY blocks,
   local declarations (old, ret, except, pcreg), return statements and
   closing braces are elided.  Restore from upstream
   gdb/record-btrace.c before further edits.  */
1320 /* The to_xfer_partial method of target record-btrace. */
1322 static enum target_xfer_status
1323 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1324 const char *annex
, gdb_byte
*readbuf
,
1325 const gdb_byte
*writebuf
, ULONGEST offset
,
1326 ULONGEST len
, ULONGEST
*xfered_len
)
1328 struct target_ops
*t
;
1330 /* Filter out requests that don't make sense during replay. */
1331 if (replay_memory_access
== replay_memory_access_read_only
1332 && !record_btrace_generating_corefile
1333 && record_btrace_is_replaying (ops
, inferior_ptid
))
1337 case TARGET_OBJECT_MEMORY
:
1339 struct target_section
*section
;
1341 /* We do not allow writing memory in general. */
1342 if (writebuf
!= NULL
)
1345 return TARGET_XFER_UNAVAILABLE
;
1348 /* We allow reading readonly memory. */
1349 section
= target_section_by_addr (ops
, offset
);
1350 if (section
!= NULL
)
1352 /* Check if the section we found is readonly. */
1353 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1354 section
->the_bfd_section
)
1355 & SEC_READONLY
) != 0)
1357 /* Truncate the request to fit into this section. */
1358 len
= min (len
, section
->endaddr
- offset
);
1364 return TARGET_XFER_UNAVAILABLE
;
1369 /* Forward the request. */
1371 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1372 offset
, len
, xfered_len
);
1375 /* The to_insert_breakpoint method of target record-btrace. */
1378 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1379 struct gdbarch
*gdbarch
,
1380 struct bp_target_info
*bp_tgt
)
1385 /* Inserting breakpoints requires accessing memory. Allow it for the
1386 duration of this function. */
1387 old
= replay_memory_access
;
1388 replay_memory_access
= replay_memory_access_read_write
;
1393 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1395 CATCH (except
, RETURN_MASK_ALL
)
1397 replay_memory_access
= old
;
1398 throw_exception (except
);
1401 replay_memory_access
= old
;
1406 /* The to_remove_breakpoint method of target record-btrace. */
1409 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1410 struct gdbarch
*gdbarch
,
1411 struct bp_target_info
*bp_tgt
)
1416 /* Removing breakpoints requires accessing memory. Allow it for the
1417 duration of this function. */
1418 old
= replay_memory_access
;
1419 replay_memory_access
= replay_memory_access_read_write
;
1424 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1426 CATCH (except
, RETURN_MASK_ALL
)
1428 replay_memory_access
= old
;
1429 throw_exception (except
);
1432 replay_memory_access
= old
;
1437 /* The to_fetch_registers method of target record-btrace. */
1440 record_btrace_fetch_registers (struct target_ops
*ops
,
1441 struct regcache
*regcache
, int regno
)
1443 struct btrace_insn_iterator
*replay
;
1444 struct thread_info
*tp
;
1446 tp
= find_thread_ptid (inferior_ptid
);
1447 gdb_assert (tp
!= NULL
);
1449 replay
= tp
->btrace
.replay
;
1450 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1452 const struct btrace_insn
*insn
;
1453 struct gdbarch
*gdbarch
;
1456 gdbarch
= get_regcache_arch (regcache
);
1457 pcreg
= gdbarch_pc_regnum (gdbarch
);
1461 /* We can only provide the PC register. */
1462 if (regno
>= 0 && regno
!= pcreg
)
1465 insn
= btrace_insn_get (replay
);
1466 gdb_assert (insn
!= NULL
);
1468 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1472 struct target_ops
*t
= ops
->beneath
;
1474 t
->to_fetch_registers (t
, regcache
, regno
);
1478 /* The to_store_registers method of target record-btrace. */
1481 record_btrace_store_registers (struct target_ops
*ops
,
1482 struct regcache
*regcache
, int regno
)
1484 struct target_ops
*t
;
1486 if (!record_btrace_generating_corefile
1487 && record_btrace_is_replaying (ops
, inferior_ptid
))
1488 error (_("Cannot write registers while replaying."));
1490 gdb_assert (may_write_registers
!= 0);
1493 t
->to_store_registers (t
, regcache
, regno
);
1496 /* The to_prepare_to_store method of target record-btrace. */
1499 record_btrace_prepare_to_store (struct target_ops
*ops
,
1500 struct regcache
*regcache
)
1502 struct target_ops
*t
;
1504 if (!record_btrace_generating_corefile
1505 && record_btrace_is_replaying (ops
, inferior_ptid
))
1509 t
->to_prepare_to_store (t
, regcache
);
1512 /* The branch trace frame cache. */
1514 struct btrace_frame_cache
1517 struct thread_info
*tp
;
1519 /* The frame info. */
1520 struct frame_info
*frame
;
1522 /* The branch trace function segment. */
1523 const struct btrace_function
*bfun
;
1526 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1528 static htab_t bfcache
;
1530 /* hash_f for htab_create_alloc of bfcache. */
1533 bfcache_hash (const void *arg
)
1535 const struct btrace_frame_cache
*cache
1536 = (const struct btrace_frame_cache
*) arg
;
1538 return htab_hash_pointer (cache
->frame
);
1541 /* eq_f for htab_create_alloc of bfcache. */
1544 bfcache_eq (const void *arg1
, const void *arg2
)
1546 const struct btrace_frame_cache
*cache1
1547 = (const struct btrace_frame_cache
*) arg1
;
1548 const struct btrace_frame_cache
*cache2
1549 = (const struct btrace_frame_cache
*) arg2
;
1551 return cache1
->frame
== cache2
->frame
;
1554 /* Create a new btrace frame cache. */
1556 static struct btrace_frame_cache
*
1557 bfcache_new (struct frame_info
*frame
)
1559 struct btrace_frame_cache
*cache
;
1562 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1563 cache
->frame
= frame
;
1565 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1566 gdb_assert (*slot
== NULL
);
1572 /* Extract the branch trace function from a branch trace frame. */
1574 static const struct btrace_function
*
1575 btrace_get_frame_function (struct frame_info
*frame
)
1577 const struct btrace_frame_cache
*cache
;
1578 const struct btrace_function
*bfun
;
1579 struct btrace_frame_cache pattern
;
1582 pattern
.frame
= frame
;
1584 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1588 cache
= (const struct btrace_frame_cache
*) *slot
;
1592 /* Implement stop_reason method for record_btrace_frame_unwind. */
1594 static enum unwind_stop_reason
1595 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1598 const struct btrace_frame_cache
*cache
;
1599 const struct btrace_function
*bfun
;
1601 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1603 gdb_assert (bfun
!= NULL
);
1605 if (bfun
->up
== NULL
)
1606 return UNWIND_UNAVAILABLE
;
1608 return UNWIND_NO_REASON
;
1611 /* Implement this_id method for record_btrace_frame_unwind. */
1614 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1615 struct frame_id
*this_id
)
1617 const struct btrace_frame_cache
*cache
;
1618 const struct btrace_function
*bfun
;
1619 CORE_ADDR code
, special
;
1621 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1624 gdb_assert (bfun
!= NULL
);
1626 while (bfun
->segment
.prev
!= NULL
)
1627 bfun
= bfun
->segment
.prev
;
1629 code
= get_frame_func (this_frame
);
1630 special
= bfun
->number
;
1632 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1634 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1635 btrace_get_bfun_name (cache
->bfun
),
1636 core_addr_to_string_nz (this_id
->code_addr
),
1637 core_addr_to_string_nz (this_id
->special_addr
));
1640 /* Implement prev_register method for record_btrace_frame_unwind. */
1642 static struct value
*
1643 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1647 const struct btrace_frame_cache
*cache
;
1648 const struct btrace_function
*bfun
, *caller
;
1649 const struct btrace_insn
*insn
;
1650 struct gdbarch
*gdbarch
;
1654 gdbarch
= get_frame_arch (this_frame
);
1655 pcreg
= gdbarch_pc_regnum (gdbarch
);
1656 if (pcreg
< 0 || regnum
!= pcreg
)
1657 throw_error (NOT_AVAILABLE_ERROR
,
1658 _("Registers are not available in btrace record history"));
1660 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1662 gdb_assert (bfun
!= NULL
);
1666 throw_error (NOT_AVAILABLE_ERROR
,
1667 _("No caller in btrace record history"));
1669 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1671 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1676 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1679 pc
+= gdb_insn_length (gdbarch
, pc
);
1682 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1683 btrace_get_bfun_name (bfun
), bfun
->level
,
1684 core_addr_to_string_nz (pc
));
1686 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1689 /* Implement sniffer method for record_btrace_frame_unwind. */
1692 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1693 struct frame_info
*this_frame
,
1696 const struct btrace_function
*bfun
;
1697 struct btrace_frame_cache
*cache
;
1698 struct thread_info
*tp
;
1699 struct frame_info
*next
;
1701 /* THIS_FRAME does not contain a reference to its thread. */
1702 tp
= find_thread_ptid (inferior_ptid
);
1703 gdb_assert (tp
!= NULL
);
1706 next
= get_next_frame (this_frame
);
1709 const struct btrace_insn_iterator
*replay
;
1711 replay
= tp
->btrace
.replay
;
1713 bfun
= replay
->function
;
1717 const struct btrace_function
*callee
;
1719 callee
= btrace_get_frame_function (next
);
1720 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1727 DEBUG ("[frame] sniffed frame for %s on level %d",
1728 btrace_get_bfun_name (bfun
), bfun
->level
);
1730 /* This is our frame. Initialize the frame cache. */
1731 cache
= bfcache_new (this_frame
);
1735 *this_cache
= cache
;
1739 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1742 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1743 struct frame_info
*this_frame
,
1746 const struct btrace_function
*bfun
, *callee
;
1747 struct btrace_frame_cache
*cache
;
1748 struct frame_info
*next
;
1750 next
= get_next_frame (this_frame
);
1754 callee
= btrace_get_frame_function (next
);
1758 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1765 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1766 btrace_get_bfun_name (bfun
), bfun
->level
);
1768 /* This is our frame. Initialize the frame cache. */
1769 cache
= bfcache_new (this_frame
);
1770 cache
->tp
= find_thread_ptid (inferior_ptid
);
1773 *this_cache
= cache
;
1778 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1780 struct btrace_frame_cache
*cache
;
1783 cache
= (struct btrace_frame_cache
*) this_cache
;
1785 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1786 gdb_assert (slot
!= NULL
);
1788 htab_remove_elt (bfcache
, cache
);
1791 /* btrace recording does not store previous memory content, neither the stack
1792 frames content. Any unwinding would return errorneous results as the stack
1793 contents no longer matches the changed PC value restored from history.
1794 Therefore this unwinder reports any possibly unwound registers as
1797 const struct frame_unwind record_btrace_frame_unwind
=
1800 record_btrace_frame_unwind_stop_reason
,
1801 record_btrace_frame_this_id
,
1802 record_btrace_frame_prev_register
,
1804 record_btrace_frame_sniffer
,
1805 record_btrace_frame_dealloc_cache
1808 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1811 record_btrace_frame_unwind_stop_reason
,
1812 record_btrace_frame_this_id
,
1813 record_btrace_frame_prev_register
,
1815 record_btrace_tailcall_frame_sniffer
,
1816 record_btrace_frame_dealloc_cache
1819 /* Implement the to_get_unwinder method. */
1821 static const struct frame_unwind
*
1822 record_btrace_to_get_unwinder (struct target_ops
*self
)
1824 return &record_btrace_frame_unwind
;
1827 /* Implement the to_get_tailcall_unwinder method. */
1829 static const struct frame_unwind
*
1830 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1832 return &record_btrace_tailcall_frame_unwind
;
1835 /* Return a human-readable string for FLAG. */
1838 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1846 return "reverse-step";
1852 return "reverse-cont";
1861 /* Indicate that TP should be resumed according to FLAG. */
1864 record_btrace_resume_thread (struct thread_info
*tp
,
1865 enum btrace_thread_flag flag
)
1867 struct btrace_thread_info
*btinfo
;
1869 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1870 target_pid_to_str (tp
->ptid
), flag
, btrace_thread_flag_to_str (flag
));
1872 btinfo
= &tp
->btrace
;
1874 /* Fetch the latest branch trace. */
1877 /* A resume request overwrites a preceding resume or stop request. */
1878 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1879 btinfo
->flags
|= flag
;
1882 /* Get the current frame for TP. */
1884 static struct frame_info
*
1885 get_thread_current_frame (struct thread_info
*tp
)
1887 struct frame_info
*frame
;
1888 ptid_t old_inferior_ptid
;
1891 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1892 old_inferior_ptid
= inferior_ptid
;
1893 inferior_ptid
= tp
->ptid
;
1895 /* Clear the executing flag to allow changes to the current frame.
1896 We are not actually running, yet. We just started a reverse execution
1897 command or a record goto command.
1898 For the latter, EXECUTING is false and this has no effect.
1899 For the former, EXECUTING is true and we're in to_wait, about to
1900 move the thread. Since we need to recompute the stack, we temporarily
1901 set EXECUTING to flase. */
1902 executing
= is_executing (inferior_ptid
);
1903 set_executing (inferior_ptid
, 0);
1908 frame
= get_current_frame ();
1910 CATCH (except
, RETURN_MASK_ALL
)
1912 /* Restore the previous execution state. */
1913 set_executing (inferior_ptid
, executing
);
1915 /* Restore the previous inferior_ptid. */
1916 inferior_ptid
= old_inferior_ptid
;
1918 throw_exception (except
);
1922 /* Restore the previous execution state. */
1923 set_executing (inferior_ptid
, executing
);
1925 /* Restore the previous inferior_ptid. */
1926 inferior_ptid
= old_inferior_ptid
;
1931 /* Start replaying a thread. */
1933 static struct btrace_insn_iterator
*
1934 record_btrace_start_replaying (struct thread_info
*tp
)
1936 struct btrace_insn_iterator
*replay
;
1937 struct btrace_thread_info
*btinfo
;
1939 btinfo
= &tp
->btrace
;
1942 /* We can't start replaying without trace. */
1943 if (btinfo
->begin
== NULL
)
1946 /* GDB stores the current frame_id when stepping in order to detects steps
1948 Since frames are computed differently when we're replaying, we need to
1949 recompute those stored frames and fix them up so we can still detect
1950 subroutines after we started replaying. */
1953 struct frame_info
*frame
;
1954 struct frame_id frame_id
;
1955 int upd_step_frame_id
, upd_step_stack_frame_id
;
1957 /* The current frame without replaying - computed via normal unwind. */
1958 frame
= get_thread_current_frame (tp
);
1959 frame_id
= get_frame_id (frame
);
1961 /* Check if we need to update any stepping-related frame id's. */
1962 upd_step_frame_id
= frame_id_eq (frame_id
,
1963 tp
->control
.step_frame_id
);
1964 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1965 tp
->control
.step_stack_frame_id
);
1967 /* We start replaying at the end of the branch trace. This corresponds
1968 to the current instruction. */
1969 replay
= XNEW (struct btrace_insn_iterator
);
1970 btrace_insn_end (replay
, btinfo
);
1972 /* Skip gaps at the end of the trace. */
1973 while (btrace_insn_get (replay
) == NULL
)
1977 steps
= btrace_insn_prev (replay
, 1);
1979 error (_("No trace."));
1982 /* We're not replaying, yet. */
1983 gdb_assert (btinfo
->replay
== NULL
);
1984 btinfo
->replay
= replay
;
1986 /* Make sure we're not using any stale registers. */
1987 registers_changed_ptid (tp
->ptid
);
1989 /* The current frame with replaying - computed via btrace unwind. */
1990 frame
= get_thread_current_frame (tp
);
1991 frame_id
= get_frame_id (frame
);
1993 /* Replace stepping related frames where necessary. */
1994 if (upd_step_frame_id
)
1995 tp
->control
.step_frame_id
= frame_id
;
1996 if (upd_step_stack_frame_id
)
1997 tp
->control
.step_stack_frame_id
= frame_id
;
1999 CATCH (except
, RETURN_MASK_ALL
)
2001 xfree (btinfo
->replay
);
2002 btinfo
->replay
= NULL
;
2004 registers_changed_ptid (tp
->ptid
);
2006 throw_exception (except
);
2013 /* Stop replaying a thread. */
2016 record_btrace_stop_replaying (struct thread_info
*tp
)
2018 struct btrace_thread_info
*btinfo
;
2020 btinfo
= &tp
->btrace
;
2022 xfree (btinfo
->replay
);
2023 btinfo
->replay
= NULL
;
2025 /* Make sure we're not leaving any stale registers. */
2026 registers_changed_ptid (tp
->ptid
);
2029 /* Stop replaying TP if it is at the end of its execution history. */
2032 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2034 struct btrace_insn_iterator
*replay
, end
;
2035 struct btrace_thread_info
*btinfo
;
2037 btinfo
= &tp
->btrace
;
2038 replay
= btinfo
->replay
;
2043 btrace_insn_end (&end
, btinfo
);
2045 if (btrace_insn_cmp (replay
, &end
) == 0)
2046 record_btrace_stop_replaying (tp
);
2049 /* The to_resume method of target record-btrace. */
2052 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
2053 enum gdb_signal signal
)
2055 struct thread_info
*tp
;
2056 enum btrace_thread_flag flag
, cflag
;
2058 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
),
2059 execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2060 step
? "step" : "cont");
2062 /* Store the execution direction of the last resume.
2064 If there is more than one to_resume call, we have to rely on infrun
2065 to not change the execution direction in-between. */
2066 record_btrace_resume_exec_dir
= execution_direction
;
2068 /* As long as we're not replaying, just forward the request.
2070 For non-stop targets this means that no thread is replaying. In order to
2071 make progress, we may need to explicitly move replaying threads to the end
2072 of their execution history. */
2073 if ((execution_direction
!= EXEC_REVERSE
)
2074 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2077 ops
->to_resume (ops
, ptid
, step
, signal
);
2081 /* Compute the btrace thread flag for the requested move. */
2082 if (execution_direction
== EXEC_REVERSE
)
2084 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2089 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2093 /* We just indicate the resume intent here. The actual stepping happens in
2094 record_btrace_wait below.
2096 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2097 if (!target_is_non_stop_p ())
2099 gdb_assert (ptid_match (inferior_ptid
, ptid
));
2101 ALL_NON_EXITED_THREADS (tp
)
2102 if (ptid_match (tp
->ptid
, ptid
))
2104 if (ptid_match (tp
->ptid
, inferior_ptid
))
2105 record_btrace_resume_thread (tp
, flag
);
2107 record_btrace_resume_thread (tp
, cflag
);
2112 ALL_NON_EXITED_THREADS (tp
)
2113 if (ptid_match (tp
->ptid
, ptid
))
2114 record_btrace_resume_thread (tp
, flag
);
2117 /* Async support. */
2118 if (target_can_async_p ())
2121 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2125 /* Cancel resuming TP. */
2128 record_btrace_cancel_resume (struct thread_info
*tp
)
2130 enum btrace_thread_flag flags
;
2132 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2136 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2137 print_thread_id (tp
),
2138 target_pid_to_str (tp
->ptid
), flags
,
2139 btrace_thread_flag_to_str (flags
));
2141 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2142 record_btrace_stop_replaying_at_end (tp
);
2145 /* Return a target_waitstatus indicating that we ran out of history. */
2147 static struct target_waitstatus
2148 btrace_step_no_history (void)
2150 struct target_waitstatus status
;
2152 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2157 /* Return a target_waitstatus indicating that a step finished. */
2159 static struct target_waitstatus
2160 btrace_step_stopped (void)
2162 struct target_waitstatus status
;
2164 status
.kind
= TARGET_WAITKIND_STOPPED
;
2165 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2170 /* Return a target_waitstatus indicating that a thread was stopped as
2173 static struct target_waitstatus
2174 btrace_step_stopped_on_request (void)
2176 struct target_waitstatus status
;
2178 status
.kind
= TARGET_WAITKIND_STOPPED
;
2179 status
.value
.sig
= GDB_SIGNAL_0
;
2184 /* Return a target_waitstatus indicating a spurious stop. */
2186 static struct target_waitstatus
2187 btrace_step_spurious (void)
2189 struct target_waitstatus status
;
2191 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2196 /* Return a target_waitstatus indicating that the thread was not resumed. */
2198 static struct target_waitstatus
2199 btrace_step_no_resumed (void)
2201 struct target_waitstatus status
;
2203 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2208 /* Return a target_waitstatus indicating that we should wait again. */
2210 static struct target_waitstatus
2211 btrace_step_again (void)
2213 struct target_waitstatus status
;
2215 status
.kind
= TARGET_WAITKIND_IGNORE
;
2220 /* Clear the record histories. */
2223 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2225 xfree (btinfo
->insn_history
);
2226 xfree (btinfo
->call_history
);
2228 btinfo
->insn_history
= NULL
;
2229 btinfo
->call_history
= NULL
;
2232 /* Check whether TP's current replay position is at a breakpoint. */
2235 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2237 struct btrace_insn_iterator
*replay
;
2238 struct btrace_thread_info
*btinfo
;
2239 const struct btrace_insn
*insn
;
2240 struct inferior
*inf
;
2242 btinfo
= &tp
->btrace
;
2243 replay
= btinfo
->replay
;
2248 insn
= btrace_insn_get (replay
);
2252 inf
= find_inferior_ptid (tp
->ptid
);
2256 return record_check_stopped_by_breakpoint (inf
->aspace
, insn
->pc
,
2257 &btinfo
->stop_reason
);
2260 /* Step one instruction in forward direction. */
2262 static struct target_waitstatus
2263 record_btrace_single_step_forward (struct thread_info
*tp
)
2265 struct btrace_insn_iterator
*replay
, end
;
2266 struct btrace_thread_info
*btinfo
;
2268 btinfo
= &tp
->btrace
;
2269 replay
= btinfo
->replay
;
2271 /* We're done if we're not replaying. */
2273 return btrace_step_no_history ();
2275 /* Check if we're stepping a breakpoint. */
2276 if (record_btrace_replay_at_breakpoint (tp
))
2277 return btrace_step_stopped ();
2279 /* Skip gaps during replay. */
2284 /* We will bail out here if we continue stepping after reaching the end
2285 of the execution history. */
2286 steps
= btrace_insn_next (replay
, 1);
2288 return btrace_step_no_history ();
2290 while (btrace_insn_get (replay
) == NULL
);
2292 /* Determine the end of the instruction trace. */
2293 btrace_insn_end (&end
, btinfo
);
2295 /* The execution trace contains (and ends with) the current instruction.
2296 This instruction has not been executed, yet, so the trace really ends
2297 one instruction earlier. */
2298 if (btrace_insn_cmp (replay
, &end
) == 0)
2299 return btrace_step_no_history ();
2301 return btrace_step_spurious ();
2304 /* Step one instruction in backward direction. */
2306 static struct target_waitstatus
2307 record_btrace_single_step_backward (struct thread_info
*tp
)
2309 struct btrace_insn_iterator
*replay
;
2310 struct btrace_thread_info
*btinfo
;
2312 btinfo
= &tp
->btrace
;
2313 replay
= btinfo
->replay
;
2315 /* Start replaying if we're not already doing so. */
2317 replay
= record_btrace_start_replaying (tp
);
2319 /* If we can't step any further, we reached the end of the history.
2320 Skip gaps during replay. */
2325 steps
= btrace_insn_prev (replay
, 1);
2327 return btrace_step_no_history ();
2329 while (btrace_insn_get (replay
) == NULL
);
2331 /* Check if we're stepping a breakpoint.
2333 For reverse-stepping, this check is after the step. There is logic in
2334 infrun.c that handles reverse-stepping separately. See, for example,
2335 proceed and adjust_pc_after_break.
2337 This code assumes that for reverse-stepping, PC points to the last
2338 de-executed instruction, whereas for forward-stepping PC points to the
2339 next to-be-executed instruction. */
2340 if (record_btrace_replay_at_breakpoint (tp
))
2341 return btrace_step_stopped ();
2343 return btrace_step_spurious ();
2346 /* Step a single thread. */
2348 static struct target_waitstatus
2349 record_btrace_step_thread (struct thread_info
*tp
)
2351 struct btrace_thread_info
*btinfo
;
2352 struct target_waitstatus status
;
2353 enum btrace_thread_flag flags
;
2355 btinfo
= &tp
->btrace
;
2357 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2358 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2360 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2361 target_pid_to_str (tp
->ptid
), flags
,
2362 btrace_thread_flag_to_str (flags
));
2364 /* We can't step without an execution history. */
2365 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2366 return btrace_step_no_history ();
2371 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2374 return btrace_step_stopped_on_request ();
2377 status
= record_btrace_single_step_forward (tp
);
2378 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2381 return btrace_step_stopped ();
2384 status
= record_btrace_single_step_backward (tp
);
2385 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2388 return btrace_step_stopped ();
2391 status
= record_btrace_single_step_forward (tp
);
2392 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2395 btinfo
->flags
|= flags
;
2396 return btrace_step_again ();
2399 status
= record_btrace_single_step_backward (tp
);
2400 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2403 btinfo
->flags
|= flags
;
2404 return btrace_step_again ();
2407 /* We keep threads moving at the end of their execution history. The to_wait
2408 method will stop the thread for whom the event is reported. */
2409 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2410 btinfo
->flags
|= flags
;
2415 /* A vector of threads. */
2417 typedef struct thread_info
* tp_t
;
2420 /* Announce further events if necessary. */
2423 record_btrace_maybe_mark_async_event (const VEC (tp_t
) *moving
,
2424 const VEC (tp_t
) *no_history
)
2426 int more_moving
, more_no_history
;
2428 more_moving
= !VEC_empty (tp_t
, moving
);
2429 more_no_history
= !VEC_empty (tp_t
, no_history
);
2431 if (!more_moving
&& !more_no_history
)
2435 DEBUG ("movers pending");
2437 if (more_no_history
)
2438 DEBUG ("no-history pending");
2440 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2443 /* The to_wait method of target record-btrace. */
2446 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2447 struct target_waitstatus
*status
, int options
)
2449 VEC (tp_t
) *moving
, *no_history
;
2450 struct thread_info
*tp
, *eventing
;
2451 struct cleanup
*cleanups
= make_cleanup (null_cleanup
, NULL
);
2453 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2455 /* As long as we're not replaying, just forward the request. */
2456 if ((execution_direction
!= EXEC_REVERSE
)
2457 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2460 return ops
->to_wait (ops
, ptid
, status
, options
);
2466 make_cleanup (VEC_cleanup (tp_t
), &moving
);
2467 make_cleanup (VEC_cleanup (tp_t
), &no_history
);
2469 /* Keep a work list of moving threads. */
2470 ALL_NON_EXITED_THREADS (tp
)
2471 if (ptid_match (tp
->ptid
, ptid
)
2472 && ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0))
2473 VEC_safe_push (tp_t
, moving
, tp
);
2475 if (VEC_empty (tp_t
, moving
))
2477 *status
= btrace_step_no_resumed ();
2479 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
),
2480 target_waitstatus_to_string (status
));
2482 do_cleanups (cleanups
);
2486 /* Step moving threads one by one, one step each, until either one thread
2487 reports an event or we run out of threads to step.
2489 When stepping more than one thread, chances are that some threads reach
2490 the end of their execution history earlier than others. If we reported
2491 this immediately, all-stop on top of non-stop would stop all threads and
2492 resume the same threads next time. And we would report the same thread
2493 having reached the end of its execution history again.
2495 In the worst case, this would starve the other threads. But even if other
2496 threads would be allowed to make progress, this would result in far too
2497 many intermediate stops.
2499 We therefore delay the reporting of "no execution history" until we have
2500 nothing else to report. By this time, all threads should have moved to
2501 either the beginning or the end of their execution history. There will
2502 be a single user-visible stop. */
2504 while ((eventing
== NULL
) && !VEC_empty (tp_t
, moving
))
2509 while ((eventing
== NULL
) && VEC_iterate (tp_t
, moving
, ix
, tp
))
2511 *status
= record_btrace_step_thread (tp
);
2513 switch (status
->kind
)
2515 case TARGET_WAITKIND_IGNORE
:
2519 case TARGET_WAITKIND_NO_HISTORY
:
2520 VEC_safe_push (tp_t
, no_history
,
2521 VEC_ordered_remove (tp_t
, moving
, ix
));
2525 eventing
= VEC_unordered_remove (tp_t
, moving
, ix
);
2531 if (eventing
== NULL
)
2533 /* We started with at least one moving thread. This thread must have
2534 either stopped or reached the end of its execution history.
2536 In the former case, EVENTING must not be NULL.
2537 In the latter case, NO_HISTORY must not be empty. */
2538 gdb_assert (!VEC_empty (tp_t
, no_history
));
2540 /* We kept threads moving at the end of their execution history. Stop
2541 EVENTING now that we are going to report its stop. */
2542 eventing
= VEC_unordered_remove (tp_t
, no_history
, 0);
2543 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2545 *status
= btrace_step_no_history ();
2548 gdb_assert (eventing
!= NULL
);
2550 /* We kept threads replaying at the end of their execution history. Stop
2551 replaying EVENTING now that we are going to report its stop. */
2552 record_btrace_stop_replaying_at_end (eventing
);
2554 /* Stop all other threads. */
2555 if (!target_is_non_stop_p ())
2556 ALL_NON_EXITED_THREADS (tp
)
2557 record_btrace_cancel_resume (tp
);
2559 /* In async mode, we need to announce further events. */
2560 if (target_is_async_p ())
2561 record_btrace_maybe_mark_async_event (moving
, no_history
);
2563 /* Start record histories anew from the current position. */
2564 record_btrace_clear_histories (&eventing
->btrace
);
2566 /* We moved the replay position but did not update registers. */
2567 registers_changed_ptid (eventing
->ptid
);
2569 DEBUG ("wait ended by thread %s (%s): %s",
2570 print_thread_id (eventing
),
2571 target_pid_to_str (eventing
->ptid
),
2572 target_waitstatus_to_string (status
));
2574 do_cleanups (cleanups
);
2575 return eventing
->ptid
;
2578 /* The to_stop method of target record-btrace. */
2581 record_btrace_stop (struct target_ops
*ops
, ptid_t ptid
)
2583 DEBUG ("stop %s", target_pid_to_str (ptid
));
2585 /* As long as we're not replaying, just forward the request. */
2586 if ((execution_direction
!= EXEC_REVERSE
)
2587 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2590 ops
->to_stop (ops
, ptid
);
2594 struct thread_info
*tp
;
2596 ALL_NON_EXITED_THREADS (tp
)
2597 if (ptid_match (tp
->ptid
, ptid
))
2599 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2600 tp
->btrace
.flags
|= BTHR_STOP
;
2605 /* The to_can_execute_reverse method of target record-btrace. */
2608 record_btrace_can_execute_reverse (struct target_ops
*self
)
2613 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2616 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2618 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2620 struct thread_info
*tp
= inferior_thread ();
2622 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2625 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2628 /* The to_supports_stopped_by_sw_breakpoint method of target
2632 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2634 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2637 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2640 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2643 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2645 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2647 struct thread_info
*tp
= inferior_thread ();
2649 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2652 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2655 /* The to_supports_stopped_by_hw_breakpoint method of target
2659 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2661 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2664 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2667 /* The to_update_thread_list method of target record-btrace. */
2670 record_btrace_update_thread_list (struct target_ops
*ops
)
2672 /* We don't add or remove threads during replay. */
2673 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2676 /* Forward the request. */
2678 ops
->to_update_thread_list (ops
);
2681 /* The to_thread_alive method of target record-btrace. */
2684 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2686 /* We don't add or remove threads during replay. */
2687 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2688 return find_thread_ptid (ptid
) != NULL
;
2690 /* Forward the request. */
2692 return ops
->to_thread_alive (ops
, ptid
);
2695 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2699 record_btrace_set_replay (struct thread_info
*tp
,
2700 const struct btrace_insn_iterator
*it
)
2702 struct btrace_thread_info
*btinfo
;
2704 btinfo
= &tp
->btrace
;
2706 if (it
== NULL
|| it
->function
== NULL
)
2707 record_btrace_stop_replaying (tp
);
2710 if (btinfo
->replay
== NULL
)
2711 record_btrace_start_replaying (tp
);
2712 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2715 *btinfo
->replay
= *it
;
2716 registers_changed_ptid (tp
->ptid
);
2719 /* Start anew from the new replay position. */
2720 record_btrace_clear_histories (btinfo
);
2722 stop_pc
= regcache_read_pc (get_current_regcache ());
2723 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2726 /* The to_goto_record_begin method of target record-btrace. */
2729 record_btrace_goto_begin (struct target_ops
*self
)
2731 struct thread_info
*tp
;
2732 struct btrace_insn_iterator begin
;
2734 tp
= require_btrace_thread ();
2736 btrace_insn_begin (&begin
, &tp
->btrace
);
2737 record_btrace_set_replay (tp
, &begin
);
2740 /* The to_goto_record_end method of target record-btrace. */
2743 record_btrace_goto_end (struct target_ops
*ops
)
2745 struct thread_info
*tp
;
2747 tp
= require_btrace_thread ();
2749 record_btrace_set_replay (tp
, NULL
);
2752 /* The to_goto_record method of target record-btrace. */
2755 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2757 struct thread_info
*tp
;
2758 struct btrace_insn_iterator it
;
2759 unsigned int number
;
2764 /* Check for wrap-arounds. */
2766 error (_("Instruction number out of range."));
2768 tp
= require_btrace_thread ();
2770 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2772 error (_("No such instruction."));
2774 record_btrace_set_replay (tp
, &it
);
2777 /* The to_record_stop_replaying method of target record-btrace. */
2780 record_btrace_stop_replaying_all (struct target_ops
*self
)
2782 struct thread_info
*tp
;
2784 ALL_NON_EXITED_THREADS (tp
)
2785 record_btrace_stop_replaying (tp
);
2788 /* The to_execution_direction target method. */
2790 static enum exec_direction_kind
2791 record_btrace_execution_direction (struct target_ops
*self
)
2793 return record_btrace_resume_exec_dir
;
2796 /* The to_prepare_to_generate_core target method. */
2799 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2801 record_btrace_generating_corefile
= 1;
2804 /* The to_done_generating_core target method. */
2807 record_btrace_done_generating_core (struct target_ops
*self
)
2809 record_btrace_generating_corefile
= 0;
2812 /* Initialize the record-btrace target ops. */
2815 init_record_btrace_ops (void)
2817 struct target_ops
*ops
;
2819 ops
= &record_btrace_ops
;
2820 ops
->to_shortname
= "record-btrace";
2821 ops
->to_longname
= "Branch tracing target";
2822 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2823 ops
->to_open
= record_btrace_open
;
2824 ops
->to_close
= record_btrace_close
;
2825 ops
->to_async
= record_btrace_async
;
2826 ops
->to_detach
= record_detach
;
2827 ops
->to_disconnect
= record_disconnect
;
2828 ops
->to_mourn_inferior
= record_mourn_inferior
;
2829 ops
->to_kill
= record_kill
;
2830 ops
->to_stop_recording
= record_btrace_stop_recording
;
2831 ops
->to_info_record
= record_btrace_info
;
2832 ops
->to_insn_history
= record_btrace_insn_history
;
2833 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2834 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2835 ops
->to_call_history
= record_btrace_call_history
;
2836 ops
->to_call_history_from
= record_btrace_call_history_from
;
2837 ops
->to_call_history_range
= record_btrace_call_history_range
;
2838 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2839 ops
->to_record_will_replay
= record_btrace_will_replay
;
2840 ops
->to_record_stop_replaying
= record_btrace_stop_replaying_all
;
2841 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2842 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2843 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2844 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2845 ops
->to_store_registers
= record_btrace_store_registers
;
2846 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2847 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2848 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2849 ops
->to_resume
= record_btrace_resume
;
2850 ops
->to_wait
= record_btrace_wait
;
2851 ops
->to_stop
= record_btrace_stop
;
2852 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2853 ops
->to_thread_alive
= record_btrace_thread_alive
;
2854 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2855 ops
->to_goto_record_end
= record_btrace_goto_end
;
2856 ops
->to_goto_record
= record_btrace_goto
;
2857 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2858 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2859 ops
->to_supports_stopped_by_sw_breakpoint
2860 = record_btrace_supports_stopped_by_sw_breakpoint
;
2861 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2862 ops
->to_supports_stopped_by_hw_breakpoint
2863 = record_btrace_supports_stopped_by_hw_breakpoint
;
2864 ops
->to_execution_direction
= record_btrace_execution_direction
;
2865 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2866 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2867 ops
->to_stratum
= record_stratum
;
2868 ops
->to_magic
= OPS_MAGIC
;
2871 /* Start recording in BTS format. */
2874 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2876 if (args
!= NULL
&& *args
!= 0)
2877 error (_("Invalid argument."));
2879 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2883 execute_command ("target record-btrace", from_tty
);
2885 CATCH (exception
, RETURN_MASK_ALL
)
2887 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2888 throw_exception (exception
);
2893 /* Start recording in Intel Processor Trace format. */
2896 cmd_record_btrace_pt_start (char *args
, int from_tty
)
2898 if (args
!= NULL
&& *args
!= 0)
2899 error (_("Invalid argument."));
2901 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2905 execute_command ("target record-btrace", from_tty
);
2907 CATCH (exception
, RETURN_MASK_ALL
)
2909 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2910 throw_exception (exception
);
2915 /* Alias for "target record". */
2918 cmd_record_btrace_start (char *args
, int from_tty
)
2920 if (args
!= NULL
&& *args
!= 0)
2921 error (_("Invalid argument."));
2923 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2927 execute_command ("target record-btrace", from_tty
);
2929 CATCH (exception
, RETURN_MASK_ALL
)
2931 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2935 execute_command ("target record-btrace", from_tty
);
2937 CATCH (exception
, RETURN_MASK_ALL
)
2939 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2940 throw_exception (exception
);
2947 /* The "set record btrace" command. */
2950 cmd_set_record_btrace (char *args
, int from_tty
)
2952 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2955 /* The "show record btrace" command. */
2958 cmd_show_record_btrace (char *args
, int from_tty
)
2960 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2963 /* The "show record btrace replay-memory-access" command. */
2966 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2967 struct cmd_list_element
*c
, const char *value
)
2969 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2970 replay_memory_access
);
2973 /* The "set record btrace bts" command. */
2976 cmd_set_record_btrace_bts (char *args
, int from_tty
)
2978 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2979 "by an appropriate subcommand.\n"));
2980 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2981 all_commands
, gdb_stdout
);
2984 /* The "show record btrace bts" command. */
2987 cmd_show_record_btrace_bts (char *args
, int from_tty
)
2989 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2992 /* The "set record btrace pt" command. */
2995 cmd_set_record_btrace_pt (char *args
, int from_tty
)
2997 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2998 "by an appropriate subcommand.\n"));
2999 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
3000 all_commands
, gdb_stdout
);
3003 /* The "show record btrace pt" command. */
3006 cmd_show_record_btrace_pt (char *args
, int from_tty
)
3008 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3033 void _initialize_record_btrace (void);
3035 /* Initialize btrace commands. */
3038 _initialize_record_btrace (void)
3040 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3041 _("Start branch trace recording."), &record_btrace_cmdlist
,
3042 "record btrace ", 0, &record_cmdlist
);
3043 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3045 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3047 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3048 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3049 This format may not be available on all processors."),
3050 &record_btrace_cmdlist
);
3051 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3053 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3055 Start branch trace recording in Intel Processor Trace format.\n\n\
3056 This format may not be available on all processors."),
3057 &record_btrace_cmdlist
);
3058 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3060 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
3061 _("Set record options"), &set_record_btrace_cmdlist
,
3062 "set record btrace ", 0, &set_record_cmdlist
);
3064 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
3065 _("Show record options"), &show_record_btrace_cmdlist
,
3066 "show record btrace ", 0, &show_record_cmdlist
);
3068 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3069 replay_memory_access_types
, &replay_memory_access
, _("\
3070 Set what memory accesses are allowed during replay."), _("\
3071 Show what memory accesses are allowed during replay."),
3072 _("Default is READ-ONLY.\n\n\
3073 The btrace record target does not trace data.\n\
3074 The memory therefore corresponds to the live target and not \
3075 to the current replay position.\n\n\
3076 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3077 When READ-WRITE, allow accesses to read-only and read-write memory during \
3079 NULL
, cmd_show_replay_memory_access
,
3080 &set_record_btrace_cmdlist
,
3081 &show_record_btrace_cmdlist
);
3083 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
3084 _("Set record btrace bts options"),
3085 &set_record_btrace_bts_cmdlist
,
3086 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
3088 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
3089 _("Show record btrace bts options"),
3090 &show_record_btrace_bts_cmdlist
,
3091 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
3093 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3094 &record_btrace_conf
.bts
.size
,
3095 _("Set the record/replay bts buffer size."),
3096 _("Show the record/replay bts buffer size."), _("\
3097 When starting recording request a trace buffer of this size. \
3098 The actual buffer size may differ from the requested size. \
3099 Use \"info record\" to see the actual buffer size.\n\n\
3100 Bigger buffers allow longer recording but also take more time to process \
3101 the recorded execution trace.\n\n\
3102 The trace buffer size may not be changed while recording."), NULL
,
3103 show_record_bts_buffer_size_value
,
3104 &set_record_btrace_bts_cmdlist
,
3105 &show_record_btrace_bts_cmdlist
);
3107 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
3108 _("Set record btrace pt options"),
3109 &set_record_btrace_pt_cmdlist
,
3110 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
3112 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
3113 _("Show record btrace pt options"),
3114 &show_record_btrace_pt_cmdlist
,
3115 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
3117 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3118 &record_btrace_conf
.pt
.size
,
3119 _("Set the record/replay pt buffer size."),
3120 _("Show the record/replay pt buffer size."), _("\
3121 Bigger buffers allow longer recording but also take more time to process \
3122 the recorded execution.\n\
3123 The actual buffer size may differ from the requested size. Use \"info record\" \
3124 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3125 &set_record_btrace_pt_cmdlist
,
3126 &show_record_btrace_pt_cmdlist
);
3128 init_record_btrace_ops ();
3129 add_target (&record_btrace_ops
);
3131 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3134 record_btrace_conf
.bts
.size
= 64 * 1024;
3135 record_btrace_conf
.pt
.size
= 16 * 1024;