1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops
;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer
*record_btrace_thread_observer
;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only
[] = "read-only";
52 static const char replay_memory_access_read_write
[] = "read-write";
53 static const char *const replay_memory_access_types
[] =
55 replay_memory_access_read_only
,
56 replay_memory_access_read_write
,
/* NOTE(review): the default below is read-only; record_btrace_xfer_partial
   refuses memory writes while replaying unless this is switched to
   "read-write" (the breakpoint insert/remove paths do so temporarily).  */
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access
= replay_memory_access_read_only
;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element
*set_record_btrace_cmdlist
;
65 static struct cmd_list_element
*show_record_btrace_cmdlist
;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile
;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf
;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element
*record_btrace_cmdlist
;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
84 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
88 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
93 #define DEBUG(msg, args...) \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
109 static struct thread_info
*
110 require_btrace_thread (void)
112 struct thread_info
*tp
;
/* NOTE(review): the NULL check between this lookup and the "No thread."
   error is not visible in this extract -- presumably error() fires when
   find_thread_ptid returns NULL; confirm against the full source.  */
116 tp
= find_thread_ptid (inferior_ptid
);
118 error (_("No thread."));
120 validate_registers_access ();
124 if (btrace_is_empty (tp
))
125 error (_("No trace."));
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace. This function never
136 static struct btrace_thread_info
*
137 require_btrace (void)
139 struct thread_info
*tp
;
141 tp
= require_btrace_thread ();
146 /* Enable branch tracing for one thread. Warn on errors. */
/* Used as the new-thread observer callback: converts btrace errors into
   warnings so enabling one thread cannot abort tracing the others.
   NOTE(review): the TRY opener matching the CATCH below is missing from
   this extract.  */
149 record_btrace_enable_warn (struct thread_info
*tp
)
153 btrace_enable (tp
, &record_btrace_conf
);
155 CATCH (error
, RETURN_MASK_ERROR
)
157 warning ("%s", error
.message
);
162 /* Enable automatic tracing of new threads. */
165 record_btrace_auto_enable (void)
167 DEBUG ("attach thread observer");
169 record_btrace_thread_observer
170 = observer_attach_new_thread (record_btrace_enable_warn
);
173 /* Disable automatic tracing of new threads. */
176 record_btrace_auto_disable (void)
178 /* The observer may have been detached, already. */
179 if (record_btrace_thread_observer
== NULL
)
182 DEBUG ("detach thread observer");
184 observer_detach_new_thread (record_btrace_thread_observer
);
185 record_btrace_thread_observer
= NULL
;
188 /* The record-btrace async event handler function. */
191 record_btrace_handle_async_inferior_event (gdb_client_data data
)
193 inferior_event_handler (INF_REG_EVENT
, NULL
);
196 /* See record-btrace.h. */
/* Pushes the record-btrace target, enables auto-tracing of new threads,
   installs the async event handler and notifies observers that recording
   started with format "btrace".  */
199 record_btrace_push_target (void)
203 record_btrace_auto_enable ();
205 push_target (&record_btrace_ops
);
207 record_btrace_async_inferior_event_handler
208 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
210 record_btrace_generating_corefile
= 0;
212 format
= btrace_format_short_string (record_btrace_conf
.format
);
213 observer_notify_record_changed (current_inferior (), 1, "btrace", format
);
216 /* Disable btrace on a set of threads on scope exit. */
218 struct scoped_btrace_disable
220 scoped_btrace_disable () = default;
222 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable
);
224 ~scoped_btrace_disable ()
226 for (thread_info
*tp
: m_threads
)
230 void add_thread (thread_info
*thread
)
232 m_threads
.push_front (thread
);
/* Threads for which btrace should be disabled if the guard is not
   discarded before scope exit.  */
241 std::forward_list
<thread_info
*> m_threads
;
244 /* The to_open method of target record-btrace. */
247 record_btrace_open (const char *args
, int from_tty
)
249 /* If we fail to enable btrace for one thread, disable it for the threads for
250 which it was successfully enabled. */
251 scoped_btrace_disable btrace_disable
;
252 struct thread_info
*tp
;
258 if (!target_has_execution
)
259 error (_("The program is not being run."));
261 gdb_assert (record_btrace_thread_observer
== NULL
);
263 ALL_NON_EXITED_THREADS (tp
)
264 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
266 btrace_enable (tp
, &record_btrace_conf
);
268 btrace_disable
.add_thread (tp
);
271 record_btrace_push_target ();
/* All threads enabled successfully; keep btrace on at scope exit.
   NOTE(review): discard() is not visible in the extracted part of
   scoped_btrace_disable above -- confirm against the full source.  */
273 btrace_disable
.discard ();
276 /* The to_stop_recording method of target record-btrace. */
279 record_btrace_stop_recording (struct target_ops
*self
)
281 struct thread_info
*tp
;
283 DEBUG ("stop recording");
285 record_btrace_auto_disable ();
287 ALL_NON_EXITED_THREADS (tp
)
288 if (tp
->btrace
.target
!= NULL
)
292 /* The to_disconnect method of target record-btrace. */
295 record_btrace_disconnect (struct target_ops
*self
, const char *args
,
298 struct target_ops
*beneath
= self
->beneath
;
300 /* Do not stop recording, just clean up GDB side. */
301 unpush_target (self
);
303 /* Forward disconnect. */
304 beneath
->to_disconnect (beneath
, args
, from_tty
);
307 /* The to_close method of target record-btrace. */
310 record_btrace_close (struct target_ops
*self
)
312 struct thread_info
*tp
;
314 if (record_btrace_async_inferior_event_handler
!= NULL
)
315 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
317 /* Make sure automatic recording gets disabled even if we did not stop
318 recording before closing the record-btrace target. */
319 record_btrace_auto_disable ();
321 /* We should have already stopped recording.
322 Tear down btrace in case we have not. */
323 ALL_NON_EXITED_THREADS (tp
)
324 btrace_teardown (tp
);
327 /* The to_async method of target record-btrace. */
/* Mark or clear our async handler depending on ENABLE, then forward the
   request to the target beneath.  */
330 record_btrace_async (struct target_ops
*ops
, int enable
)
333 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
335 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
337 ops
->beneath
->to_async (ops
->beneath
, enable
);
340 /* Adjusts the size and returns a human readable size suffix. */
343 record_btrace_adjust_size (unsigned int *size
)
/* NOTE(review): each branch tests whether SZ is an exact multiple of a
   power of two (GiB/MiB/KiB); the corresponding division/suffix lines are
   missing from this extract -- confirm against the full source.  */
349 if ((sz
& ((1u << 30) - 1)) == 0)
354 else if ((sz
& ((1u << 20) - 1)) == 0)
359 else if ((sz
& ((1u << 10) - 1)) == 0)
368 /* Print a BTS configuration. */
371 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
379 suffix
= record_btrace_adjust_size (&size
)
;
380 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
384 /* Print an Intel Processor Trace configuration. */
387 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
395 suffix
= record_btrace_adjust_size (&size
);
396 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
400 /* Print a branch tracing configuration. */
/* Dispatch on CONF->format and print the recording format plus the
   format-specific configuration (BTS or Intel PT buffer sizes).  */
403 record_btrace_print_conf (const struct btrace_config
*conf
)
405 printf_unfiltered (_("Recording format: %s.\n"),
406 btrace_format_string (conf
->format
));
408 switch (conf
->format
)
410 case BTRACE_FORMAT_NONE
:
413 case BTRACE_FORMAT_BTS
:
414 record_btrace_print_bts_conf (&conf
->bts
);
417 case BTRACE_FORMAT_PT
:
418 record_btrace_print_pt_conf (&conf
->pt
);
/* FIX: corrected spelling of the internal-error message ("Unkown" ->
   "Unknown"); this typo was also fixed in upstream GDB.  */
422 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format."));
425 /* The to_info_record method of target record-btrace. */
428 record_btrace_info (struct target_ops
*self
)
430 struct btrace_thread_info
*btinfo
;
431 const struct btrace_config
*conf
;
432 struct thread_info
*tp
;
433 unsigned int insns
, calls
, gaps
;
/* NOTE(review): the zero-initialization of insns/calls/gaps and the NULL
   check on TP are not visible in this extract.  */
437 tp
= find_thread_ptid (inferior_ptid
);
439 error (_("No thread."));
441 validate_registers_access ();
443 btinfo
= &tp
->btrace
;
445 conf
= btrace_conf (btinfo
);
447 record_btrace_print_conf (conf
);
455 if (!btrace_is_empty (tp
))
457 struct btrace_call_iterator call
;
458 struct btrace_insn_iterator insn
;
460 btrace_call_end (&call
, btinfo
);
461 btrace_call_prev (&call
, 1);
462 calls
= btrace_call_number (&call
);
464 btrace_insn_end (&insn
, btinfo
);
465 insns
= btrace_insn_number (&insn
);
467 /* If the last instruction is not a gap, it is the current instruction
468 that is not actually part of the record. */
469 if (btrace_insn_get (&insn
) != NULL
)
472 gaps
= btinfo
->ngaps
;
475 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
476 "for thread %s (%s).\n"), insns
, calls
, gaps
,
477 print_thread_id (tp
), target_pid_to_str (tp
->ptid
));
479 if (btrace_is_replaying (tp
))
480 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
481 btrace_insn_number (btinfo
->replay
));
484 /* Print a decode error. */
487 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
488 enum btrace_format format
)
490 const char *errstr
= btrace_decode_error (format
, errcode
);
492 uiout
->text (_("["));
493 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
494 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
496 uiout
->text (_("decode error ("));
497 uiout
->field_int ("errcode", errcode
);
498 uiout
->text (_("): "));
500 uiout
->text (errstr
);
501 uiout
->text (_("]\n"));
504 /* Print an unsigned int. */
507 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
509 uiout
->field_fmt (fld
, "%u", val
);
512 /* A range of source lines. */
514 struct btrace_line_range
516 /* The symtab this line is from. */
517 struct symtab
*symtab
;
519 /* The first line (inclusive). */
522 /* The last line (exclusive). */
/* NOTE(review): the "int begin;" / "int end;" field declarations are
   missing from this extract; only their comments remain.  Since END is
   exclusive, END <= BEGIN encodes the empty range (see
   btrace_line_range_is_empty below).  */
526 /* Construct a line range. */
528 static struct btrace_line_range
529 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
531 struct btrace_line_range range
;
533 range
.symtab
= symtab
;
540 /* Add a line to a line range. */
542 static struct btrace_line_range
543 btrace_line_range_add (struct btrace_line_range range
, int line
)
545 if (range
.end
<= range
.begin
)
547 /* This is the first entry. */
549 range
.end
= line
+ 1;
551 else if (line
< range
.begin
)
553 else if (range
.end
< line
)
559 /* Return non-zero if RANGE is empty, zero otherwise. */
562 btrace_line_range_is_empty (struct btrace_line_range range
)
564 return range
.end
<= range
.begin
;
567 /* Return non-zero if LHS contains RHS, zero otherwise. */
570 btrace_line_range_contains_range (struct btrace_line_range lhs
,
571 struct btrace_line_range rhs
)
573 return ((lhs
.symtab
== rhs
.symtab
)
574 && (lhs
.begin
<= rhs
.begin
)
575 && (rhs
.end
<= lhs
.end
));
578 /* Find the line range associated with PC. */
580 static struct btrace_line_range
581 btrace_find_line_range (CORE_ADDR pc
)
583 struct btrace_line_range range
;
584 struct linetable_entry
*lines
;
585 struct linetable
*ltable
;
586 struct symtab
*symtab
;
/* NOTE(review): the NULL guards between each lookup and the early
   "empty range" returns below are missing from this extract -- the
   returns presumably fire when symtab/ltable/lines are NULL.  */
589 symtab
= find_pc_line_symtab (pc
);
591 return btrace_mk_line_range (NULL
, 0, 0);
593 ltable
= SYMTAB_LINETABLE (symtab
);
595 return btrace_mk_line_range (symtab
, 0, 0);
597 nlines
= ltable
->nitems
;
598 lines
= ltable
->item
;
600 return btrace_mk_line_range (symtab
, 0, 0);
602 range
= btrace_mk_line_range (symtab
, 0, 0);
603 for (i
= 0; i
< nlines
- 1; i
++)
605 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0))
606 range
= btrace_line_range_add (range
, lines
[i
].line
);
612 /* Print source lines in LINES to UIOUT.
614 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
615 instructions corresponding to that source line. When printing a new source
616 line, we do the cleanups for the open chain and open a new cleanup chain for
617 the new source line. If the source line range in LINES is not empty, this
618 function will leave the cleanup chain for the last printed source line open
619 so instructions can be added to it. */
622 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
623 struct cleanup
**ui_item_chain
, gdb_disassembly_flags flags
)
625 print_source_lines_flags psl_flags
;
629 if (flags
& DISASSEMBLY_FILENAME
)
630 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
632 for (line
= lines
.begin
; line
< lines
.end
; ++line
)
634 if (*ui_item_chain
!= NULL
)
635 do_cleanups (*ui_item_chain
);
638 = make_cleanup_ui_out_tuple_begin_end (uiout
, "src_and_asm_line");
640 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
642 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
646 /* Disassemble a section of the recorded instruction trace. */
/* Iterates [BEGIN, END) over the recorded instructions, interleaving
   source lines when DISASSEMBLY_SOURCE is requested and reporting trace
   gaps (NULL instructions) via btrace_ui_out_decode_error.  */
649 btrace_insn_history (struct ui_out
*uiout
,
650 const struct btrace_thread_info
*btinfo
,
651 const struct btrace_insn_iterator
*begin
,
652 const struct btrace_insn_iterator
*end
,
653 gdb_disassembly_flags flags
)
655 struct cleanup
*cleanups
, *ui_item_chain
;
656 struct gdbarch
*gdbarch
;
657 struct btrace_insn_iterator it
;
658 struct btrace_line_range last_lines
;
660 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
661 btrace_insn_number (begin
), btrace_insn_number (end
));
663 flags
|= DISASSEMBLY_SPECULATIVE
;
665 gdbarch
= target_gdbarch ();
666 last_lines
= btrace_mk_line_range (NULL
, 0, 0);
668 cleanups
= make_cleanup_ui_out_list_begin_end (uiout
, "asm_insns");
670 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
671 instructions corresponding to that line. */
672 ui_item_chain
= NULL
;
674 gdb_pretty_print_disassembler
disasm (gdbarch
);
676 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
678 const struct btrace_insn
*insn
;
680 insn
= btrace_insn_get (&it
);
682 /* A NULL instruction indicates a gap in the trace. */
685 const struct btrace_config
*conf
;
687 conf
= btrace_conf (btinfo
);
689 /* We have trace so we must have a configuration. */
690 gdb_assert (conf
!= NULL
);
692 uiout
->field_fmt ("insn-number", "%u",
693 btrace_insn_number (&it
));
696 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
701 struct disasm_insn dinsn
;
703 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
705 struct btrace_line_range lines
;
707 lines
= btrace_find_line_range (insn
->pc
);
708 if (!btrace_line_range_is_empty (lines
)
709 && !btrace_line_range_contains_range (last_lines
, lines
))
711 btrace_print_lines (lines
, uiout
, &ui_item_chain
, flags
);
714 else if (ui_item_chain
== NULL
)
717 = make_cleanup_ui_out_tuple_begin_end (uiout
,
719 /* No source information. */
720 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
723 gdb_assert (ui_item_chain
!= NULL
);
726 memset (&dinsn
, 0, sizeof (dinsn
));
727 dinsn
.number
= btrace_insn_number (&it
);
728 dinsn
.addr
= insn
->pc
;
730 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
731 dinsn
.is_speculative
= 1;
733 disasm
.pretty_print_insn (uiout
, &dinsn
, flags
);
737 do_cleanups (cleanups
);
740 /* The to_insn_history method of target record-btrace. */
743 record_btrace_insn_history (struct target_ops
*self
, int size
,
744 gdb_disassembly_flags flags
)
746 struct btrace_thread_info
*btinfo
;
747 struct btrace_insn_history
*history
;
748 struct btrace_insn_iterator begin
, end
;
749 struct ui_out
*uiout
;
750 unsigned int context
, covered
;
752 uiout
= current_uiout
;
753 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
754 context
= abs (size
);
756 error (_("Bad record instruction-history-size."));
758 btinfo
= require_btrace ();
759 history
= btinfo
->insn_history
;
/* NOTE(review): the history == NULL test selecting between the
   "first request" branch below and the "continue from previous" branch
   (begin = history->begin) is not visible in this extract.  */
762 struct btrace_insn_iterator
*replay
;
764 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
766 /* If we're replaying, we start at the replay position. Otherwise, we
767 start at the tail of the trace. */
768 replay
= btinfo
->replay
;
772 btrace_insn_end (&begin
, btinfo
);
774 /* We start from here and expand in the requested direction. Then we
775 expand in the other direction, as well, to fill up any remaining
780 /* We want the current position covered, as well. */
781 covered
= btrace_insn_next (&end
, 1);
782 covered
+= btrace_insn_prev (&begin
, context
- covered
);
783 covered
+= btrace_insn_next (&end
, context
- covered
);
787 covered
= btrace_insn_next (&end
, context
);
788 covered
+= btrace_insn_prev (&begin
, context
- covered
);
793 begin
= history
->begin
;
796 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
797 btrace_insn_number (&begin
), btrace_insn_number (&end
));
802 covered
= btrace_insn_prev (&begin
, context
);
807 covered
= btrace_insn_next (&end
, context
);
812 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
816 printf_unfiltered (_("At the start of the branch trace record.\n"));
818 printf_unfiltered (_("At the end of the branch trace record.\n"));
821 btrace_set_insn_history (btinfo
, &begin
, &end
);
824 /* The to_insn_history_range method of target record-btrace. */
827 record_btrace_insn_history_range (struct target_ops
*self
,
828 ULONGEST from
, ULONGEST to
,
829 gdb_disassembly_flags flags
)
831 struct btrace_thread_info
*btinfo
;
832 struct btrace_insn_iterator begin
, end
;
833 struct ui_out
*uiout
;
834 unsigned int low
, high
;
837 uiout
= current_uiout
;
838 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
842 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
844 /* Check for wrap-arounds. */
845 if (low
!= from
|| high
!= to
)
846 error (_("Bad range."));
849 error (_("Bad range."));
851 btinfo
= require_btrace ();
853 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
855 error (_("Range out of bounds."));
857 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
860 /* Silently truncate the range. */
861 btrace_insn_end (&end
, btinfo
);
865 /* We want both begin and end to be inclusive. */
866 btrace_insn_next (&end
, 1);
869 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
870 btrace_set_insn_history (btinfo
, &begin
, &end
);
873 /* The to_insn_history_from method of target record-btrace. */
/* Convert a (FROM, SIZE) request into an inclusive [begin, end] range
   centered on FROM and delegate to record_btrace_insn_history_range.  */
876 record_btrace_insn_history_from (struct target_ops
*self
,
877 ULONGEST from
, int size
,
878 gdb_disassembly_flags flags
)
880 ULONGEST begin
, end
, context
;
882 context
= abs (size
);
884 error (_("Bad record instruction-history-size."));
893 begin
= from
- context
+ 1;
898 end
= from
+ context
- 1;
900 /* Check for wrap-around. */
905 record_btrace_insn_history_range (self
, begin
, end
, flags
);
908 /* Print the instruction number range for a function call history line. */
911 btrace_call_history_insn_range (struct ui_out
*uiout
,
912 const struct btrace_function
*bfun
)
914 unsigned int begin
, end
, size
;
916 size
= bfun
->insn
.size ();
917 gdb_assert (size
> 0);
919 begin
= bfun
->insn_offset
;
920 end
= begin
+ size
- 1;
922 ui_out_field_uint (uiout
, "insn begin", begin
);
924 ui_out_field_uint (uiout
, "insn end", end
);
927 /* Compute the lowest and highest source line for the instructions in BFUN
928 and return them in PBEGIN and PEND.
929 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
930 result from inlining or macro expansion. */
933 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
934 int *pbegin
, int *pend
)
936 struct symtab
*symtab
;
/* NOTE(review): the SYM lookup and its NULL guard preceding this call
   are missing from this extract.  */
947 symtab
= symbol_symtab (sym
);
949 for (const btrace_insn
&insn
: bfun
->insn
)
951 struct symtab_and_line sal
;
953 sal
= find_pc_line (insn
.pc
, 0);
954 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
957 begin
= std::min (begin
, sal
.line
);
958 end
= std::max (end
, sal
.line
);
966 /* Print the source line information for a function call history line. */
969 btrace_call_history_src_line (struct ui_out
*uiout
,
970 const struct btrace_function
*bfun
)
979 uiout
->field_string ("file",
980 symtab_to_filename_for_display (symbol_symtab (sym
)));
982 btrace_compute_src_line_range (bfun
, &begin
, &end
);
987 uiout
->field_int ("min line", begin
);
993 uiout
->field_int ("max line", end
);
996 /* Get the name of a branch trace function. */
/* Prefer the full symbol's print name, fall back to the minimal symbol's;
   the final fallback (when both are NULL) is outside this extract.  */
999 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1001 struct minimal_symbol
*msym
;
1011 return SYMBOL_PRINT_NAME (sym
);
1012 else if (msym
!= NULL
)
1013 return MSYMBOL_PRINT_NAME (msym
);
1018 /* Disassemble a section of the recorded function trace. */
1021 btrace_call_history (struct ui_out
*uiout
,
1022 const struct btrace_thread_info
*btinfo
,
1023 const struct btrace_call_iterator
*begin
,
1024 const struct btrace_call_iterator
*end
,
1027 struct btrace_call_iterator it
;
1028 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1030 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1031 btrace_call_number (end
));
1033 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1035 const struct btrace_function
*bfun
;
1036 struct minimal_symbol
*msym
;
1039 bfun
= btrace_call_get (&it
);
1043 /* Print the function index. */
1044 ui_out_field_uint (uiout
, "index", bfun
->number
);
1047 /* Indicate gaps in the trace. */
1048 if (bfun
->errcode
!= 0)
1050 const struct btrace_config
*conf
;
1052 conf
= btrace_conf (btinfo
);
1054 /* We have trace so we must have a configuration. */
1055 gdb_assert (conf
!= NULL
);
1057 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1062 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1064 int level
= bfun
->level
+ btinfo
->level
, i
;
1066 for (i
= 0; i
< level
; ++i
)
1071 uiout
->field_string ("function", SYMBOL_PRINT_NAME (sym
));
1072 else if (msym
!= NULL
)
1073 uiout
->field_string ("function", MSYMBOL_PRINT_NAME (msym
));
1074 else if (!uiout
->is_mi_like_p ())
1075 uiout
->field_string ("function", "??");
1077 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1079 uiout
->text (_("\tinst "));
1080 btrace_call_history_insn_range (uiout
, bfun
);
1083 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1085 uiout
->text (_("\tat "));
1086 btrace_call_history_src_line (uiout
, bfun
);
1093 /* The to_call_history method of target record-btrace. */
1096 record_btrace_call_history (struct target_ops
*self
, int size
,
1097 record_print_flags flags
)
1099 struct btrace_thread_info
*btinfo
;
1100 struct btrace_call_history
*history
;
1101 struct btrace_call_iterator begin
, end
;
1102 struct ui_out
*uiout
;
1103 unsigned int context
, covered
;
1105 uiout
= current_uiout
;
/* NOTE(review): tuple name "insn history" here vs "func history" in
   record_btrace_call_history_range below looks inconsistent -- confirm
   the intended MI output before changing it.  */
1106 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1107 context
= abs (size
);
1109 error (_("Bad record function-call-history-size."));
1111 btinfo
= require_btrace ();
1112 history
= btinfo
->call_history
;
1113 if (history
== NULL
)
1115 struct btrace_insn_iterator
*replay
;
1117 DEBUG ("call-history (0x%x): %d", (int) flags
, size
);
1119 /* If we're replaying, we start at the replay position. Otherwise, we
1120 start at the tail of the trace. */
1121 replay
= btinfo
->replay
;
1124 begin
.btinfo
= btinfo
;
1125 begin
.index
= replay
->call_index
;
1128 btrace_call_end (&begin
, btinfo
);
1130 /* We start from here and expand in the requested direction. Then we
1131 expand in the other direction, as well, to fill up any remaining
1136 /* We want the current position covered, as well. */
1137 covered
= btrace_call_next (&end
, 1);
1138 covered
+= btrace_call_prev (&begin
, context
- covered
);
1139 covered
+= btrace_call_next (&end
, context
- covered
);
1143 covered
= btrace_call_next (&end
, context
);
1144 covered
+= btrace_call_prev (&begin
, context
- covered
);
1149 begin
= history
->begin
;
1152 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags
, size
,
1153 btrace_call_number (&begin
), btrace_call_number (&end
));
1158 covered
= btrace_call_prev (&begin
, context
);
1163 covered
= btrace_call_next (&end
, context
);
1168 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1172 printf_unfiltered (_("At the start of the branch trace record.\n"));
1174 printf_unfiltered (_("At the end of the branch trace record.\n"));
1177 btrace_set_call_history (btinfo
, &begin
, &end
);
1180 /* The to_call_history_range method of target record-btrace. */
1183 record_btrace_call_history_range (struct target_ops
*self
,
1184 ULONGEST from
, ULONGEST to
,
1185 record_print_flags flags
)
1187 struct btrace_thread_info
*btinfo
;
1188 struct btrace_call_iterator begin
, end
;
1189 struct ui_out
*uiout
;
1190 unsigned int low
, high
;
1193 uiout
= current_uiout
;
1194 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1198 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags
, low
, high
);
1200 /* Check for wrap-arounds. */
1201 if (low
!= from
|| high
!= to
)
1202 error (_("Bad range."));
1205 error (_("Bad range."));
1207 btinfo
= require_btrace ();
1209 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1211 error (_("Range out of bounds."));
1213 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1216 /* Silently truncate the range. */
1217 btrace_call_end (&end
, btinfo
);
1221 /* We want both begin and end to be inclusive. */
1222 btrace_call_next (&end
, 1);
1225 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1226 btrace_set_call_history (btinfo
, &begin
, &end
);
1229 /* The to_call_history_from method of target record-btrace. */
/* Convert a (FROM, SIZE) request into an inclusive [begin, end] range
   centered on FROM and delegate to record_btrace_call_history_range.  */
1232 record_btrace_call_history_from (struct target_ops
*self
,
1233 ULONGEST from
, int size
,
1234 record_print_flags flags
)
1236 ULONGEST begin
, end
, context
;
1238 context
= abs (size
);
1240 error (_("Bad record function-call-history-size."));
1249 begin
= from
- context
+ 1;
1254 end
= from
+ context
- 1;
1256 /* Check for wrap-around. */
1261 record_btrace_call_history_range (self
, begin
, end
, flags
);
1264 /* The to_record_method method of target record-btrace. */
1266 static enum record_method
1267 record_btrace_record_method (struct target_ops
*self
, ptid_t ptid
)
1269 struct thread_info
* const tp
= find_thread_ptid (ptid
);
1272 error (_("No thread."));
1274 if (tp
->btrace
.target
== NULL
)
1275 return RECORD_METHOD_NONE
;
1277 return RECORD_METHOD_BTRACE
;
1280 /* The to_record_is_replaying method of target record-btrace. */
/* Returns non-zero if any non-exited thread matching PTID is replaying.  */
1283 record_btrace_is_replaying (struct target_ops
*self
, ptid_t ptid
)
1285 struct thread_info
*tp
;
1287 ALL_NON_EXITED_THREADS (tp
)
1288 if (ptid_match (tp
->ptid
, ptid
) && btrace_is_replaying (tp
))
1294 /* The to_record_will_replay method of target record-btrace. */
1297 record_btrace_will_replay (struct target_ops
*self
, ptid_t ptid
, int dir
)
1299 return dir
== EXEC_REVERSE
|| record_btrace_is_replaying (self
, ptid
);
1302 /* The to_xfer_partial method of target record-btrace. */
1304 static enum target_xfer_status
1305 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1306 const char *annex
, gdb_byte
*readbuf
,
1307 const gdb_byte
*writebuf
, ULONGEST offset
,
1308 ULONGEST len
, ULONGEST
*xfered_len
)
1310 /* Filter out requests that don't make sense during replay. */
/* While replaying with read-only access (and not writing a core file):
   refuse memory writes and only satisfy reads from read-only sections;
   everything else is reported unavailable.  */
1311 if (replay_memory_access
== replay_memory_access_read_only
1312 && !record_btrace_generating_corefile
1313 && record_btrace_is_replaying (ops
, inferior_ptid
))
1317 case TARGET_OBJECT_MEMORY
:
1319 struct target_section
*section
;
1321 /* We do not allow writing memory in general. */
1322 if (writebuf
!= NULL
)
1325 return TARGET_XFER_UNAVAILABLE
;
1328 /* We allow reading readonly memory. */
1329 section
= target_section_by_addr (ops
, offset
);
1330 if (section
!= NULL
)
1332 /* Check if the section we found is readonly. */
1333 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1334 section
->the_bfd_section
)
1335 & SEC_READONLY
) != 0)
1337 /* Truncate the request to fit into this section. */
1338 len
= std::min (len
, section
->endaddr
- offset
);
1344 return TARGET_XFER_UNAVAILABLE
;
1349 /* Forward the request. */
1351 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1352 offset
, len
, xfered_len
);
1355 /* The to_insert_breakpoint method of target record-btrace. */
/* Temporarily lifts the replay read-only memory restriction so the target
   beneath can patch in the breakpoint, restoring the previous access mode
   on both the normal and the exception path.  */
1358 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1359 struct gdbarch
*gdbarch
,
1360 struct bp_target_info
*bp_tgt
)
1365 /* Inserting breakpoints requires accessing memory. Allow it for the
1366 duration of this function. */
1367 old
= replay_memory_access
;
1368 replay_memory_access
= replay_memory_access_read_write
;
1373 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1375 CATCH (except
, RETURN_MASK_ALL
)
1377 replay_memory_access
= old
;
1378 throw_exception (except
);
1381 replay_memory_access
= old
;
1386 /* The to_remove_breakpoint method of target record-btrace. */
/* Mirror of insert_breakpoint: allow memory writes for the duration of the
   removal, restoring the saved access mode afterwards or on exception.  */
1389 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1390 struct gdbarch
*gdbarch
,
1391 struct bp_target_info
*bp_tgt
,
1392 enum remove_bp_reason reason
)
1397 /* Removing breakpoints requires accessing memory. Allow it for the
1398 duration of this function. */
1399 old
= replay_memory_access
;
1400 replay_memory_access
= replay_memory_access_read_write
;
1405 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
,
1408 CATCH (except
, RETURN_MASK_ALL
)
1410 replay_memory_access
= old
;
1411 throw_exception (except
);
1414 replay_memory_access
= old
;
1419 /* The to_fetch_registers method of target record-btrace. */
/* While replaying, only the PC (taken from the current replay instruction)
   can be supplied; otherwise the request is forwarded beneath.  */
1422 record_btrace_fetch_registers (struct target_ops
*ops
,
1423 struct regcache
*regcache
, int regno
)
1425 struct btrace_insn_iterator
*replay
;
1426 struct thread_info
*tp
;
1428 tp
= find_thread_ptid (regcache_get_ptid (regcache
));
1429 gdb_assert (tp
!= NULL
);
1431 replay
= tp
->btrace
.replay
;
1432 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1434 const struct btrace_insn
*insn
;
1435 struct gdbarch
*gdbarch
;
1438 gdbarch
= regcache
->arch ();
1439 pcreg
= gdbarch_pc_regnum (gdbarch
);
1443 /* We can only provide the PC register. */
1444 if (regno
>= 0 && regno
!= pcreg
)
1447 insn
= btrace_insn_get (replay
);
1448 gdb_assert (insn
!= NULL
);
1450 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1454 struct target_ops
*t
= ops
->beneath
;
1456 t
->to_fetch_registers (t
, regcache
, regno
);
1460 /* The to_store_registers method of target record-btrace. */
1463 record_btrace_store_registers (struct target_ops
*ops
,
1464 struct regcache
*regcache
, int regno
)
1466 struct target_ops
*t
;
1468 if (!record_btrace_generating_corefile
1469 && record_btrace_is_replaying (ops
, regcache_get_ptid (regcache
)))
1470 error (_("Cannot write registers while replaying."));
1472 gdb_assert (may_write_registers
!= 0);
1475 t
->to_store_registers (t
, regcache
, regno
);
1478 /* The to_prepare_to_store method of target record-btrace. */
1481 record_btrace_prepare_to_store (struct target_ops
*ops
,
1482 struct regcache
*regcache
)
1484 struct target_ops
*t
;
1486 if (!record_btrace_generating_corefile
1487 && record_btrace_is_replaying (ops
, regcache_get_ptid (regcache
)))
1491 t
->to_prepare_to_store (t
, regcache
);
1494 /* The branch trace frame cache. */
1496 struct btrace_frame_cache
1499 struct thread_info
*tp
;
1501 /* The frame info. */
1502 struct frame_info
*frame
;
1504 /* The branch trace function segment. */
1505 const struct btrace_function
*bfun
;
1508 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1510 static htab_t bfcache
;
1512 /* hash_f for htab_create_alloc of bfcache. */
/* Entries hash and compare by their FRAME pointer (see bfcache_eq).  */
1515 bfcache_hash (const void *arg
)
1517 const struct btrace_frame_cache
*cache
1518 = (const struct btrace_frame_cache
*) arg
;
1520 return htab_hash_pointer (cache
->frame
);
1523 /* eq_f for htab_create_alloc of bfcache. */
1526 bfcache_eq (const void *arg1
, const void *arg2
)
1528 const struct btrace_frame_cache
*cache1
1529 = (const struct btrace_frame_cache
*) arg1
;
1530 const struct btrace_frame_cache
*cache2
1531 = (const struct btrace_frame_cache
*) arg2
;
1533 return cache1
->frame
== cache2
->frame
;
1536 /* Create a new btrace frame cache. */
/* Allocates the cache on the frame obstack and registers it in BFCACHE,
   asserting that no entry for FRAME exists yet.  */
1538 static struct btrace_frame_cache
*
1539 bfcache_new (struct frame_info
*frame
)
1541 struct btrace_frame_cache
*cache
;
1544 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1545 cache
->frame
= frame
;
1547 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1548 gdb_assert (*slot
== NULL
);
1554 /* Extract the branch trace function from a branch trace frame. */
1556 static const struct btrace_function
*
1557 btrace_get_frame_function (struct frame_info
*frame
)
1559 const struct btrace_frame_cache
*cache
;
1560 struct btrace_frame_cache pattern
;
1563 pattern
.frame
= frame
;
1565 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1569 cache
= (const struct btrace_frame_cache
*) *slot
;
1573 /* Implement stop_reason method for record_btrace_frame_unwind. */
1575 static enum unwind_stop_reason
1576 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1579 const struct btrace_frame_cache
*cache
;
1580 const struct btrace_function
*bfun
;
1582 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1584 gdb_assert (bfun
!= NULL
);
1587 return UNWIND_UNAVAILABLE
;
1589 return UNWIND_NO_REASON
;
1592 /* Implement this_id method for record_btrace_frame_unwind. */
1595 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1596 struct frame_id
*this_id
)
1598 const struct btrace_frame_cache
*cache
;
1599 const struct btrace_function
*bfun
;
1600 struct btrace_call_iterator it
;
1601 CORE_ADDR code
, special
;
1603 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1606 gdb_assert (bfun
!= NULL
);
1608 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1609 bfun
= btrace_call_get (&it
);
1611 code
= get_frame_func (this_frame
);
1612 special
= bfun
->number
;
1614 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1616 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1617 btrace_get_bfun_name (cache
->bfun
),
1618 core_addr_to_string_nz (this_id
->code_addr
),
1619 core_addr_to_string_nz (this_id
->special_addr
));
1622 /* Implement prev_register method for record_btrace_frame_unwind. */
1624 static struct value
*
1625 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1629 const struct btrace_frame_cache
*cache
;
1630 const struct btrace_function
*bfun
, *caller
;
1631 struct btrace_call_iterator it
;
1632 struct gdbarch
*gdbarch
;
1636 gdbarch
= get_frame_arch (this_frame
);
1637 pcreg
= gdbarch_pc_regnum (gdbarch
);
1638 if (pcreg
< 0 || regnum
!= pcreg
)
1639 throw_error (NOT_AVAILABLE_ERROR
,
1640 _("Registers are not available in btrace record history"));
1642 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1644 gdb_assert (bfun
!= NULL
);
1646 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1647 throw_error (NOT_AVAILABLE_ERROR
,
1648 _("No caller in btrace record history"));
1650 caller
= btrace_call_get (&it
);
1652 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1653 pc
= caller
->insn
.front ().pc
;
1656 pc
= caller
->insn
.back ().pc
;
1657 pc
+= gdb_insn_length (gdbarch
, pc
);
1660 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1661 btrace_get_bfun_name (bfun
), bfun
->level
,
1662 core_addr_to_string_nz (pc
));
1664 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1667 /* Implement sniffer method for record_btrace_frame_unwind. */
1670 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1671 struct frame_info
*this_frame
,
1674 const struct btrace_function
*bfun
;
1675 struct btrace_frame_cache
*cache
;
1676 struct thread_info
*tp
;
1677 struct frame_info
*next
;
1679 /* THIS_FRAME does not contain a reference to its thread. */
1680 tp
= find_thread_ptid (inferior_ptid
);
1681 gdb_assert (tp
!= NULL
);
1684 next
= get_next_frame (this_frame
);
1687 const struct btrace_insn_iterator
*replay
;
1689 replay
= tp
->btrace
.replay
;
1691 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1695 const struct btrace_function
*callee
;
1696 struct btrace_call_iterator it
;
1698 callee
= btrace_get_frame_function (next
);
1699 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1702 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1705 bfun
= btrace_call_get (&it
);
1711 DEBUG ("[frame] sniffed frame for %s on level %d",
1712 btrace_get_bfun_name (bfun
), bfun
->level
);
1714 /* This is our frame. Initialize the frame cache. */
1715 cache
= bfcache_new (this_frame
);
1719 *this_cache
= cache
;
1723 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1726 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1727 struct frame_info
*this_frame
,
1730 const struct btrace_function
*bfun
, *callee
;
1731 struct btrace_frame_cache
*cache
;
1732 struct btrace_call_iterator it
;
1733 struct frame_info
*next
;
1734 struct thread_info
*tinfo
;
1736 next
= get_next_frame (this_frame
);
1740 callee
= btrace_get_frame_function (next
);
1744 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1747 tinfo
= find_thread_ptid (inferior_ptid
);
1748 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1751 bfun
= btrace_call_get (&it
);
1753 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1754 btrace_get_bfun_name (bfun
), bfun
->level
);
1756 /* This is our frame. Initialize the frame cache. */
1757 cache
= bfcache_new (this_frame
);
1761 *this_cache
= cache
;
1766 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1768 struct btrace_frame_cache
*cache
;
1771 cache
= (struct btrace_frame_cache
*) this_cache
;
1773 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1774 gdb_assert (slot
!= NULL
);
1776 htab_remove_elt (bfcache
, cache
);
1779 /* btrace recording does not store previous memory content, neither the stack
1780 frames content. Any unwinding would return errorneous results as the stack
1781 contents no longer matches the changed PC value restored from history.
1782 Therefore this unwinder reports any possibly unwound registers as
1785 const struct frame_unwind record_btrace_frame_unwind
=
1788 record_btrace_frame_unwind_stop_reason
,
1789 record_btrace_frame_this_id
,
1790 record_btrace_frame_prev_register
,
1792 record_btrace_frame_sniffer
,
1793 record_btrace_frame_dealloc_cache
1796 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1799 record_btrace_frame_unwind_stop_reason
,
1800 record_btrace_frame_this_id
,
1801 record_btrace_frame_prev_register
,
1803 record_btrace_tailcall_frame_sniffer
,
1804 record_btrace_frame_dealloc_cache
1807 /* Implement the to_get_unwinder method. */
1809 static const struct frame_unwind
*
1810 record_btrace_to_get_unwinder (struct target_ops
*self
)
1812 return &record_btrace_frame_unwind
;
1815 /* Implement the to_get_tailcall_unwinder method. */
1817 static const struct frame_unwind
*
1818 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1820 return &record_btrace_tailcall_frame_unwind
;
1823 /* Return a human-readable string for FLAG. */
1826 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1834 return "reverse-step";
1840 return "reverse-cont";
1849 /* Indicate that TP should be resumed according to FLAG. */
1852 record_btrace_resume_thread (struct thread_info
*tp
,
1853 enum btrace_thread_flag flag
)
1855 struct btrace_thread_info
*btinfo
;
1857 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1858 target_pid_to_str (tp
->ptid
), flag
, btrace_thread_flag_to_str (flag
));
1860 btinfo
= &tp
->btrace
;
1862 /* Fetch the latest branch trace. */
1865 /* A resume request overwrites a preceding resume or stop request. */
1866 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1867 btinfo
->flags
|= flag
;
1870 /* Get the current frame for TP. */
1872 static struct frame_info
*
1873 get_thread_current_frame (struct thread_info
*tp
)
1875 struct frame_info
*frame
;
1876 ptid_t old_inferior_ptid
;
1879 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1880 old_inferior_ptid
= inferior_ptid
;
1881 inferior_ptid
= tp
->ptid
;
1883 /* Clear the executing flag to allow changes to the current frame.
1884 We are not actually running, yet. We just started a reverse execution
1885 command or a record goto command.
1886 For the latter, EXECUTING is false and this has no effect.
1887 For the former, EXECUTING is true and we're in to_wait, about to
1888 move the thread. Since we need to recompute the stack, we temporarily
1889 set EXECUTING to flase. */
1890 executing
= is_executing (inferior_ptid
);
1891 set_executing (inferior_ptid
, 0);
1896 frame
= get_current_frame ();
1898 CATCH (except
, RETURN_MASK_ALL
)
1900 /* Restore the previous execution state. */
1901 set_executing (inferior_ptid
, executing
);
1903 /* Restore the previous inferior_ptid. */
1904 inferior_ptid
= old_inferior_ptid
;
1906 throw_exception (except
);
1910 /* Restore the previous execution state. */
1911 set_executing (inferior_ptid
, executing
);
1913 /* Restore the previous inferior_ptid. */
1914 inferior_ptid
= old_inferior_ptid
;
1919 /* Start replaying a thread. */
1921 static struct btrace_insn_iterator
*
1922 record_btrace_start_replaying (struct thread_info
*tp
)
1924 struct btrace_insn_iterator
*replay
;
1925 struct btrace_thread_info
*btinfo
;
1927 btinfo
= &tp
->btrace
;
1930 /* We can't start replaying without trace. */
1931 if (btinfo
->functions
.empty ())
1934 /* GDB stores the current frame_id when stepping in order to detects steps
1936 Since frames are computed differently when we're replaying, we need to
1937 recompute those stored frames and fix them up so we can still detect
1938 subroutines after we started replaying. */
1941 struct frame_info
*frame
;
1942 struct frame_id frame_id
;
1943 int upd_step_frame_id
, upd_step_stack_frame_id
;
1945 /* The current frame without replaying - computed via normal unwind. */
1946 frame
= get_thread_current_frame (tp
);
1947 frame_id
= get_frame_id (frame
);
1949 /* Check if we need to update any stepping-related frame id's. */
1950 upd_step_frame_id
= frame_id_eq (frame_id
,
1951 tp
->control
.step_frame_id
);
1952 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1953 tp
->control
.step_stack_frame_id
);
1955 /* We start replaying at the end of the branch trace. This corresponds
1956 to the current instruction. */
1957 replay
= XNEW (struct btrace_insn_iterator
);
1958 btrace_insn_end (replay
, btinfo
);
1960 /* Skip gaps at the end of the trace. */
1961 while (btrace_insn_get (replay
) == NULL
)
1965 steps
= btrace_insn_prev (replay
, 1);
1967 error (_("No trace."));
1970 /* We're not replaying, yet. */
1971 gdb_assert (btinfo
->replay
== NULL
);
1972 btinfo
->replay
= replay
;
1974 /* Make sure we're not using any stale registers. */
1975 registers_changed_ptid (tp
->ptid
);
1977 /* The current frame with replaying - computed via btrace unwind. */
1978 frame
= get_thread_current_frame (tp
);
1979 frame_id
= get_frame_id (frame
);
1981 /* Replace stepping related frames where necessary. */
1982 if (upd_step_frame_id
)
1983 tp
->control
.step_frame_id
= frame_id
;
1984 if (upd_step_stack_frame_id
)
1985 tp
->control
.step_stack_frame_id
= frame_id
;
1987 CATCH (except
, RETURN_MASK_ALL
)
1989 xfree (btinfo
->replay
);
1990 btinfo
->replay
= NULL
;
1992 registers_changed_ptid (tp
->ptid
);
1994 throw_exception (except
);
2001 /* Stop replaying a thread. */
2004 record_btrace_stop_replaying (struct thread_info
*tp
)
2006 struct btrace_thread_info
*btinfo
;
2008 btinfo
= &tp
->btrace
;
2010 xfree (btinfo
->replay
);
2011 btinfo
->replay
= NULL
;
2013 /* Make sure we're not leaving any stale registers. */
2014 registers_changed_ptid (tp
->ptid
);
2017 /* Stop replaying TP if it is at the end of its execution history. */
2020 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2022 struct btrace_insn_iterator
*replay
, end
;
2023 struct btrace_thread_info
*btinfo
;
2025 btinfo
= &tp
->btrace
;
2026 replay
= btinfo
->replay
;
2031 btrace_insn_end (&end
, btinfo
);
2033 if (btrace_insn_cmp (replay
, &end
) == 0)
2034 record_btrace_stop_replaying (tp
);
2037 /* The to_resume method of target record-btrace. */
2040 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
2041 enum gdb_signal signal
)
2043 struct thread_info
*tp
;
2044 enum btrace_thread_flag flag
, cflag
;
2046 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
),
2047 execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2048 step
? "step" : "cont");
2050 /* Store the execution direction of the last resume.
2052 If there is more than one to_resume call, we have to rely on infrun
2053 to not change the execution direction in-between. */
2054 record_btrace_resume_exec_dir
= execution_direction
;
2056 /* As long as we're not replaying, just forward the request.
2058 For non-stop targets this means that no thread is replaying. In order to
2059 make progress, we may need to explicitly move replaying threads to the end
2060 of their execution history. */
2061 if ((execution_direction
!= EXEC_REVERSE
)
2062 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2065 ops
->to_resume (ops
, ptid
, step
, signal
);
2069 /* Compute the btrace thread flag for the requested move. */
2070 if (execution_direction
== EXEC_REVERSE
)
2072 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2077 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2081 /* We just indicate the resume intent here. The actual stepping happens in
2082 record_btrace_wait below.
2084 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2085 if (!target_is_non_stop_p ())
2087 gdb_assert (ptid_match (inferior_ptid
, ptid
));
2089 ALL_NON_EXITED_THREADS (tp
)
2090 if (ptid_match (tp
->ptid
, ptid
))
2092 if (ptid_match (tp
->ptid
, inferior_ptid
))
2093 record_btrace_resume_thread (tp
, flag
);
2095 record_btrace_resume_thread (tp
, cflag
);
2100 ALL_NON_EXITED_THREADS (tp
)
2101 if (ptid_match (tp
->ptid
, ptid
))
2102 record_btrace_resume_thread (tp
, flag
);
2105 /* Async support. */
2106 if (target_can_async_p ())
2109 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2113 /* The to_commit_resume method of target record-btrace. */
2116 record_btrace_commit_resume (struct target_ops
*ops
)
2118 if ((execution_direction
!= EXEC_REVERSE
)
2119 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2120 ops
->beneath
->to_commit_resume (ops
->beneath
);
2123 /* Cancel resuming TP. */
2126 record_btrace_cancel_resume (struct thread_info
*tp
)
2128 enum btrace_thread_flag flags
;
2130 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2134 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2135 print_thread_id (tp
),
2136 target_pid_to_str (tp
->ptid
), flags
,
2137 btrace_thread_flag_to_str (flags
));
2139 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2140 record_btrace_stop_replaying_at_end (tp
);
2143 /* Return a target_waitstatus indicating that we ran out of history. */
2145 static struct target_waitstatus
2146 btrace_step_no_history (void)
2148 struct target_waitstatus status
;
2150 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2155 /* Return a target_waitstatus indicating that a step finished. */
2157 static struct target_waitstatus
2158 btrace_step_stopped (void)
2160 struct target_waitstatus status
;
2162 status
.kind
= TARGET_WAITKIND_STOPPED
;
2163 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2168 /* Return a target_waitstatus indicating that a thread was stopped as
2171 static struct target_waitstatus
2172 btrace_step_stopped_on_request (void)
2174 struct target_waitstatus status
;
2176 status
.kind
= TARGET_WAITKIND_STOPPED
;
2177 status
.value
.sig
= GDB_SIGNAL_0
;
2182 /* Return a target_waitstatus indicating a spurious stop. */
2184 static struct target_waitstatus
2185 btrace_step_spurious (void)
2187 struct target_waitstatus status
;
2189 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2194 /* Return a target_waitstatus indicating that the thread was not resumed. */
2196 static struct target_waitstatus
2197 btrace_step_no_resumed (void)
2199 struct target_waitstatus status
;
2201 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2206 /* Return a target_waitstatus indicating that we should wait again. */
2208 static struct target_waitstatus
2209 btrace_step_again (void)
2211 struct target_waitstatus status
;
2213 status
.kind
= TARGET_WAITKIND_IGNORE
;
2218 /* Clear the record histories. */
2221 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2223 xfree (btinfo
->insn_history
);
2224 xfree (btinfo
->call_history
);
2226 btinfo
->insn_history
= NULL
;
2227 btinfo
->call_history
= NULL
;
2230 /* Check whether TP's current replay position is at a breakpoint. */
2233 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2235 struct btrace_insn_iterator
*replay
;
2236 struct btrace_thread_info
*btinfo
;
2237 const struct btrace_insn
*insn
;
2238 struct inferior
*inf
;
2240 btinfo
= &tp
->btrace
;
2241 replay
= btinfo
->replay
;
2246 insn
= btrace_insn_get (replay
);
2250 inf
= find_inferior_ptid (tp
->ptid
);
2254 return record_check_stopped_by_breakpoint (inf
->aspace
, insn
->pc
,
2255 &btinfo
->stop_reason
);
2258 /* Step one instruction in forward direction. */
2260 static struct target_waitstatus
2261 record_btrace_single_step_forward (struct thread_info
*tp
)
2263 struct btrace_insn_iterator
*replay
, end
, start
;
2264 struct btrace_thread_info
*btinfo
;
2266 btinfo
= &tp
->btrace
;
2267 replay
= btinfo
->replay
;
2269 /* We're done if we're not replaying. */
2271 return btrace_step_no_history ();
2273 /* Check if we're stepping a breakpoint. */
2274 if (record_btrace_replay_at_breakpoint (tp
))
2275 return btrace_step_stopped ();
2277 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2278 jump back to the instruction at which we started. */
2284 /* We will bail out here if we continue stepping after reaching the end
2285 of the execution history. */
2286 steps
= btrace_insn_next (replay
, 1);
2290 return btrace_step_no_history ();
2293 while (btrace_insn_get (replay
) == NULL
);
2295 /* Determine the end of the instruction trace. */
2296 btrace_insn_end (&end
, btinfo
);
2298 /* The execution trace contains (and ends with) the current instruction.
2299 This instruction has not been executed, yet, so the trace really ends
2300 one instruction earlier. */
2301 if (btrace_insn_cmp (replay
, &end
) == 0)
2302 return btrace_step_no_history ();
2304 return btrace_step_spurious ();
2307 /* Step one instruction in backward direction. */
2309 static struct target_waitstatus
2310 record_btrace_single_step_backward (struct thread_info
*tp
)
2312 struct btrace_insn_iterator
*replay
, start
;
2313 struct btrace_thread_info
*btinfo
;
2315 btinfo
= &tp
->btrace
;
2316 replay
= btinfo
->replay
;
2318 /* Start replaying if we're not already doing so. */
2320 replay
= record_btrace_start_replaying (tp
);
2322 /* If we can't step any further, we reached the end of the history.
2323 Skip gaps during replay. If we end up at a gap (at the beginning of
2324 the trace), jump back to the instruction at which we started. */
2330 steps
= btrace_insn_prev (replay
, 1);
2334 return btrace_step_no_history ();
2337 while (btrace_insn_get (replay
) == NULL
);
2339 /* Check if we're stepping a breakpoint.
2341 For reverse-stepping, this check is after the step. There is logic in
2342 infrun.c that handles reverse-stepping separately. See, for example,
2343 proceed and adjust_pc_after_break.
2345 This code assumes that for reverse-stepping, PC points to the last
2346 de-executed instruction, whereas for forward-stepping PC points to the
2347 next to-be-executed instruction. */
2348 if (record_btrace_replay_at_breakpoint (tp
))
2349 return btrace_step_stopped ();
2351 return btrace_step_spurious ();
2354 /* Step a single thread. */
2356 static struct target_waitstatus
2357 record_btrace_step_thread (struct thread_info
*tp
)
2359 struct btrace_thread_info
*btinfo
;
2360 struct target_waitstatus status
;
2361 enum btrace_thread_flag flags
;
2363 btinfo
= &tp
->btrace
;
2365 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2366 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2368 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2369 target_pid_to_str (tp
->ptid
), flags
,
2370 btrace_thread_flag_to_str (flags
));
2372 /* We can't step without an execution history. */
2373 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2374 return btrace_step_no_history ();
2379 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2382 return btrace_step_stopped_on_request ();
2385 status
= record_btrace_single_step_forward (tp
);
2386 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2389 return btrace_step_stopped ();
2392 status
= record_btrace_single_step_backward (tp
);
2393 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2396 return btrace_step_stopped ();
2399 status
= record_btrace_single_step_forward (tp
);
2400 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2403 btinfo
->flags
|= flags
;
2404 return btrace_step_again ();
2407 status
= record_btrace_single_step_backward (tp
);
2408 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2411 btinfo
->flags
|= flags
;
2412 return btrace_step_again ();
2415 /* We keep threads moving at the end of their execution history. The to_wait
2416 method will stop the thread for whom the event is reported. */
2417 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2418 btinfo
->flags
|= flags
;
/* A vector of threads.  */

typedef struct thread_info * tp_t;
2428 /* Announce further events if necessary. */
2431 record_btrace_maybe_mark_async_event
2432 (const std::vector
<thread_info
*> &moving
,
2433 const std::vector
<thread_info
*> &no_history
)
2435 bool more_moving
= !moving
.empty ();
2436 bool more_no_history
= !no_history
.empty ();;
2438 if (!more_moving
&& !more_no_history
)
2442 DEBUG ("movers pending");
2444 if (more_no_history
)
2445 DEBUG ("no-history pending");
2447 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2450 /* The to_wait method of target record-btrace. */
2453 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2454 struct target_waitstatus
*status
, int options
)
2456 std::vector
<thread_info
*> moving
;
2457 std::vector
<thread_info
*> no_history
;
2459 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2461 /* As long as we're not replaying, just forward the request. */
2462 if ((execution_direction
!= EXEC_REVERSE
)
2463 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2466 return ops
->to_wait (ops
, ptid
, status
, options
);
2469 /* Keep a work list of moving threads. */
2473 ALL_NON_EXITED_THREADS (tp
)
2475 if (ptid_match (tp
->ptid
, ptid
)
2476 && ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0))
2477 moving
.push_back (tp
);
2481 if (moving
.empty ())
2483 *status
= btrace_step_no_resumed ();
2485 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
),
2486 target_waitstatus_to_string (status
).c_str ());
2491 /* Step moving threads one by one, one step each, until either one thread
2492 reports an event or we run out of threads to step.
2494 When stepping more than one thread, chances are that some threads reach
2495 the end of their execution history earlier than others. If we reported
2496 this immediately, all-stop on top of non-stop would stop all threads and
2497 resume the same threads next time. And we would report the same thread
2498 having reached the end of its execution history again.
2500 In the worst case, this would starve the other threads. But even if other
2501 threads would be allowed to make progress, this would result in far too
2502 many intermediate stops.
2504 We therefore delay the reporting of "no execution history" until we have
2505 nothing else to report. By this time, all threads should have moved to
2506 either the beginning or the end of their execution history. There will
2507 be a single user-visible stop. */
2508 struct thread_info
*eventing
= NULL
;
2509 while ((eventing
== NULL
) && !moving
.empty ())
2511 for (unsigned int ix
= 0; eventing
== NULL
&& ix
< moving
.size ();)
2513 thread_info
*tp
= moving
[ix
];
2515 *status
= record_btrace_step_thread (tp
);
2517 switch (status
->kind
)
2519 case TARGET_WAITKIND_IGNORE
:
2523 case TARGET_WAITKIND_NO_HISTORY
:
2524 no_history
.push_back (ordered_remove (moving
, ix
));
2528 eventing
= unordered_remove (moving
, ix
);
2534 if (eventing
== NULL
)
2536 /* We started with at least one moving thread. This thread must have
2537 either stopped or reached the end of its execution history.
2539 In the former case, EVENTING must not be NULL.
2540 In the latter case, NO_HISTORY must not be empty. */
2541 gdb_assert (!no_history
.empty ());
2543 /* We kept threads moving at the end of their execution history. Stop
2544 EVENTING now that we are going to report its stop. */
2545 eventing
= unordered_remove (no_history
, 0);
2546 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2548 *status
= btrace_step_no_history ();
2551 gdb_assert (eventing
!= NULL
);
2553 /* We kept threads replaying at the end of their execution history. Stop
2554 replaying EVENTING now that we are going to report its stop. */
2555 record_btrace_stop_replaying_at_end (eventing
);
2557 /* Stop all other threads. */
2558 if (!target_is_non_stop_p ())
2562 ALL_NON_EXITED_THREADS (tp
)
2563 record_btrace_cancel_resume (tp
);
2566 /* In async mode, we need to announce further events. */
2567 if (target_is_async_p ())
2568 record_btrace_maybe_mark_async_event (moving
, no_history
);
2570 /* Start record histories anew from the current position. */
2571 record_btrace_clear_histories (&eventing
->btrace
);
2573 /* We moved the replay position but did not update registers. */
2574 registers_changed_ptid (eventing
->ptid
);
2576 DEBUG ("wait ended by thread %s (%s): %s",
2577 print_thread_id (eventing
),
2578 target_pid_to_str (eventing
->ptid
),
2579 target_waitstatus_to_string (status
).c_str ());
2581 return eventing
->ptid
;
2584 /* The to_stop method of target record-btrace. */
2587 record_btrace_stop (struct target_ops
*ops
, ptid_t ptid
)
2589 DEBUG ("stop %s", target_pid_to_str (ptid
));
2591 /* As long as we're not replaying, just forward the request. */
2592 if ((execution_direction
!= EXEC_REVERSE
)
2593 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2596 ops
->to_stop (ops
, ptid
);
2600 struct thread_info
*tp
;
2602 ALL_NON_EXITED_THREADS (tp
)
2603 if (ptid_match (tp
->ptid
, ptid
))
2605 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2606 tp
->btrace
.flags
|= BTHR_STOP
;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2619 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2622 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2624 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2626 struct thread_info
*tp
= inferior_thread ();
2628 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2631 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2634 /* The to_supports_stopped_by_sw_breakpoint method of target
2638 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2640 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2643 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2646 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2649 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2651 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2653 struct thread_info
*tp
= inferior_thread ();
2655 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2658 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2661 /* The to_supports_stopped_by_hw_breakpoint method of target
2665 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2667 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2670 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2673 /* The to_update_thread_list method of target record-btrace. */
2676 record_btrace_update_thread_list (struct target_ops
*ops
)
2678 /* We don't add or remove threads during replay. */
2679 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2682 /* Forward the request. */
2684 ops
->to_update_thread_list (ops
);
2687 /* The to_thread_alive method of target record-btrace. */
2690 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2692 /* We don't add or remove threads during replay. */
2693 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2694 return find_thread_ptid (ptid
) != NULL
;
2696 /* Forward the request. */
2698 return ops
->to_thread_alive (ops
, ptid
);
2701 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2705 record_btrace_set_replay (struct thread_info
*tp
,
2706 const struct btrace_insn_iterator
*it
)
2708 struct btrace_thread_info
*btinfo
;
2710 btinfo
= &tp
->btrace
;
2713 record_btrace_stop_replaying (tp
);
2716 if (btinfo
->replay
== NULL
)
2717 record_btrace_start_replaying (tp
);
2718 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2721 *btinfo
->replay
= *it
;
2722 registers_changed_ptid (tp
->ptid
);
2725 /* Start anew from the new replay position. */
2726 record_btrace_clear_histories (btinfo
);
2728 stop_pc
= regcache_read_pc (get_current_regcache ());
2729 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2732 /* The to_goto_record_begin method of target record-btrace. */
2735 record_btrace_goto_begin (struct target_ops
*self
)
2737 struct thread_info
*tp
;
2738 struct btrace_insn_iterator begin
;
2740 tp
= require_btrace_thread ();
2742 btrace_insn_begin (&begin
, &tp
->btrace
);
2744 /* Skip gaps at the beginning of the trace. */
2745 while (btrace_insn_get (&begin
) == NULL
)
2749 steps
= btrace_insn_next (&begin
, 1);
2751 error (_("No trace."));
2754 record_btrace_set_replay (tp
, &begin
);
2757 /* The to_goto_record_end method of target record-btrace. */
2760 record_btrace_goto_end (struct target_ops
*ops
)
2762 struct thread_info
*tp
;
2764 tp
= require_btrace_thread ();
2766 record_btrace_set_replay (tp
, NULL
);
2769 /* The to_goto_record method of target record-btrace. */
2772 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2774 struct thread_info
*tp
;
2775 struct btrace_insn_iterator it
;
2776 unsigned int number
;
2781 /* Check for wrap-arounds. */
2783 error (_("Instruction number out of range."));
2785 tp
= require_btrace_thread ();
2787 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2789 /* Check if the instruction could not be found or is a gap. */
2790 if (found
== 0 || btrace_insn_get (&it
) == NULL
)
2791 error (_("No such instruction."));
2793 record_btrace_set_replay (tp
, &it
);
2796 /* The to_record_stop_replaying method of target record-btrace. */
2799 record_btrace_stop_replaying_all (struct target_ops
*self
)
2801 struct thread_info
*tp
;
2803 ALL_NON_EXITED_THREADS (tp
)
2804 record_btrace_stop_replaying (tp
);
2807 /* The to_execution_direction target method. */
2809 static enum exec_direction_kind
2810 record_btrace_execution_direction (struct target_ops
*self
)
2812 return record_btrace_resume_exec_dir
;
2815 /* The to_prepare_to_generate_core target method. */
2818 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2820 record_btrace_generating_corefile
= 1;
2823 /* The to_done_generating_core target method. */
2826 record_btrace_done_generating_core (struct target_ops
*self
)
2828 record_btrace_generating_corefile
= 0;
2831 /* Initialize the record-btrace target ops. */
2834 init_record_btrace_ops (void)
2836 struct target_ops
*ops
;
2838 ops
= &record_btrace_ops
;
2839 ops
->to_shortname
= "record-btrace";
2840 ops
->to_longname
= "Branch tracing target";
2841 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2842 ops
->to_open
= record_btrace_open
;
2843 ops
->to_close
= record_btrace_close
;
2844 ops
->to_async
= record_btrace_async
;
2845 ops
->to_detach
= record_detach
;
2846 ops
->to_disconnect
= record_btrace_disconnect
;
2847 ops
->to_mourn_inferior
= record_mourn_inferior
;
2848 ops
->to_kill
= record_kill
;
2849 ops
->to_stop_recording
= record_btrace_stop_recording
;
2850 ops
->to_info_record
= record_btrace_info
;
2851 ops
->to_insn_history
= record_btrace_insn_history
;
2852 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2853 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2854 ops
->to_call_history
= record_btrace_call_history
;
2855 ops
->to_call_history_from
= record_btrace_call_history_from
;
2856 ops
->to_call_history_range
= record_btrace_call_history_range
;
2857 ops
->to_record_method
= record_btrace_record_method
;
2858 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2859 ops
->to_record_will_replay
= record_btrace_will_replay
;
2860 ops
->to_record_stop_replaying
= record_btrace_stop_replaying_all
;
2861 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2862 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2863 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2864 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2865 ops
->to_store_registers
= record_btrace_store_registers
;
2866 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2867 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2868 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2869 ops
->to_resume
= record_btrace_resume
;
2870 ops
->to_commit_resume
= record_btrace_commit_resume
;
2871 ops
->to_wait
= record_btrace_wait
;
2872 ops
->to_stop
= record_btrace_stop
;
2873 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2874 ops
->to_thread_alive
= record_btrace_thread_alive
;
2875 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2876 ops
->to_goto_record_end
= record_btrace_goto_end
;
2877 ops
->to_goto_record
= record_btrace_goto
;
2878 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2879 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2880 ops
->to_supports_stopped_by_sw_breakpoint
2881 = record_btrace_supports_stopped_by_sw_breakpoint
;
2882 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2883 ops
->to_supports_stopped_by_hw_breakpoint
2884 = record_btrace_supports_stopped_by_hw_breakpoint
;
2885 ops
->to_execution_direction
= record_btrace_execution_direction
;
2886 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2887 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2888 ops
->to_stratum
= record_stratum
;
2889 ops
->to_magic
= OPS_MAGIC
;
2892 /* Start recording in BTS format. */
2895 cmd_record_btrace_bts_start (const char *args
, int from_tty
)
2897 if (args
!= NULL
&& *args
!= 0)
2898 error (_("Invalid argument."));
2900 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2904 execute_command ("target record-btrace", from_tty
);
2906 CATCH (exception
, RETURN_MASK_ALL
)
2908 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2909 throw_exception (exception
);
2914 /* Start recording in Intel Processor Trace format. */
2917 cmd_record_btrace_pt_start (const char *args
, int from_tty
)
2919 if (args
!= NULL
&& *args
!= 0)
2920 error (_("Invalid argument."));
2922 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2926 execute_command ("target record-btrace", from_tty
);
2928 CATCH (exception
, RETURN_MASK_ALL
)
2930 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2931 throw_exception (exception
);
2936 /* Alias for "target record". */
2939 cmd_record_btrace_start (const char *args
, int from_tty
)
2941 if (args
!= NULL
&& *args
!= 0)
2942 error (_("Invalid argument."));
2944 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2948 execute_command ("target record-btrace", from_tty
);
2950 CATCH (exception
, RETURN_MASK_ALL
)
2952 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2956 execute_command ("target record-btrace", from_tty
);
2958 CATCH (exception
, RETURN_MASK_ALL
)
2960 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2961 throw_exception (exception
);
2968 /* The "set record btrace" command. */
2971 cmd_set_record_btrace (const char *args
, int from_tty
)
2973 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2976 /* The "show record btrace" command. */
2979 cmd_show_record_btrace (const char *args
, int from_tty
)
2981 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2984 /* The "show record btrace replay-memory-access" command. */
2987 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2988 struct cmd_list_element
*c
, const char *value
)
2990 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2991 replay_memory_access
);
2994 /* The "set record btrace bts" command. */
2997 cmd_set_record_btrace_bts (const char *args
, int from_tty
)
2999 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3000 "by an appropriate subcommand.\n"));
3001 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
3002 all_commands
, gdb_stdout
);
3005 /* The "show record btrace bts" command. */
3008 cmd_show_record_btrace_bts (const char *args
, int from_tty
)
3010 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
3013 /* The "set record btrace pt" command. */
3016 cmd_set_record_btrace_pt (const char *args
, int from_tty
)
3018 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3019 "by an appropriate subcommand.\n"));
3020 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
3021 all_commands
, gdb_stdout
);
3024 /* The "show record btrace pt" command. */
3027 cmd_show_record_btrace_pt (const char *args
, int from_tty
)
3029 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
/* The "record bts buffer-size" show value function.

   NOTE(review): the extraction dropped the fourth parameter line and the
   argument to fprintf_filtered; restored as VALUE per the standard
   show-value callback signature.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.

   NOTE(review): the extraction dropped the fourth parameter line and the
   argument to fprintf_filtered; restored as VALUE per the standard
   show-value callback signature.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3054 /* Initialize btrace commands. */
3057 _initialize_record_btrace (void)
3059 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3060 _("Start branch trace recording."), &record_btrace_cmdlist
,
3061 "record btrace ", 0, &record_cmdlist
);
3062 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3064 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3066 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3067 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3068 This format may not be available on all processors."),
3069 &record_btrace_cmdlist
);
3070 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3072 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3074 Start branch trace recording in Intel Processor Trace format.\n\n\
3075 This format may not be available on all processors."),
3076 &record_btrace_cmdlist
);
3077 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3079 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
3080 _("Set record options"), &set_record_btrace_cmdlist
,
3081 "set record btrace ", 0, &set_record_cmdlist
);
3083 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
3084 _("Show record options"), &show_record_btrace_cmdlist
,
3085 "show record btrace ", 0, &show_record_cmdlist
);
3087 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3088 replay_memory_access_types
, &replay_memory_access
, _("\
3089 Set what memory accesses are allowed during replay."), _("\
3090 Show what memory accesses are allowed during replay."),
3091 _("Default is READ-ONLY.\n\n\
3092 The btrace record target does not trace data.\n\
3093 The memory therefore corresponds to the live target and not \
3094 to the current replay position.\n\n\
3095 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3096 When READ-WRITE, allow accesses to read-only and read-write memory during \
3098 NULL
, cmd_show_replay_memory_access
,
3099 &set_record_btrace_cmdlist
,
3100 &show_record_btrace_cmdlist
);
3102 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
3103 _("Set record btrace bts options"),
3104 &set_record_btrace_bts_cmdlist
,
3105 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
3107 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
3108 _("Show record btrace bts options"),
3109 &show_record_btrace_bts_cmdlist
,
3110 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
3112 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3113 &record_btrace_conf
.bts
.size
,
3114 _("Set the record/replay bts buffer size."),
3115 _("Show the record/replay bts buffer size."), _("\
3116 When starting recording request a trace buffer of this size. \
3117 The actual buffer size may differ from the requested size. \
3118 Use \"info record\" to see the actual buffer size.\n\n\
3119 Bigger buffers allow longer recording but also take more time to process \
3120 the recorded execution trace.\n\n\
3121 The trace buffer size may not be changed while recording."), NULL
,
3122 show_record_bts_buffer_size_value
,
3123 &set_record_btrace_bts_cmdlist
,
3124 &show_record_btrace_bts_cmdlist
);
3126 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
3127 _("Set record btrace pt options"),
3128 &set_record_btrace_pt_cmdlist
,
3129 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
3131 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
3132 _("Show record btrace pt options"),
3133 &show_record_btrace_pt_cmdlist
,
3134 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
3136 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3137 &record_btrace_conf
.pt
.size
,
3138 _("Set the record/replay pt buffer size."),
3139 _("Show the record/replay pt buffer size."), _("\
3140 Bigger buffers allow longer recording but also take more time to process \
3141 the recorded execution.\n\
3142 The actual buffer size may differ from the requested size. Use \"info record\" \
3143 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3144 &set_record_btrace_pt_cmdlist
,
3145 &show_record_btrace_pt_cmdlist
);
3147 init_record_btrace_ops ();
3148 add_target (&record_btrace_ops
);
3150 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3153 record_btrace_conf
.bts
.size
= 64 * 1024;
3154 record_btrace_conf
.pt
.size
= 16 * 1024;