1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2020 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "gdbsupport/event-loop.h"
44 #include "cli/cli-style.h"
45 #include "async-event.h"
47 static const target_info record_btrace_target_info
= {
49 N_("Branch tracing target"),
50 N_("Collect control-flow trace and provide the execution history.")
53 /* The target_ops of record-btrace. */
55 class record_btrace_target final
: public target_ops
58 const target_info
&info () const override
59 { return record_btrace_target_info
; }
61 strata
stratum () const override
{ return record_stratum
; }
63 void close () override
;
64 void async (int) override
;
66 void detach (inferior
*inf
, int from_tty
) override
67 { record_detach (this, inf
, from_tty
); }
69 void disconnect (const char *, int) override
;
71 void mourn_inferior () override
72 { record_mourn_inferior (this); }
75 { record_kill (this); }
77 enum record_method
record_method (ptid_t ptid
) override
;
79 void stop_recording () override
;
80 void info_record () override
;
82 void insn_history (int size
, gdb_disassembly_flags flags
) override
;
83 void insn_history_from (ULONGEST from
, int size
,
84 gdb_disassembly_flags flags
) override
;
85 void insn_history_range (ULONGEST begin
, ULONGEST end
,
86 gdb_disassembly_flags flags
) override
;
87 void call_history (int size
, record_print_flags flags
) override
;
88 void call_history_from (ULONGEST begin
, int size
, record_print_flags flags
)
90 void call_history_range (ULONGEST begin
, ULONGEST end
, record_print_flags flags
)
93 bool record_is_replaying (ptid_t ptid
) override
;
94 bool record_will_replay (ptid_t ptid
, int dir
) override
;
95 void record_stop_replaying () override
;
97 enum target_xfer_status
xfer_partial (enum target_object object
,
100 const gdb_byte
*writebuf
,
101 ULONGEST offset
, ULONGEST len
,
102 ULONGEST
*xfered_len
) override
;
104 int insert_breakpoint (struct gdbarch
*,
105 struct bp_target_info
*) override
;
106 int remove_breakpoint (struct gdbarch
*, struct bp_target_info
*,
107 enum remove_bp_reason
) override
;
109 void fetch_registers (struct regcache
*, int) override
;
111 void store_registers (struct regcache
*, int) override
;
112 void prepare_to_store (struct regcache
*) override
;
114 const struct frame_unwind
*get_unwinder () override
;
116 const struct frame_unwind
*get_tailcall_unwinder () override
;
118 void commit_resume () override
;
119 void resume (ptid_t
, int, enum gdb_signal
) override
;
120 ptid_t
wait (ptid_t
, struct target_waitstatus
*, int) override
;
122 void stop (ptid_t
) override
;
123 void update_thread_list () override
;
124 bool thread_alive (ptid_t ptid
) override
;
125 void goto_record_begin () override
;
126 void goto_record_end () override
;
127 void goto_record (ULONGEST insn
) override
;
129 bool can_execute_reverse () override
;
131 bool stopped_by_sw_breakpoint () override
;
132 bool supports_stopped_by_sw_breakpoint () override
;
134 bool stopped_by_hw_breakpoint () override
;
135 bool supports_stopped_by_hw_breakpoint () override
;
137 enum exec_direction_kind
execution_direction () override
;
138 void prepare_to_generate_core () override
;
139 void done_generating_core () override
;
142 static record_btrace_target record_btrace_ops
;
144 /* Initialize the record-btrace target ops. */
146 /* Token associated with a new-thread observer enabling branch tracing
147 for the new thread. */
148 static const gdb::observers::token record_btrace_thread_observer_token
{};
150 /* Memory access types used in set/show record btrace replay-memory-access. */
151 static const char replay_memory_access_read_only
[] = "read-only";
152 static const char replay_memory_access_read_write
[] = "read-write";
153 static const char *const replay_memory_access_types
[] =
155 replay_memory_access_read_only
,
156 replay_memory_access_read_write
,
160 /* The currently allowed replay memory access type. */
161 static const char *replay_memory_access
= replay_memory_access_read_only
;
163 /* The cpu state kinds. */
164 enum record_btrace_cpu_state_kind
171 /* The current cpu state. */
172 static enum record_btrace_cpu_state_kind record_btrace_cpu_state
= CS_AUTO
;
174 /* The current cpu for trace decode. */
175 static struct btrace_cpu record_btrace_cpu
;
177 /* Command lists for "set/show record btrace". */
178 static struct cmd_list_element
*set_record_btrace_cmdlist
;
179 static struct cmd_list_element
*show_record_btrace_cmdlist
;
181 /* The execution direction of the last resume we got. See record-full.c. */
182 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
184 /* The async event handler for reverse/replay execution. */
185 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
187 /* A flag indicating that we are currently generating a core file. */
188 static int record_btrace_generating_corefile
;
190 /* The current branch trace configuration. */
191 static struct btrace_config record_btrace_conf
;
193 /* Command list for "record btrace". */
194 static struct cmd_list_element
*record_btrace_cmdlist
;
196 /* Command lists for "set/show record btrace bts". */
197 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
198 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
200 /* Command lists for "set/show record btrace pt". */
201 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
202 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
204 /* Command list for "set record btrace cpu". */
205 static struct cmd_list_element
*set_record_btrace_cpu_cmdlist
;
207 /* Print a record-btrace debug message. Use do ... while (0) to avoid
208 ambiguities when used in if statements. */
210 #define DEBUG(msg, args...) \
213 if (record_debug != 0) \
214 fprintf_unfiltered (gdb_stdlog, \
215 "[record-btrace] " msg "\n", ##args); \
220 /* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222 const struct btrace_cpu
*
223 record_btrace_get_cpu (void)
225 switch (record_btrace_cpu_state
)
231 record_btrace_cpu
.vendor
= CV_UNKNOWN
;
234 return &record_btrace_cpu
;
237 error (_("Internal error: bad record btrace cpu state."));
240 /* Update the branch trace for the current thread and return a pointer to its
243 Throws an error if there is no thread or no trace. This function never
246 static struct thread_info
*
247 require_btrace_thread (void)
251 if (inferior_ptid
== null_ptid
)
252 error (_("No thread."));
254 thread_info
*tp
= inferior_thread ();
256 validate_registers_access ();
258 btrace_fetch (tp
, record_btrace_get_cpu ());
260 if (btrace_is_empty (tp
))
261 error (_("No trace."));
266 /* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
269 Throws an error if there is no thread or no trace. This function never
272 static struct btrace_thread_info
*
273 require_btrace (void)
275 struct thread_info
*tp
;
277 tp
= require_btrace_thread ();
282 /* Enable branch tracing for one thread. Warn on errors. */
285 record_btrace_enable_warn (struct thread_info
*tp
)
289 btrace_enable (tp
, &record_btrace_conf
);
291 catch (const gdb_exception_error
&error
)
293 warning ("%s", error
.what ());
297 /* Enable automatic tracing of new threads. */
300 record_btrace_auto_enable (void)
302 DEBUG ("attach thread observer");
304 gdb::observers::new_thread
.attach (record_btrace_enable_warn
,
305 record_btrace_thread_observer_token
);
308 /* Disable automatic tracing of new threads. */
311 record_btrace_auto_disable (void)
313 DEBUG ("detach thread observer");
315 gdb::observers::new_thread
.detach (record_btrace_thread_observer_token
);
318 /* The record-btrace async event handler function. */
321 record_btrace_handle_async_inferior_event (gdb_client_data data
)
323 inferior_event_handler (INF_REG_EVENT
, NULL
);
326 /* See record-btrace.h. */
329 record_btrace_push_target (void)
333 record_btrace_auto_enable ();
335 push_target (&record_btrace_ops
);
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
340 record_btrace_generating_corefile
= 0;
342 format
= btrace_format_short_string (record_btrace_conf
.format
);
343 gdb::observers::record_changed
.notify (current_inferior (), 1, "btrace", format
);
346 /* Disable btrace on a set of threads on scope exit. */
348 struct scoped_btrace_disable
350 scoped_btrace_disable () = default;
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable
);
354 ~scoped_btrace_disable ()
356 for (thread_info
*tp
: m_threads
)
360 void add_thread (thread_info
*thread
)
362 m_threads
.push_front (thread
);
371 std::forward_list
<thread_info
*> m_threads
;
374 /* Open target record-btrace. */
377 record_btrace_target_open (const char *args
, int from_tty
)
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable
;
387 if (!target_has_execution
)
388 error (_("The program is not being run."));
390 for (thread_info
*tp
: all_non_exited_threads ())
391 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
393 btrace_enable (tp
, &record_btrace_conf
);
395 btrace_disable
.add_thread (tp
);
398 record_btrace_push_target ();
400 btrace_disable
.discard ();
403 /* The stop_recording method of target record-btrace. */
406 record_btrace_target::stop_recording ()
408 DEBUG ("stop recording");
410 record_btrace_auto_disable ();
412 for (thread_info
*tp
: all_non_exited_threads ())
413 if (tp
->btrace
.target
!= NULL
)
417 /* The disconnect method of target record-btrace. */
420 record_btrace_target::disconnect (const char *args
,
423 struct target_ops
*beneath
= this->beneath ();
425 /* Do not stop recording, just clean up GDB side. */
426 unpush_target (this);
428 /* Forward disconnect. */
429 beneath
->disconnect (args
, from_tty
);
432 /* The close method of target record-btrace. */
435 record_btrace_target::close ()
437 if (record_btrace_async_inferior_event_handler
!= NULL
)
438 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
440 /* Make sure automatic recording gets disabled even if we did not stop
441 recording before closing the record-btrace target. */
442 record_btrace_auto_disable ();
444 /* We should have already stopped recording.
445 Tear down btrace in case we have not. */
446 for (thread_info
*tp
: all_non_exited_threads ())
447 btrace_teardown (tp
);
450 /* The async method of target record-btrace. */
453 record_btrace_target::async (int enable
)
456 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
458 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
460 this->beneath ()->async (enable
);
/* Adjusts the size and returns a human readable size suffix.
   SIZE is reduced in place to the largest whole unit (GB, MB, kB) that
   divides it exactly; if none does, SIZE is left unchanged and the empty
   string is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
491 /* Print a BTS configuration. */
494 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
502 suffix
= record_btrace_adjust_size (&size
);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
507 /* Print an Intel Processor Trace configuration. */
510 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
518 suffix
= record_btrace_adjust_size (&size
);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
523 /* Print a branch tracing configuration. */
526 record_btrace_print_conf (const struct btrace_config
*conf
)
528 printf_unfiltered (_("Recording format: %s.\n"),
529 btrace_format_string (conf
->format
));
531 switch (conf
->format
)
533 case BTRACE_FORMAT_NONE
:
536 case BTRACE_FORMAT_BTS
:
537 record_btrace_print_bts_conf (&conf
->bts
);
540 case BTRACE_FORMAT_PT
:
541 record_btrace_print_pt_conf (&conf
->pt
);
545 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format."));
548 /* The info_record method of target record-btrace. */
551 record_btrace_target::info_record ()
553 struct btrace_thread_info
*btinfo
;
554 const struct btrace_config
*conf
;
555 struct thread_info
*tp
;
556 unsigned int insns
, calls
, gaps
;
560 if (inferior_ptid
== null_ptid
)
561 error (_("No thread."));
563 tp
= inferior_thread ();
565 validate_registers_access ();
567 btinfo
= &tp
->btrace
;
569 conf
= ::btrace_conf (btinfo
);
571 record_btrace_print_conf (conf
);
573 btrace_fetch (tp
, record_btrace_get_cpu ());
579 if (!btrace_is_empty (tp
))
581 struct btrace_call_iterator call
;
582 struct btrace_insn_iterator insn
;
584 btrace_call_end (&call
, btinfo
);
585 btrace_call_prev (&call
, 1);
586 calls
= btrace_call_number (&call
);
588 btrace_insn_end (&insn
, btinfo
);
589 insns
= btrace_insn_number (&insn
);
591 /* If the last instruction is not a gap, it is the current instruction
592 that is not actually part of the record. */
593 if (btrace_insn_get (&insn
) != NULL
)
596 gaps
= btinfo
->ngaps
;
599 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
600 "for thread %s (%s).\n"), insns
, calls
, gaps
,
601 print_thread_id (tp
),
602 target_pid_to_str (tp
->ptid
).c_str ());
604 if (btrace_is_replaying (tp
))
605 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
606 btrace_insn_number (btinfo
->replay
));
609 /* Print a decode error. */
612 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
613 enum btrace_format format
)
615 const char *errstr
= btrace_decode_error (format
, errcode
);
617 uiout
->text (_("["));
618 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
619 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
621 uiout
->text (_("decode error ("));
622 uiout
->field_signed ("errcode", errcode
);
623 uiout
->text (_("): "));
625 uiout
->text (errstr
);
626 uiout
->text (_("]\n"));
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
643 /* Construct a line range. */
645 static struct btrace_line_range
646 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
648 struct btrace_line_range range
;
650 range
.symtab
= symtab
;
657 /* Add a line to a line range. */
659 static struct btrace_line_range
660 btrace_line_range_add (struct btrace_line_range range
, int line
)
662 if (range
.end
<= range
.begin
)
664 /* This is the first entry. */
666 range
.end
= line
+ 1;
668 else if (line
< range
.begin
)
670 else if (range
.end
< line
)
676 /* Return non-zero if RANGE is empty, zero otherwise. */
679 btrace_line_range_is_empty (struct btrace_line_range range
)
681 return range
.end
<= range
.begin
;
684 /* Return non-zero if LHS contains RHS, zero otherwise. */
687 btrace_line_range_contains_range (struct btrace_line_range lhs
,
688 struct btrace_line_range rhs
)
690 return ((lhs
.symtab
== rhs
.symtab
)
691 && (lhs
.begin
<= rhs
.begin
)
692 && (rhs
.end
<= lhs
.end
));
695 /* Find the line range associated with PC. */
697 static struct btrace_line_range
698 btrace_find_line_range (CORE_ADDR pc
)
700 struct btrace_line_range range
;
701 struct linetable_entry
*lines
;
702 struct linetable
*ltable
;
703 struct symtab
*symtab
;
706 symtab
= find_pc_line_symtab (pc
);
708 return btrace_mk_line_range (NULL
, 0, 0);
710 ltable
= SYMTAB_LINETABLE (symtab
);
712 return btrace_mk_line_range (symtab
, 0, 0);
714 nlines
= ltable
->nitems
;
715 lines
= ltable
->item
;
717 return btrace_mk_line_range (symtab
, 0, 0);
719 range
= btrace_mk_line_range (symtab
, 0, 0);
720 for (i
= 0; i
< nlines
- 1; i
++)
722 /* The test of is_stmt here was added when the is_stmt field was
723 introduced to the 'struct linetable_entry' structure. This
724 ensured that this loop maintained the same behaviour as before we
725 introduced is_stmt. That said, it might be that we would be
726 better off not checking is_stmt here, this would lead to us
727 possibly adding more line numbers to the range. At the time this
728 change was made I was unsure how to test this so chose to go with
729 maintaining the existing experience. */
730 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0)
731 && (lines
[i
].is_stmt
== 1))
732 range
= btrace_line_range_add (range
, lines
[i
].line
);
738 /* Print source lines in LINES to UIOUT.
740 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
741 instructions corresponding to that source line. When printing a new source
742 line, we do the cleanups for the open chain and open a new cleanup chain for
743 the new source line. If the source line range in LINES is not empty, this
744 function will leave the cleanup chain for the last printed source line open
745 so instructions can be added to it. */
748 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
749 gdb::optional
<ui_out_emit_tuple
> *src_and_asm_tuple
,
750 gdb::optional
<ui_out_emit_list
> *asm_list
,
751 gdb_disassembly_flags flags
)
753 print_source_lines_flags psl_flags
;
755 if (flags
& DISASSEMBLY_FILENAME
)
756 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
758 for (int line
= lines
.begin
; line
< lines
.end
; ++line
)
762 src_and_asm_tuple
->emplace (uiout
, "src_and_asm_line");
764 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
766 asm_list
->emplace (uiout
, "line_asm_insn");
770 /* Disassemble a section of the recorded instruction trace. */
773 btrace_insn_history (struct ui_out
*uiout
,
774 const struct btrace_thread_info
*btinfo
,
775 const struct btrace_insn_iterator
*begin
,
776 const struct btrace_insn_iterator
*end
,
777 gdb_disassembly_flags flags
)
779 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
780 btrace_insn_number (begin
), btrace_insn_number (end
));
782 flags
|= DISASSEMBLY_SPECULATIVE
;
784 struct gdbarch
*gdbarch
= target_gdbarch ();
785 btrace_line_range last_lines
= btrace_mk_line_range (NULL
, 0, 0);
787 ui_out_emit_list
list_emitter (uiout
, "asm_insns");
789 gdb::optional
<ui_out_emit_tuple
> src_and_asm_tuple
;
790 gdb::optional
<ui_out_emit_list
> asm_list
;
792 gdb_pretty_print_disassembler
disasm (gdbarch
, uiout
);
794 for (btrace_insn_iterator it
= *begin
; btrace_insn_cmp (&it
, end
) != 0;
795 btrace_insn_next (&it
, 1))
797 const struct btrace_insn
*insn
;
799 insn
= btrace_insn_get (&it
);
801 /* A NULL instruction indicates a gap in the trace. */
804 const struct btrace_config
*conf
;
806 conf
= btrace_conf (btinfo
);
808 /* We have trace so we must have a configuration. */
809 gdb_assert (conf
!= NULL
);
811 uiout
->field_fmt ("insn-number", "%u",
812 btrace_insn_number (&it
));
815 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
820 struct disasm_insn dinsn
;
822 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
824 struct btrace_line_range lines
;
826 lines
= btrace_find_line_range (insn
->pc
);
827 if (!btrace_line_range_is_empty (lines
)
828 && !btrace_line_range_contains_range (last_lines
, lines
))
830 btrace_print_lines (lines
, uiout
, &src_and_asm_tuple
, &asm_list
,
834 else if (!src_and_asm_tuple
.has_value ())
836 gdb_assert (!asm_list
.has_value ());
838 src_and_asm_tuple
.emplace (uiout
, "src_and_asm_line");
840 /* No source information. */
841 asm_list
.emplace (uiout
, "line_asm_insn");
844 gdb_assert (src_and_asm_tuple
.has_value ());
845 gdb_assert (asm_list
.has_value ());
848 memset (&dinsn
, 0, sizeof (dinsn
));
849 dinsn
.number
= btrace_insn_number (&it
);
850 dinsn
.addr
= insn
->pc
;
852 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
853 dinsn
.is_speculative
= 1;
855 disasm
.pretty_print_insn (&dinsn
, flags
);
860 /* The insn_history method of target record-btrace. */
863 record_btrace_target::insn_history (int size
, gdb_disassembly_flags flags
)
865 struct btrace_thread_info
*btinfo
;
866 struct btrace_insn_history
*history
;
867 struct btrace_insn_iterator begin
, end
;
868 struct ui_out
*uiout
;
869 unsigned int context
, covered
;
871 uiout
= current_uiout
;
872 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
873 context
= abs (size
);
875 error (_("Bad record instruction-history-size."));
877 btinfo
= require_btrace ();
878 history
= btinfo
->insn_history
;
881 struct btrace_insn_iterator
*replay
;
883 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
885 /* If we're replaying, we start at the replay position. Otherwise, we
886 start at the tail of the trace. */
887 replay
= btinfo
->replay
;
891 btrace_insn_end (&begin
, btinfo
);
893 /* We start from here and expand in the requested direction. Then we
894 expand in the other direction, as well, to fill up any remaining
899 /* We want the current position covered, as well. */
900 covered
= btrace_insn_next (&end
, 1);
901 covered
+= btrace_insn_prev (&begin
, context
- covered
);
902 covered
+= btrace_insn_next (&end
, context
- covered
);
906 covered
= btrace_insn_next (&end
, context
);
907 covered
+= btrace_insn_prev (&begin
, context
- covered
);
912 begin
= history
->begin
;
915 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
916 btrace_insn_number (&begin
), btrace_insn_number (&end
));
921 covered
= btrace_insn_prev (&begin
, context
);
926 covered
= btrace_insn_next (&end
, context
);
931 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
935 printf_unfiltered (_("At the start of the branch trace record.\n"));
937 printf_unfiltered (_("At the end of the branch trace record.\n"));
940 btrace_set_insn_history (btinfo
, &begin
, &end
);
943 /* The insn_history_range method of target record-btrace. */
946 record_btrace_target::insn_history_range (ULONGEST from
, ULONGEST to
,
947 gdb_disassembly_flags flags
)
949 struct btrace_thread_info
*btinfo
;
950 struct btrace_insn_iterator begin
, end
;
951 struct ui_out
*uiout
;
952 unsigned int low
, high
;
955 uiout
= current_uiout
;
956 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
960 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
962 /* Check for wrap-arounds. */
963 if (low
!= from
|| high
!= to
)
964 error (_("Bad range."));
967 error (_("Bad range."));
969 btinfo
= require_btrace ();
971 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
973 error (_("Range out of bounds."));
975 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
978 /* Silently truncate the range. */
979 btrace_insn_end (&end
, btinfo
);
983 /* We want both begin and end to be inclusive. */
984 btrace_insn_next (&end
, 1);
987 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
988 btrace_set_insn_history (btinfo
, &begin
, &end
);
991 /* The insn_history_from method of target record-btrace. */
994 record_btrace_target::insn_history_from (ULONGEST from
, int size
,
995 gdb_disassembly_flags flags
)
997 ULONGEST begin
, end
, context
;
999 context
= abs (size
);
1001 error (_("Bad record instruction-history-size."));
1010 begin
= from
- context
+ 1;
1015 end
= from
+ context
- 1;
1017 /* Check for wrap-around. */
1022 insn_history_range (begin
, end
, flags
);
1025 /* Print the instruction number range for a function call history line. */
1028 btrace_call_history_insn_range (struct ui_out
*uiout
,
1029 const struct btrace_function
*bfun
)
1031 unsigned int begin
, end
, size
;
1033 size
= bfun
->insn
.size ();
1034 gdb_assert (size
> 0);
1036 begin
= bfun
->insn_offset
;
1037 end
= begin
+ size
- 1;
1039 uiout
->field_unsigned ("insn begin", begin
);
1041 uiout
->field_unsigned ("insn end", end
);
1044 /* Compute the lowest and highest source line for the instructions in BFUN
1045 and return them in PBEGIN and PEND.
1046 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1047 result from inlining or macro expansion. */
1050 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
1051 int *pbegin
, int *pend
)
1053 struct symtab
*symtab
;
1064 symtab
= symbol_symtab (sym
);
1066 for (const btrace_insn
&insn
: bfun
->insn
)
1068 struct symtab_and_line sal
;
1070 sal
= find_pc_line (insn
.pc
, 0);
1071 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
1074 begin
= std::min (begin
, sal
.line
);
1075 end
= std::max (end
, sal
.line
);
1083 /* Print the source line information for a function call history line. */
1086 btrace_call_history_src_line (struct ui_out
*uiout
,
1087 const struct btrace_function
*bfun
)
1096 uiout
->field_string ("file",
1097 symtab_to_filename_for_display (symbol_symtab (sym
)),
1098 file_name_style
.style ());
1100 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1105 uiout
->field_signed ("min line", begin
);
1111 uiout
->field_signed ("max line", end
);
1114 /* Get the name of a branch trace function. */
1117 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1119 struct minimal_symbol
*msym
;
1129 return sym
->print_name ();
1130 else if (msym
!= NULL
)
1131 return msym
->print_name ();
1136 /* Disassemble a section of the recorded function trace. */
1139 btrace_call_history (struct ui_out
*uiout
,
1140 const struct btrace_thread_info
*btinfo
,
1141 const struct btrace_call_iterator
*begin
,
1142 const struct btrace_call_iterator
*end
,
1145 struct btrace_call_iterator it
;
1146 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1148 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1149 btrace_call_number (end
));
1151 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1153 const struct btrace_function
*bfun
;
1154 struct minimal_symbol
*msym
;
1157 bfun
= btrace_call_get (&it
);
1161 /* Print the function index. */
1162 uiout
->field_unsigned ("index", bfun
->number
);
1165 /* Indicate gaps in the trace. */
1166 if (bfun
->errcode
!= 0)
1168 const struct btrace_config
*conf
;
1170 conf
= btrace_conf (btinfo
);
1172 /* We have trace so we must have a configuration. */
1173 gdb_assert (conf
!= NULL
);
1175 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1180 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1182 int level
= bfun
->level
+ btinfo
->level
, i
;
1184 for (i
= 0; i
< level
; ++i
)
1189 uiout
->field_string ("function", sym
->print_name (),
1190 function_name_style
.style ());
1191 else if (msym
!= NULL
)
1192 uiout
->field_string ("function", msym
->print_name (),
1193 function_name_style
.style ());
1194 else if (!uiout
->is_mi_like_p ())
1195 uiout
->field_string ("function", "??",
1196 function_name_style
.style ());
1198 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1200 uiout
->text (_("\tinst "));
1201 btrace_call_history_insn_range (uiout
, bfun
);
1204 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1206 uiout
->text (_("\tat "));
1207 btrace_call_history_src_line (uiout
, bfun
);
1214 /* The call_history method of target record-btrace. */
1217 record_btrace_target::call_history (int size
, record_print_flags flags
)
1219 struct btrace_thread_info
*btinfo
;
1220 struct btrace_call_history
*history
;
1221 struct btrace_call_iterator begin
, end
;
1222 struct ui_out
*uiout
;
1223 unsigned int context
, covered
;
1225 uiout
= current_uiout
;
1226 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1227 context
= abs (size
);
1229 error (_("Bad record function-call-history-size."));
1231 btinfo
= require_btrace ();
1232 history
= btinfo
->call_history
;
1233 if (history
== NULL
)
1235 struct btrace_insn_iterator
*replay
;
1237 DEBUG ("call-history (0x%x): %d", (int) flags
, size
);
1239 /* If we're replaying, we start at the replay position. Otherwise, we
1240 start at the tail of the trace. */
1241 replay
= btinfo
->replay
;
1244 begin
.btinfo
= btinfo
;
1245 begin
.index
= replay
->call_index
;
1248 btrace_call_end (&begin
, btinfo
);
1250 /* We start from here and expand in the requested direction. Then we
1251 expand in the other direction, as well, to fill up any remaining
1256 /* We want the current position covered, as well. */
1257 covered
= btrace_call_next (&end
, 1);
1258 covered
+= btrace_call_prev (&begin
, context
- covered
);
1259 covered
+= btrace_call_next (&end
, context
- covered
);
1263 covered
= btrace_call_next (&end
, context
);
1264 covered
+= btrace_call_prev (&begin
, context
- covered
);
1269 begin
= history
->begin
;
1272 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags
, size
,
1273 btrace_call_number (&begin
), btrace_call_number (&end
));
1278 covered
= btrace_call_prev (&begin
, context
);
1283 covered
= btrace_call_next (&end
, context
);
1288 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1292 printf_unfiltered (_("At the start of the branch trace record.\n"));
1294 printf_unfiltered (_("At the end of the branch trace record.\n"));
1297 btrace_set_call_history (btinfo
, &begin
, &end
);
1300 /* The call_history_range method of target record-btrace. */
1303 record_btrace_target::call_history_range (ULONGEST from
, ULONGEST to
,
1304 record_print_flags flags
)
1306 struct btrace_thread_info
*btinfo
;
1307 struct btrace_call_iterator begin
, end
;
1308 struct ui_out
*uiout
;
1309 unsigned int low
, high
;
1312 uiout
= current_uiout
;
1313 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1317 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags
, low
, high
);
1319 /* Check for wrap-arounds. */
1320 if (low
!= from
|| high
!= to
)
1321 error (_("Bad range."));
1324 error (_("Bad range."));
1326 btinfo
= require_btrace ();
1328 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1330 error (_("Range out of bounds."));
1332 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1335 /* Silently truncate the range. */
1336 btrace_call_end (&end
, btinfo
);
1340 /* We want both begin and end to be inclusive. */
1341 btrace_call_next (&end
, 1);
1344 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1345 btrace_set_call_history (btinfo
, &begin
, &end
);
1348 /* The call_history_from method of target record-btrace. */
1351 record_btrace_target::call_history_from (ULONGEST from
, int size
,
1352 record_print_flags flags
)
1354 ULONGEST begin
, end
, context
;
1356 context
= abs (size
);
1358 error (_("Bad record function-call-history-size."));
1367 begin
= from
- context
+ 1;
1372 end
= from
+ context
- 1;
1374 /* Check for wrap-around. */
1379 call_history_range ( begin
, end
, flags
);
1382 /* The record_method method of target record-btrace. */
1385 record_btrace_target::record_method (ptid_t ptid
)
1387 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1388 thread_info
*const tp
= find_thread_ptid (proc_target
, ptid
);
1391 error (_("No thread."));
1393 if (tp
->btrace
.target
== NULL
)
1394 return RECORD_METHOD_NONE
;
1396 return RECORD_METHOD_BTRACE
;
1399 /* The record_is_replaying method of target record-btrace. */
1402 record_btrace_target::record_is_replaying (ptid_t ptid
)
1404 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1405 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
1406 if (btrace_is_replaying (tp
))
1412 /* The record_will_replay method of target record-btrace. */
1415 record_btrace_target::record_will_replay (ptid_t ptid
, int dir
)
1417 return dir
== EXEC_REVERSE
|| record_is_replaying (ptid
);
1420 /* The xfer_partial method of target record-btrace. */
1422 enum target_xfer_status
1423 record_btrace_target::xfer_partial (enum target_object object
,
1424 const char *annex
, gdb_byte
*readbuf
,
1425 const gdb_byte
*writebuf
, ULONGEST offset
,
1426 ULONGEST len
, ULONGEST
*xfered_len
)
1428 /* Filter out requests that don't make sense during replay. */
1429 if (replay_memory_access
== replay_memory_access_read_only
1430 && !record_btrace_generating_corefile
1431 && record_is_replaying (inferior_ptid
))
1435 case TARGET_OBJECT_MEMORY
:
1437 struct target_section
*section
;
1439 /* We do not allow writing memory in general. */
1440 if (writebuf
!= NULL
)
1443 return TARGET_XFER_UNAVAILABLE
;
1446 /* We allow reading readonly memory. */
1447 section
= target_section_by_addr (this, offset
);
1448 if (section
!= NULL
)
1450 /* Check if the section we found is readonly. */
1451 if ((bfd_section_flags (section
->the_bfd_section
)
1452 & SEC_READONLY
) != 0)
1454 /* Truncate the request to fit into this section. */
1455 len
= std::min (len
, section
->endaddr
- offset
);
1461 return TARGET_XFER_UNAVAILABLE
;
1466 /* Forward the request. */
1467 return this->beneath ()->xfer_partial (object
, annex
, readbuf
, writebuf
,
1468 offset
, len
, xfered_len
);
1471 /* The insert_breakpoint method of target record-btrace. */
1474 record_btrace_target::insert_breakpoint (struct gdbarch
*gdbarch
,
1475 struct bp_target_info
*bp_tgt
)
1480 /* Inserting breakpoints requires accessing memory. Allow it for the
1481 duration of this function. */
1482 old
= replay_memory_access
;
1483 replay_memory_access
= replay_memory_access_read_write
;
1488 ret
= this->beneath ()->insert_breakpoint (gdbarch
, bp_tgt
);
1490 catch (const gdb_exception
&except
)
1492 replay_memory_access
= old
;
1495 replay_memory_access
= old
;
1500 /* The remove_breakpoint method of target record-btrace. */
1503 record_btrace_target::remove_breakpoint (struct gdbarch
*gdbarch
,
1504 struct bp_target_info
*bp_tgt
,
1505 enum remove_bp_reason reason
)
1510 /* Removing breakpoints requires accessing memory. Allow it for the
1511 duration of this function. */
1512 old
= replay_memory_access
;
1513 replay_memory_access
= replay_memory_access_read_write
;
1518 ret
= this->beneath ()->remove_breakpoint (gdbarch
, bp_tgt
, reason
);
1520 catch (const gdb_exception
&except
)
1522 replay_memory_access
= old
;
1525 replay_memory_access
= old
;
1530 /* The fetch_registers method of target record-btrace. */
1533 record_btrace_target::fetch_registers (struct regcache
*regcache
, int regno
)
1535 thread_info
*tp
= find_thread_ptid (regcache
->target (), regcache
->ptid ());
1536 gdb_assert (tp
!= NULL
);
1538 btrace_insn_iterator
*replay
= tp
->btrace
.replay
;
1539 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1541 const struct btrace_insn
*insn
;
1542 struct gdbarch
*gdbarch
;
1545 gdbarch
= regcache
->arch ();
1546 pcreg
= gdbarch_pc_regnum (gdbarch
);
1550 /* We can only provide the PC register. */
1551 if (regno
>= 0 && regno
!= pcreg
)
1554 insn
= btrace_insn_get (replay
);
1555 gdb_assert (insn
!= NULL
);
1557 regcache
->raw_supply (regno
, &insn
->pc
);
1560 this->beneath ()->fetch_registers (regcache
, regno
);
1563 /* The store_registers method of target record-btrace. */
1566 record_btrace_target::store_registers (struct regcache
*regcache
, int regno
)
1568 if (!record_btrace_generating_corefile
1569 && record_is_replaying (regcache
->ptid ()))
1570 error (_("Cannot write registers while replaying."));
1572 gdb_assert (may_write_registers
);
1574 this->beneath ()->store_registers (regcache
, regno
);
1577 /* The prepare_to_store method of target record-btrace. */
1580 record_btrace_target::prepare_to_store (struct regcache
*regcache
)
1582 if (!record_btrace_generating_corefile
1583 && record_is_replaying (regcache
->ptid ()))
1586 this->beneath ()->prepare_to_store (regcache
);
1589 /* The branch trace frame cache. */
1591 struct btrace_frame_cache
1594 struct thread_info
*tp
;
1596 /* The frame info. */
1597 struct frame_info
*frame
;
1599 /* The branch trace function segment. */
1600 const struct btrace_function
*bfun
;
1603 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1605 static htab_t bfcache
;
1607 /* hash_f for htab_create_alloc of bfcache. */
1610 bfcache_hash (const void *arg
)
1612 const struct btrace_frame_cache
*cache
1613 = (const struct btrace_frame_cache
*) arg
;
1615 return htab_hash_pointer (cache
->frame
);
1618 /* eq_f for htab_create_alloc of bfcache. */
1621 bfcache_eq (const void *arg1
, const void *arg2
)
1623 const struct btrace_frame_cache
*cache1
1624 = (const struct btrace_frame_cache
*) arg1
;
1625 const struct btrace_frame_cache
*cache2
1626 = (const struct btrace_frame_cache
*) arg2
;
1628 return cache1
->frame
== cache2
->frame
;
1631 /* Create a new btrace frame cache. */
1633 static struct btrace_frame_cache
*
1634 bfcache_new (struct frame_info
*frame
)
1636 struct btrace_frame_cache
*cache
;
1639 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1640 cache
->frame
= frame
;
1642 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1643 gdb_assert (*slot
== NULL
);
1649 /* Extract the branch trace function from a branch trace frame. */
1651 static const struct btrace_function
*
1652 btrace_get_frame_function (struct frame_info
*frame
)
1654 const struct btrace_frame_cache
*cache
;
1655 struct btrace_frame_cache pattern
;
1658 pattern
.frame
= frame
;
1660 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1664 cache
= (const struct btrace_frame_cache
*) *slot
;
1668 /* Implement stop_reason method for record_btrace_frame_unwind. */
1670 static enum unwind_stop_reason
1671 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1674 const struct btrace_frame_cache
*cache
;
1675 const struct btrace_function
*bfun
;
1677 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1679 gdb_assert (bfun
!= NULL
);
1682 return UNWIND_UNAVAILABLE
;
1684 return UNWIND_NO_REASON
;
1687 /* Implement this_id method for record_btrace_frame_unwind. */
1690 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1691 struct frame_id
*this_id
)
1693 const struct btrace_frame_cache
*cache
;
1694 const struct btrace_function
*bfun
;
1695 struct btrace_call_iterator it
;
1696 CORE_ADDR code
, special
;
1698 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1701 gdb_assert (bfun
!= NULL
);
1703 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1704 bfun
= btrace_call_get (&it
);
1706 code
= get_frame_func (this_frame
);
1707 special
= bfun
->number
;
1709 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1711 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1712 btrace_get_bfun_name (cache
->bfun
),
1713 core_addr_to_string_nz (this_id
->code_addr
),
1714 core_addr_to_string_nz (this_id
->special_addr
));
1717 /* Implement prev_register method for record_btrace_frame_unwind. */
1719 static struct value
*
1720 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1724 const struct btrace_frame_cache
*cache
;
1725 const struct btrace_function
*bfun
, *caller
;
1726 struct btrace_call_iterator it
;
1727 struct gdbarch
*gdbarch
;
1731 gdbarch
= get_frame_arch (this_frame
);
1732 pcreg
= gdbarch_pc_regnum (gdbarch
);
1733 if (pcreg
< 0 || regnum
!= pcreg
)
1734 throw_error (NOT_AVAILABLE_ERROR
,
1735 _("Registers are not available in btrace record history"));
1737 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1739 gdb_assert (bfun
!= NULL
);
1741 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1742 throw_error (NOT_AVAILABLE_ERROR
,
1743 _("No caller in btrace record history"));
1745 caller
= btrace_call_get (&it
);
1747 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1748 pc
= caller
->insn
.front ().pc
;
1751 pc
= caller
->insn
.back ().pc
;
1752 pc
+= gdb_insn_length (gdbarch
, pc
);
1755 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1756 btrace_get_bfun_name (bfun
), bfun
->level
,
1757 core_addr_to_string_nz (pc
));
1759 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1762 /* Implement sniffer method for record_btrace_frame_unwind. */
1765 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1766 struct frame_info
*this_frame
,
1769 const struct btrace_function
*bfun
;
1770 struct btrace_frame_cache
*cache
;
1771 struct thread_info
*tp
;
1772 struct frame_info
*next
;
1774 /* THIS_FRAME does not contain a reference to its thread. */
1775 tp
= inferior_thread ();
1778 next
= get_next_frame (this_frame
);
1781 const struct btrace_insn_iterator
*replay
;
1783 replay
= tp
->btrace
.replay
;
1785 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1789 const struct btrace_function
*callee
;
1790 struct btrace_call_iterator it
;
1792 callee
= btrace_get_frame_function (next
);
1793 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1796 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1799 bfun
= btrace_call_get (&it
);
1805 DEBUG ("[frame] sniffed frame for %s on level %d",
1806 btrace_get_bfun_name (bfun
), bfun
->level
);
1808 /* This is our frame. Initialize the frame cache. */
1809 cache
= bfcache_new (this_frame
);
1813 *this_cache
= cache
;
1817 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1820 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1821 struct frame_info
*this_frame
,
1824 const struct btrace_function
*bfun
, *callee
;
1825 struct btrace_frame_cache
*cache
;
1826 struct btrace_call_iterator it
;
1827 struct frame_info
*next
;
1828 struct thread_info
*tinfo
;
1830 next
= get_next_frame (this_frame
);
1834 callee
= btrace_get_frame_function (next
);
1838 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1841 tinfo
= inferior_thread ();
1842 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1845 bfun
= btrace_call_get (&it
);
1847 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1848 btrace_get_bfun_name (bfun
), bfun
->level
);
1850 /* This is our frame. Initialize the frame cache. */
1851 cache
= bfcache_new (this_frame
);
1855 *this_cache
= cache
;
1860 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1862 struct btrace_frame_cache
*cache
;
1865 cache
= (struct btrace_frame_cache
*) this_cache
;
1867 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1868 gdb_assert (slot
!= NULL
);
1870 htab_remove_elt (bfcache
, cache
);
1873 /* btrace recording does not store previous memory content, neither the stack
1874 frames content. Any unwinding would return erroneous results as the stack
1875 contents no longer matches the changed PC value restored from history.
1876 Therefore this unwinder reports any possibly unwound registers as
1879 const struct frame_unwind record_btrace_frame_unwind
=
1882 record_btrace_frame_unwind_stop_reason
,
1883 record_btrace_frame_this_id
,
1884 record_btrace_frame_prev_register
,
1886 record_btrace_frame_sniffer
,
1887 record_btrace_frame_dealloc_cache
1890 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1893 record_btrace_frame_unwind_stop_reason
,
1894 record_btrace_frame_this_id
,
1895 record_btrace_frame_prev_register
,
1897 record_btrace_tailcall_frame_sniffer
,
1898 record_btrace_frame_dealloc_cache
1901 /* Implement the get_unwinder method. */
1903 const struct frame_unwind
*
1904 record_btrace_target::get_unwinder ()
1906 return &record_btrace_frame_unwind
;
1909 /* Implement the get_tailcall_unwinder method. */
1911 const struct frame_unwind
*
1912 record_btrace_target::get_tailcall_unwinder ()
1914 return &record_btrace_tailcall_frame_unwind
;
1917 /* Return a human-readable string for FLAG. */
1920 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1928 return "reverse-step";
1934 return "reverse-cont";
1943 /* Indicate that TP should be resumed according to FLAG. */
1946 record_btrace_resume_thread (struct thread_info
*tp
,
1947 enum btrace_thread_flag flag
)
1949 struct btrace_thread_info
*btinfo
;
1951 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1952 target_pid_to_str (tp
->ptid
).c_str (), flag
,
1953 btrace_thread_flag_to_str (flag
));
1955 btinfo
= &tp
->btrace
;
1957 /* Fetch the latest branch trace. */
1958 btrace_fetch (tp
, record_btrace_get_cpu ());
1960 /* A resume request overwrites a preceding resume or stop request. */
1961 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1962 btinfo
->flags
|= flag
;
1965 /* Get the current frame for TP. */
1967 static struct frame_id
1968 get_thread_current_frame_id (struct thread_info
*tp
)
1973 /* Set current thread, which is implicitly used by
1974 get_current_frame. */
1975 scoped_restore_current_thread restore_thread
;
1977 switch_to_thread (tp
);
1979 process_stratum_target
*proc_target
= tp
->inf
->process_target ();
1981 /* Clear the executing flag to allow changes to the current frame.
1982 We are not actually running, yet. We just started a reverse execution
1983 command or a record goto command.
1984 For the latter, EXECUTING is false and this has no effect.
1985 For the former, EXECUTING is true and we're in wait, about to
1986 move the thread. Since we need to recompute the stack, we temporarily
1987 set EXECUTING to false. */
1988 executing
= tp
->executing
;
1989 set_executing (proc_target
, inferior_ptid
, false);
1994 id
= get_frame_id (get_current_frame ());
1996 catch (const gdb_exception
&except
)
1998 /* Restore the previous execution state. */
1999 set_executing (proc_target
, inferior_ptid
, executing
);
2004 /* Restore the previous execution state. */
2005 set_executing (proc_target
, inferior_ptid
, executing
);
2010 /* Start replaying a thread. */
2012 static struct btrace_insn_iterator
*
2013 record_btrace_start_replaying (struct thread_info
*tp
)
2015 struct btrace_insn_iterator
*replay
;
2016 struct btrace_thread_info
*btinfo
;
2018 btinfo
= &tp
->btrace
;
2021 /* We can't start replaying without trace. */
2022 if (btinfo
->functions
.empty ())
2025 /* GDB stores the current frame_id when stepping in order to detects steps
2027 Since frames are computed differently when we're replaying, we need to
2028 recompute those stored frames and fix them up so we can still detect
2029 subroutines after we started replaying. */
2032 struct frame_id frame_id
;
2033 int upd_step_frame_id
, upd_step_stack_frame_id
;
2035 /* The current frame without replaying - computed via normal unwind. */
2036 frame_id
= get_thread_current_frame_id (tp
);
2038 /* Check if we need to update any stepping-related frame id's. */
2039 upd_step_frame_id
= frame_id_eq (frame_id
,
2040 tp
->control
.step_frame_id
);
2041 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
2042 tp
->control
.step_stack_frame_id
);
2044 /* We start replaying at the end of the branch trace. This corresponds
2045 to the current instruction. */
2046 replay
= XNEW (struct btrace_insn_iterator
);
2047 btrace_insn_end (replay
, btinfo
);
2049 /* Skip gaps at the end of the trace. */
2050 while (btrace_insn_get (replay
) == NULL
)
2054 steps
= btrace_insn_prev (replay
, 1);
2056 error (_("No trace."));
2059 /* We're not replaying, yet. */
2060 gdb_assert (btinfo
->replay
== NULL
);
2061 btinfo
->replay
= replay
;
2063 /* Make sure we're not using any stale registers. */
2064 registers_changed_thread (tp
);
2066 /* The current frame with replaying - computed via btrace unwind. */
2067 frame_id
= get_thread_current_frame_id (tp
);
2069 /* Replace stepping related frames where necessary. */
2070 if (upd_step_frame_id
)
2071 tp
->control
.step_frame_id
= frame_id
;
2072 if (upd_step_stack_frame_id
)
2073 tp
->control
.step_stack_frame_id
= frame_id
;
2075 catch (const gdb_exception
&except
)
2077 xfree (btinfo
->replay
);
2078 btinfo
->replay
= NULL
;
2080 registers_changed_thread (tp
);
2088 /* Stop replaying a thread. */
2091 record_btrace_stop_replaying (struct thread_info
*tp
)
2093 struct btrace_thread_info
*btinfo
;
2095 btinfo
= &tp
->btrace
;
2097 xfree (btinfo
->replay
);
2098 btinfo
->replay
= NULL
;
2100 /* Make sure we're not leaving any stale registers. */
2101 registers_changed_thread (tp
);
2104 /* Stop replaying TP if it is at the end of its execution history. */
2107 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2109 struct btrace_insn_iterator
*replay
, end
;
2110 struct btrace_thread_info
*btinfo
;
2112 btinfo
= &tp
->btrace
;
2113 replay
= btinfo
->replay
;
2118 btrace_insn_end (&end
, btinfo
);
2120 if (btrace_insn_cmp (replay
, &end
) == 0)
2121 record_btrace_stop_replaying (tp
);
2124 /* The resume method of target record-btrace. */
2127 record_btrace_target::resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2129 enum btrace_thread_flag flag
, cflag
;
2131 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
).c_str (),
2132 ::execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2133 step
? "step" : "cont");
2135 /* Store the execution direction of the last resume.
2137 If there is more than one resume call, we have to rely on infrun
2138 to not change the execution direction in-between. */
2139 record_btrace_resume_exec_dir
= ::execution_direction
;
2141 /* As long as we're not replaying, just forward the request.
2143 For non-stop targets this means that no thread is replaying. In order to
2144 make progress, we may need to explicitly move replaying threads to the end
2145 of their execution history. */
2146 if ((::execution_direction
!= EXEC_REVERSE
)
2147 && !record_is_replaying (minus_one_ptid
))
2149 this->beneath ()->resume (ptid
, step
, signal
);
2153 /* Compute the btrace thread flag for the requested move. */
2154 if (::execution_direction
== EXEC_REVERSE
)
2156 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2161 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2165 /* We just indicate the resume intent here. The actual stepping happens in
2166 record_btrace_wait below.
2168 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2170 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2172 if (!target_is_non_stop_p ())
2174 gdb_assert (inferior_ptid
.matches (ptid
));
2176 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2178 if (tp
->ptid
.matches (inferior_ptid
))
2179 record_btrace_resume_thread (tp
, flag
);
2181 record_btrace_resume_thread (tp
, cflag
);
2186 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2187 record_btrace_resume_thread (tp
, flag
);
2190 /* Async support. */
2191 if (target_can_async_p ())
2194 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2198 /* The commit_resume method of target record-btrace. */
2201 record_btrace_target::commit_resume ()
2203 if ((::execution_direction
!= EXEC_REVERSE
)
2204 && !record_is_replaying (minus_one_ptid
))
2205 beneath ()->commit_resume ();
2208 /* Cancel resuming TP. */
2211 record_btrace_cancel_resume (struct thread_info
*tp
)
2213 enum btrace_thread_flag flags
;
2215 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2219 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2220 print_thread_id (tp
),
2221 target_pid_to_str (tp
->ptid
).c_str (), flags
,
2222 btrace_thread_flag_to_str (flags
));
2224 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2225 record_btrace_stop_replaying_at_end (tp
);
2228 /* Return a target_waitstatus indicating that we ran out of history. */
2230 static struct target_waitstatus
2231 btrace_step_no_history (void)
2233 struct target_waitstatus status
;
2235 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2240 /* Return a target_waitstatus indicating that a step finished. */
2242 static struct target_waitstatus
2243 btrace_step_stopped (void)
2245 struct target_waitstatus status
;
2247 status
.kind
= TARGET_WAITKIND_STOPPED
;
2248 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2253 /* Return a target_waitstatus indicating that a thread was stopped as
2256 static struct target_waitstatus
2257 btrace_step_stopped_on_request (void)
2259 struct target_waitstatus status
;
2261 status
.kind
= TARGET_WAITKIND_STOPPED
;
2262 status
.value
.sig
= GDB_SIGNAL_0
;
2267 /* Return a target_waitstatus indicating a spurious stop. */
2269 static struct target_waitstatus
2270 btrace_step_spurious (void)
2272 struct target_waitstatus status
;
2274 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2279 /* Return a target_waitstatus indicating that the thread was not resumed. */
2281 static struct target_waitstatus
2282 btrace_step_no_resumed (void)
2284 struct target_waitstatus status
;
2286 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2291 /* Return a target_waitstatus indicating that we should wait again. */
2293 static struct target_waitstatus
2294 btrace_step_again (void)
2296 struct target_waitstatus status
;
2298 status
.kind
= TARGET_WAITKIND_IGNORE
;
2303 /* Clear the record histories. */
2306 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2308 xfree (btinfo
->insn_history
);
2309 xfree (btinfo
->call_history
);
2311 btinfo
->insn_history
= NULL
;
2312 btinfo
->call_history
= NULL
;
2315 /* Check whether TP's current replay position is at a breakpoint. */
2318 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2320 struct btrace_insn_iterator
*replay
;
2321 struct btrace_thread_info
*btinfo
;
2322 const struct btrace_insn
*insn
;
2324 btinfo
= &tp
->btrace
;
2325 replay
= btinfo
->replay
;
2330 insn
= btrace_insn_get (replay
);
2334 return record_check_stopped_by_breakpoint (tp
->inf
->aspace
, insn
->pc
,
2335 &btinfo
->stop_reason
);
2338 /* Step one instruction in forward direction. */
2340 static struct target_waitstatus
2341 record_btrace_single_step_forward (struct thread_info
*tp
)
2343 struct btrace_insn_iterator
*replay
, end
, start
;
2344 struct btrace_thread_info
*btinfo
;
2346 btinfo
= &tp
->btrace
;
2347 replay
= btinfo
->replay
;
2349 /* We're done if we're not replaying. */
2351 return btrace_step_no_history ();
2353 /* Check if we're stepping a breakpoint. */
2354 if (record_btrace_replay_at_breakpoint (tp
))
2355 return btrace_step_stopped ();
2357 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2358 jump back to the instruction at which we started. */
2364 /* We will bail out here if we continue stepping after reaching the end
2365 of the execution history. */
2366 steps
= btrace_insn_next (replay
, 1);
2370 return btrace_step_no_history ();
2373 while (btrace_insn_get (replay
) == NULL
);
2375 /* Determine the end of the instruction trace. */
2376 btrace_insn_end (&end
, btinfo
);
2378 /* The execution trace contains (and ends with) the current instruction.
2379 This instruction has not been executed, yet, so the trace really ends
2380 one instruction earlier. */
2381 if (btrace_insn_cmp (replay
, &end
) == 0)
2382 return btrace_step_no_history ();
2384 return btrace_step_spurious ();
2387 /* Step one instruction in backward direction. */
2389 static struct target_waitstatus
2390 record_btrace_single_step_backward (struct thread_info
*tp
)
2392 struct btrace_insn_iterator
*replay
, start
;
2393 struct btrace_thread_info
*btinfo
;
2395 btinfo
= &tp
->btrace
;
2396 replay
= btinfo
->replay
;
2398 /* Start replaying if we're not already doing so. */
2400 replay
= record_btrace_start_replaying (tp
);
2402 /* If we can't step any further, we reached the end of the history.
2403 Skip gaps during replay. If we end up at a gap (at the beginning of
2404 the trace), jump back to the instruction at which we started. */
2410 steps
= btrace_insn_prev (replay
, 1);
2414 return btrace_step_no_history ();
2417 while (btrace_insn_get (replay
) == NULL
);
2419 /* Check if we're stepping a breakpoint.
2421 For reverse-stepping, this check is after the step. There is logic in
2422 infrun.c that handles reverse-stepping separately. See, for example,
2423 proceed and adjust_pc_after_break.
2425 This code assumes that for reverse-stepping, PC points to the last
2426 de-executed instruction, whereas for forward-stepping PC points to the
2427 next to-be-executed instruction. */
2428 if (record_btrace_replay_at_breakpoint (tp
))
2429 return btrace_step_stopped ();
2431 return btrace_step_spurious ();
2434 /* Step a single thread. */
2436 static struct target_waitstatus
2437 record_btrace_step_thread (struct thread_info
*tp
)
2439 struct btrace_thread_info
*btinfo
;
2440 struct target_waitstatus status
;
2441 enum btrace_thread_flag flags
;
2443 btinfo
= &tp
->btrace
;
2445 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2446 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2448 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2449 target_pid_to_str (tp
->ptid
).c_str (), flags
,
2450 btrace_thread_flag_to_str (flags
));
2452 /* We can't step without an execution history. */
2453 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2454 return btrace_step_no_history ();
2459 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2462 return btrace_step_stopped_on_request ();
2465 status
= record_btrace_single_step_forward (tp
);
2466 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2469 return btrace_step_stopped ();
2472 status
= record_btrace_single_step_backward (tp
);
2473 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2476 return btrace_step_stopped ();
2479 status
= record_btrace_single_step_forward (tp
);
2480 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2483 btinfo
->flags
|= flags
;
2484 return btrace_step_again ();
2487 status
= record_btrace_single_step_backward (tp
);
2488 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2491 btinfo
->flags
|= flags
;
2492 return btrace_step_again ();
2495 /* We keep threads moving at the end of their execution history. The wait
2496 method will stop the thread for whom the event is reported. */
2497 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2498 btinfo
->flags
|= flags
;
2503 /* Announce further events if necessary. */
2506 record_btrace_maybe_mark_async_event
2507 (const std::vector
<thread_info
*> &moving
,
2508 const std::vector
<thread_info
*> &no_history
)
2510 bool more_moving
= !moving
.empty ();
2511 bool more_no_history
= !no_history
.empty ();;
2513 if (!more_moving
&& !more_no_history
)
2517 DEBUG ("movers pending");
2519 if (more_no_history
)
2520 DEBUG ("no-history pending");
2522 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2525 /* The wait method of target record-btrace. */
2528 record_btrace_target::wait (ptid_t ptid
, struct target_waitstatus
*status
,
2531 std::vector
<thread_info
*> moving
;
2532 std::vector
<thread_info
*> no_history
;
2534 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
).c_str (), options
);
2536 /* As long as we're not replaying, just forward the request. */
2537 if ((::execution_direction
!= EXEC_REVERSE
)
2538 && !record_is_replaying (minus_one_ptid
))
2540 return this->beneath ()->wait (ptid
, status
, options
);
2543 /* Keep a work list of moving threads. */
2544 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2545 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2546 if ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0)
2547 moving
.push_back (tp
);
2549 if (moving
.empty ())
2551 *status
= btrace_step_no_resumed ();
2553 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
).c_str (),
2554 target_waitstatus_to_string (status
).c_str ());
2559 /* Step moving threads one by one, one step each, until either one thread
2560 reports an event or we run out of threads to step.
2562 When stepping more than one thread, chances are that some threads reach
2563 the end of their execution history earlier than others. If we reported
2564 this immediately, all-stop on top of non-stop would stop all threads and
2565 resume the same threads next time. And we would report the same thread
2566 having reached the end of its execution history again.
2568 In the worst case, this would starve the other threads. But even if other
2569 threads would be allowed to make progress, this would result in far too
2570 many intermediate stops.
2572 We therefore delay the reporting of "no execution history" until we have
2573 nothing else to report. By this time, all threads should have moved to
2574 either the beginning or the end of their execution history. There will
2575 be a single user-visible stop. */
2576 struct thread_info
*eventing
= NULL
;
2577 while ((eventing
== NULL
) && !moving
.empty ())
2579 for (unsigned int ix
= 0; eventing
== NULL
&& ix
< moving
.size ();)
2581 thread_info
*tp
= moving
[ix
];
2583 *status
= record_btrace_step_thread (tp
);
2585 switch (status
->kind
)
2587 case TARGET_WAITKIND_IGNORE
:
2591 case TARGET_WAITKIND_NO_HISTORY
:
2592 no_history
.push_back (ordered_remove (moving
, ix
));
2596 eventing
= unordered_remove (moving
, ix
);
2602 if (eventing
== NULL
)
2604 /* We started with at least one moving thread. This thread must have
2605 either stopped or reached the end of its execution history.
2607 In the former case, EVENTING must not be NULL.
2608 In the latter case, NO_HISTORY must not be empty. */
2609 gdb_assert (!no_history
.empty ());
2611 /* We kept threads moving at the end of their execution history. Stop
2612 EVENTING now that we are going to report its stop. */
2613 eventing
= unordered_remove (no_history
, 0);
2614 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2616 *status
= btrace_step_no_history ();
2619 gdb_assert (eventing
!= NULL
);
2621 /* We kept threads replaying at the end of their execution history. Stop
2622 replaying EVENTING now that we are going to report its stop. */
2623 record_btrace_stop_replaying_at_end (eventing
);
2625 /* Stop all other threads. */
2626 if (!target_is_non_stop_p ())
2628 for (thread_info
*tp
: all_non_exited_threads ())
2629 record_btrace_cancel_resume (tp
);
2632 /* In async mode, we need to announce further events. */
2633 if (target_is_async_p ())
2634 record_btrace_maybe_mark_async_event (moving
, no_history
);
2636 /* Start record histories anew from the current position. */
2637 record_btrace_clear_histories (&eventing
->btrace
);
2639 /* We moved the replay position but did not update registers. */
2640 registers_changed_thread (eventing
);
2642 DEBUG ("wait ended by thread %s (%s): %s",
2643 print_thread_id (eventing
),
2644 target_pid_to_str (eventing
->ptid
).c_str (),
2645 target_waitstatus_to_string (status
).c_str ());
2647 return eventing
->ptid
;
2650 /* The stop method of target record-btrace. */
2653 record_btrace_target::stop (ptid_t ptid
)
2655 DEBUG ("stop %s", target_pid_to_str (ptid
).c_str ());
2657 /* As long as we're not replaying, just forward the request. */
2658 if ((::execution_direction
!= EXEC_REVERSE
)
2659 && !record_is_replaying (minus_one_ptid
))
2661 this->beneath ()->stop (ptid
);
2665 process_stratum_target
*proc_target
2666 = current_inferior ()->process_target ();
2668 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2670 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2671 tp
->btrace
.flags
|= BTHR_STOP
;
2676 /* The can_execute_reverse method of target record-btrace. */
2679 record_btrace_target::can_execute_reverse ()
2684 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2687 record_btrace_target::stopped_by_sw_breakpoint ()
2689 if (record_is_replaying (minus_one_ptid
))
2691 struct thread_info
*tp
= inferior_thread ();
2693 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2696 return this->beneath ()->stopped_by_sw_breakpoint ();
2699 /* The supports_stopped_by_sw_breakpoint method of target
2703 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2705 if (record_is_replaying (minus_one_ptid
))
2708 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2711 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2714 record_btrace_target::stopped_by_hw_breakpoint ()
2716 if (record_is_replaying (minus_one_ptid
))
2718 struct thread_info
*tp
= inferior_thread ();
2720 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2723 return this->beneath ()->stopped_by_hw_breakpoint ();
2726 /* The supports_stopped_by_hw_breakpoint method of target
2730 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2732 if (record_is_replaying (minus_one_ptid
))
2735 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2738 /* The update_thread_list method of target record-btrace. */
2741 record_btrace_target::update_thread_list ()
2743 /* We don't add or remove threads during replay. */
2744 if (record_is_replaying (minus_one_ptid
))
2747 /* Forward the request. */
2748 this->beneath ()->update_thread_list ();
2751 /* The thread_alive method of target record-btrace. */
2754 record_btrace_target::thread_alive (ptid_t ptid
)
2756 /* We don't add or remove threads during replay. */
2757 if (record_is_replaying (minus_one_ptid
))
2760 /* Forward the request. */
2761 return this->beneath ()->thread_alive (ptid
);
2764 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2768 record_btrace_set_replay (struct thread_info
*tp
,
2769 const struct btrace_insn_iterator
*it
)
2771 struct btrace_thread_info
*btinfo
;
2773 btinfo
= &tp
->btrace
;
2776 record_btrace_stop_replaying (tp
);
2779 if (btinfo
->replay
== NULL
)
2780 record_btrace_start_replaying (tp
);
2781 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2784 *btinfo
->replay
= *it
;
2785 registers_changed_thread (tp
);
2788 /* Start anew from the new replay position. */
2789 record_btrace_clear_histories (btinfo
);
2791 inferior_thread ()->suspend
.stop_pc
2792 = regcache_read_pc (get_current_regcache ());
2793 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2796 /* The goto_record_begin method of target record-btrace. */
2799 record_btrace_target::goto_record_begin ()
2801 struct thread_info
*tp
;
2802 struct btrace_insn_iterator begin
;
2804 tp
= require_btrace_thread ();
2806 btrace_insn_begin (&begin
, &tp
->btrace
);
2808 /* Skip gaps at the beginning of the trace. */
2809 while (btrace_insn_get (&begin
) == NULL
)
2813 steps
= btrace_insn_next (&begin
, 1);
2815 error (_("No trace."));
2818 record_btrace_set_replay (tp
, &begin
);
2821 /* The goto_record_end method of target record-btrace. */
2824 record_btrace_target::goto_record_end ()
2826 struct thread_info
*tp
;
2828 tp
= require_btrace_thread ();
2830 record_btrace_set_replay (tp
, NULL
);
2833 /* The goto_record method of target record-btrace. */
2836 record_btrace_target::goto_record (ULONGEST insn
)
2838 struct thread_info
*tp
;
2839 struct btrace_insn_iterator it
;
2840 unsigned int number
;
2845 /* Check for wrap-arounds. */
2847 error (_("Instruction number out of range."));
2849 tp
= require_btrace_thread ();
2851 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2853 /* Check if the instruction could not be found or is a gap. */
2854 if (found
== 0 || btrace_insn_get (&it
) == NULL
)
2855 error (_("No such instruction."));
2857 record_btrace_set_replay (tp
, &it
);
2860 /* The record_stop_replaying method of target record-btrace. */
2863 record_btrace_target::record_stop_replaying ()
2865 for (thread_info
*tp
: all_non_exited_threads ())
2866 record_btrace_stop_replaying (tp
);
2869 /* The execution_direction target method. */
2871 enum exec_direction_kind
2872 record_btrace_target::execution_direction ()
2874 return record_btrace_resume_exec_dir
;
2877 /* The prepare_to_generate_core target method. */
2880 record_btrace_target::prepare_to_generate_core ()
2882 record_btrace_generating_corefile
= 1;
2885 /* The done_generating_core target method. */
2888 record_btrace_target::done_generating_core ()
2890 record_btrace_generating_corefile
= 0;
2893 /* Start recording in BTS format. */
2896 cmd_record_btrace_bts_start (const char *args
, int from_tty
)
2898 if (args
!= NULL
&& *args
!= 0)
2899 error (_("Invalid argument."));
2901 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2905 execute_command ("target record-btrace", from_tty
);
2907 catch (const gdb_exception
&exception
)
2909 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2914 /* Start recording in Intel Processor Trace format. */
2917 cmd_record_btrace_pt_start (const char *args
, int from_tty
)
2919 if (args
!= NULL
&& *args
!= 0)
2920 error (_("Invalid argument."));
2922 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2926 execute_command ("target record-btrace", from_tty
);
2928 catch (const gdb_exception
&exception
)
2930 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2935 /* Alias for "target record". */
2938 cmd_record_btrace_start (const char *args
, int from_tty
)
2940 if (args
!= NULL
&& *args
!= 0)
2941 error (_("Invalid argument."));
2943 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2947 execute_command ("target record-btrace", from_tty
);
2949 catch (const gdb_exception
&exception
)
2951 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2955 execute_command ("target record-btrace", from_tty
);
2957 catch (const gdb_exception
&ex
)
2959 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2965 /* The "show record btrace replay-memory-access" command. */
2968 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2969 struct cmd_list_element
*c
, const char *value
)
2971 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2972 replay_memory_access
);
2975 /* The "set record btrace cpu none" command. */
2978 cmd_set_record_btrace_cpu_none (const char *args
, int from_tty
)
2980 if (args
!= nullptr && *args
!= 0)
2981 error (_("Trailing junk: '%s'."), args
);
2983 record_btrace_cpu_state
= CS_NONE
;
2986 /* The "set record btrace cpu auto" command. */
2989 cmd_set_record_btrace_cpu_auto (const char *args
, int from_tty
)
2991 if (args
!= nullptr && *args
!= 0)
2992 error (_("Trailing junk: '%s'."), args
);
2994 record_btrace_cpu_state
= CS_AUTO
;
2997 /* The "set record btrace cpu" command. */
3000 cmd_set_record_btrace_cpu (const char *args
, int from_tty
)
3002 if (args
== nullptr)
3005 /* We use a hard-coded vendor string for now. */
3006 unsigned int family
, model
, stepping
;
3007 int l1
, l2
, matches
= sscanf (args
, "intel: %u/%u%n/%u%n", &family
,
3008 &model
, &l1
, &stepping
, &l2
);
3011 if (strlen (args
) != l2
)
3012 error (_("Trailing junk: '%s'."), args
+ l2
);
3014 else if (matches
== 2)
3016 if (strlen (args
) != l1
)
3017 error (_("Trailing junk: '%s'."), args
+ l1
);
3022 error (_("Bad format. See \"help set record btrace cpu\"."));
3024 if (USHRT_MAX
< family
)
3025 error (_("Cpu family too big."));
3027 if (UCHAR_MAX
< model
)
3028 error (_("Cpu model too big."));
3030 if (UCHAR_MAX
< stepping
)
3031 error (_("Cpu stepping too big."));
3033 record_btrace_cpu
.vendor
= CV_INTEL
;
3034 record_btrace_cpu
.family
= family
;
3035 record_btrace_cpu
.model
= model
;
3036 record_btrace_cpu
.stepping
= stepping
;
3038 record_btrace_cpu_state
= CS_CPU
;
3041 /* The "show record btrace cpu" command. */
3044 cmd_show_record_btrace_cpu (const char *args
, int from_tty
)
3046 if (args
!= nullptr && *args
!= 0)
3047 error (_("Trailing junk: '%s'."), args
);
3049 switch (record_btrace_cpu_state
)
3052 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3056 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3060 switch (record_btrace_cpu
.vendor
)
3063 if (record_btrace_cpu
.stepping
== 0)
3064 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3065 record_btrace_cpu
.family
,
3066 record_btrace_cpu
.model
);
3068 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3069 record_btrace_cpu
.family
,
3070 record_btrace_cpu
.model
,
3071 record_btrace_cpu
.stepping
);
3076 error (_("Internal error: bad cpu state."));
3079 /* The "record bts buffer-size" show value function. */
3082 show_record_bts_buffer_size_value (struct ui_file
*file
, int from_tty
,
3083 struct cmd_list_element
*c
,
3086 fprintf_filtered (file
, _("The record/replay bts buffer size is %s.\n"),
3090 /* The "record pt buffer-size" show value function. */
3093 show_record_pt_buffer_size_value (struct ui_file
*file
, int from_tty
,
3094 struct cmd_list_element
*c
,
3097 fprintf_filtered (file
, _("The record/replay pt buffer size is %s.\n"),
3101 /* Initialize btrace commands. */
3103 void _initialize_record_btrace ();
3105 _initialize_record_btrace ()
3107 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3108 _("Start branch trace recording."), &record_btrace_cmdlist
,
3109 "record btrace ", 0, &record_cmdlist
);
3110 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3112 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3114 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3115 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3116 This format may not be available on all processors."),
3117 &record_btrace_cmdlist
);
3118 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3120 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3122 Start branch trace recording in Intel Processor Trace format.\n\n\
3123 This format may not be available on all processors."),
3124 &record_btrace_cmdlist
);
3125 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3127 add_basic_prefix_cmd ("btrace", class_support
,
3128 _("Set record options."), &set_record_btrace_cmdlist
,
3129 "set record btrace ", 0, &set_record_cmdlist
);
3131 add_show_prefix_cmd ("btrace", class_support
,
3132 _("Show record options."), &show_record_btrace_cmdlist
,
3133 "show record btrace ", 0, &show_record_cmdlist
);
3135 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3136 replay_memory_access_types
, &replay_memory_access
, _("\
3137 Set what memory accesses are allowed during replay."), _("\
3138 Show what memory accesses are allowed during replay."),
3139 _("Default is READ-ONLY.\n\n\
3140 The btrace record target does not trace data.\n\
3141 The memory therefore corresponds to the live target and not \
3142 to the current replay position.\n\n\
3143 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3144 When READ-WRITE, allow accesses to read-only and read-write memory during \
3146 NULL
, cmd_show_replay_memory_access
,
3147 &set_record_btrace_cmdlist
,
3148 &show_record_btrace_cmdlist
);
3150 add_prefix_cmd ("cpu", class_support
, cmd_set_record_btrace_cpu
,
3152 Set the cpu to be used for trace decode.\n\n\
3153 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3154 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3155 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3156 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3157 When GDB does not support that cpu, this option can be used to enable\n\
3158 workarounds for a similar cpu that GDB supports.\n\n\
3159 When set to \"none\", errata workarounds are disabled."),
3160 &set_record_btrace_cpu_cmdlist
,
3161 "set record btrace cpu ", 1,
3162 &set_record_btrace_cmdlist
);
3164 add_cmd ("auto", class_support
, cmd_set_record_btrace_cpu_auto
, _("\
3165 Automatically determine the cpu to be used for trace decode."),
3166 &set_record_btrace_cpu_cmdlist
);
3168 add_cmd ("none", class_support
, cmd_set_record_btrace_cpu_none
, _("\
3169 Do not enable errata workarounds for trace decode."),
3170 &set_record_btrace_cpu_cmdlist
);
3172 add_cmd ("cpu", class_support
, cmd_show_record_btrace_cpu
, _("\
3173 Show the cpu to be used for trace decode."),
3174 &show_record_btrace_cmdlist
);
3176 add_basic_prefix_cmd ("bts", class_support
,
3177 _("Set record btrace bts options."),
3178 &set_record_btrace_bts_cmdlist
,
3179 "set record btrace bts ", 0,
3180 &set_record_btrace_cmdlist
);
3182 add_show_prefix_cmd ("bts", class_support
,
3183 _("Show record btrace bts options."),
3184 &show_record_btrace_bts_cmdlist
,
3185 "show record btrace bts ", 0,
3186 &show_record_btrace_cmdlist
);
3188 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3189 &record_btrace_conf
.bts
.size
,
3190 _("Set the record/replay bts buffer size."),
3191 _("Show the record/replay bts buffer size."), _("\
3192 When starting recording request a trace buffer of this size. \
3193 The actual buffer size may differ from the requested size. \
3194 Use \"info record\" to see the actual buffer size.\n\n\
3195 Bigger buffers allow longer recording but also take more time to process \
3196 the recorded execution trace.\n\n\
3197 The trace buffer size may not be changed while recording."), NULL
,
3198 show_record_bts_buffer_size_value
,
3199 &set_record_btrace_bts_cmdlist
,
3200 &show_record_btrace_bts_cmdlist
);
3202 add_basic_prefix_cmd ("pt", class_support
,
3203 _("Set record btrace pt options."),
3204 &set_record_btrace_pt_cmdlist
,
3205 "set record btrace pt ", 0,
3206 &set_record_btrace_cmdlist
);
3208 add_show_prefix_cmd ("pt", class_support
,
3209 _("Show record btrace pt options."),
3210 &show_record_btrace_pt_cmdlist
,
3211 "show record btrace pt ", 0,
3212 &show_record_btrace_cmdlist
);
3214 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3215 &record_btrace_conf
.pt
.size
,
3216 _("Set the record/replay pt buffer size."),
3217 _("Show the record/replay pt buffer size."), _("\
3218 Bigger buffers allow longer recording but also take more time to process \
3219 the recorded execution.\n\
3220 The actual buffer size may differ from the requested size. Use \"info record\" \
3221 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3222 &set_record_btrace_pt_cmdlist
,
3223 &show_record_btrace_pt_cmdlist
);
3225 add_target (record_btrace_target_info
, record_btrace_target_open
);
3227 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3230 record_btrace_conf
.bts
.size
= 64 * 1024;
3231 record_btrace_conf
.pt
.size
= 16 * 1024;