X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Frecord-btrace.c;h=b67d71161946a1135767a8670a706471bd61a37c;hb=f2ffa92bbce9dd5fbedc138ac2a3bc8a88327d09;hp=77494baa54f73a4ec24a27b329a1520809cb4f08;hpb=3c615f99d3923df7dfa94c6587733c682efbbc78;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c index 77494baa54..b67d711619 100644 --- a/gdb/record-btrace.c +++ b/gdb/record-btrace.c @@ -1,6 +1,6 @@ /* Branch trace support for GDB, the GNU debugger. - Copyright (C) 2013-2015 Free Software Foundation, Inc. + Copyright (C) 2013-2018 Free Software Foundation, Inc. Contributed by Intel Corp. @@ -21,11 +21,12 @@ #include "defs.h" #include "record.h" +#include "record-btrace.h" #include "gdbthread.h" #include "target.h" #include "gdbcmd.h" #include "disasm.h" -#include "observer.h" +#include "observable.h" #include "cli/cli-utils.h" #include "source.h" #include "ui-out.h" @@ -37,12 +38,113 @@ #include "infrun.h" #include "event-loop.h" #include "inf-loop.h" +#include "vec.h" +#include "inferior.h" +#include + +static const target_info record_btrace_target_info = { + "record-btrace", + N_("Branch tracing target"), + N_("Collect control-flow trace and provide the execution history.") +}; /* The target_ops of record-btrace. */ -static struct target_ops record_btrace_ops; -/* A new thread observer enabling branch tracing for the new thread. 
*/ -static struct observer *record_btrace_thread_observer; +class record_btrace_target final : public target_ops +{ +public: + record_btrace_target () + { to_stratum = record_stratum; } + + const target_info &info () const override + { return record_btrace_target_info; } + + void close () override; + void async (int) override; + + void detach (inferior *inf, int from_tty) override + { record_detach (this, inf, from_tty); } + + void disconnect (const char *, int) override; + + void mourn_inferior () override + { record_mourn_inferior (this); } + + void kill () override + { record_kill (this); } + + enum record_method record_method (ptid_t ptid) override; + + void stop_recording () override; + void info_record () override; + + void insn_history (int size, gdb_disassembly_flags flags) override; + void insn_history_from (ULONGEST from, int size, + gdb_disassembly_flags flags) override; + void insn_history_range (ULONGEST begin, ULONGEST end, + gdb_disassembly_flags flags) override; + void call_history (int size, record_print_flags flags) override; + void call_history_from (ULONGEST begin, int size, record_print_flags flags) + override; + void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags) + override; + + bool record_is_replaying (ptid_t ptid) override; + bool record_will_replay (ptid_t ptid, int dir) override; + void record_stop_replaying () override; + + enum target_xfer_status xfer_partial (enum target_object object, + const char *annex, + gdb_byte *readbuf, + const gdb_byte *writebuf, + ULONGEST offset, ULONGEST len, + ULONGEST *xfered_len) override; + + int insert_breakpoint (struct gdbarch *, + struct bp_target_info *) override; + int remove_breakpoint (struct gdbarch *, struct bp_target_info *, + enum remove_bp_reason) override; + + void fetch_registers (struct regcache *, int) override; + + void store_registers (struct regcache *, int) override; + void prepare_to_store (struct regcache *) override; + + const struct frame_unwind 
*get_unwinder () override; + + const struct frame_unwind *get_tailcall_unwinder () override; + + void commit_resume () override; + void resume (ptid_t, int, enum gdb_signal) override; + ptid_t wait (ptid_t, struct target_waitstatus *, int) override; + + void stop (ptid_t) override; + void update_thread_list () override; + bool thread_alive (ptid_t ptid) override; + void goto_record_begin () override; + void goto_record_end () override; + void goto_record (ULONGEST insn) override; + + bool can_execute_reverse () override; + + bool stopped_by_sw_breakpoint () override; + bool supports_stopped_by_sw_breakpoint () override; + + bool stopped_by_hw_breakpoint () override; + bool supports_stopped_by_hw_breakpoint () override; + + enum exec_direction_kind execution_direction () override; + void prepare_to_generate_core () override; + void done_generating_core () override; +}; + +static record_btrace_target record_btrace_ops; + +/* Initialize the record-btrace target ops. */ + +/* Token associated with a new-thread observer enabling branch tracing + for the new thread. */ +static const gdb::observers::token record_btrace_thread_observer_token; /* Memory access types used in set/show record btrace replay-memory-access. */ static const char replay_memory_access_read_only[] = "read-only"; @@ -57,6 +159,20 @@ static const char *const replay_memory_access_types[] = /* The currently allowed replay memory access type. */ static const char *replay_memory_access = replay_memory_access_read_only; +/* The cpu state kinds. */ +enum record_btrace_cpu_state_kind +{ + CS_AUTO, + CS_NONE, + CS_CPU +}; + +/* The current cpu state. */ +static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO; + +/* The current cpu for trace decode. */ +static struct btrace_cpu record_btrace_cpu; + /* Command lists for "set/show record btrace". 
*/ static struct cmd_list_element *set_record_btrace_cmdlist; static struct cmd_list_element *show_record_btrace_cmdlist; @@ -84,6 +200,9 @@ static struct cmd_list_element *show_record_btrace_bts_cmdlist; static struct cmd_list_element *set_record_btrace_pt_cmdlist; static struct cmd_list_element *show_record_btrace_pt_cmdlist; +/* Command list for "set record btrace cpu". */ +static struct cmd_list_element *set_record_btrace_cpu_cmdlist; + /* Print a record-btrace debug message. Use do ... while (0) to avoid ambiguities when used in if statements. */ @@ -97,6 +216,26 @@ static struct cmd_list_element *show_record_btrace_pt_cmdlist; while (0) +/* Return the cpu configured by the user. Returns NULL if the cpu was + configured as auto. */ +const struct btrace_cpu * +record_btrace_get_cpu (void) +{ + switch (record_btrace_cpu_state) + { + case CS_AUTO: + return nullptr; + + case CS_NONE: + record_btrace_cpu.vendor = CV_UNKNOWN; + /* Fall through. */ + case CS_CPU: + return &record_btrace_cpu; + } + + error (_("Internal error: bad record btrace cpu state.")); +} + /* Update the branch trace for the current thread and return a pointer to its thread_info. @@ -106,15 +245,16 @@ static struct cmd_list_element *show_record_btrace_pt_cmdlist; static struct thread_info * require_btrace_thread (void) { - struct thread_info *tp; - DEBUG ("require"); - tp = find_thread_ptid (inferior_ptid); - if (tp == NULL) + if (inferior_ptid == null_ptid) error (_("No thread.")); - btrace_fetch (tp); + thread_info *tp = inferior_thread (); + + validate_registers_access (); + + btrace_fetch (tp, record_btrace_get_cpu ()); if (btrace_is_empty (tp)) error (_("No trace.")); @@ -154,18 +294,6 @@ record_btrace_enable_warn (struct thread_info *tp) END_CATCH } -/* Callback function to disable branch tracing for one thread. */ - -static void -record_btrace_disable_callback (void *arg) -{ - struct thread_info *tp; - - tp = arg; - - btrace_disable (tp); -} - /* Enable automatic tracing of new threads. 
*/ static void @@ -173,8 +301,8 @@ record_btrace_auto_enable (void) { DEBUG ("attach thread observer"); - record_btrace_thread_observer - = observer_attach_new_thread (record_btrace_enable_warn); + gdb::observers::new_thread.attach (record_btrace_enable_warn, + record_btrace_thread_observer_token); } /* Disable automatic tracing of new threads. */ @@ -182,14 +310,9 @@ record_btrace_auto_enable (void) static void record_btrace_auto_disable (void) { - /* The observer may have been detached, already. */ - if (record_btrace_thread_observer == NULL) - return; - DEBUG ("detach thread observer"); - observer_detach_new_thread (record_btrace_thread_observer); - record_btrace_thread_observer = NULL; + gdb::observers::new_thread.detach (record_btrace_thread_observer_token); } /* The record-btrace async event handler function. */ @@ -200,12 +323,62 @@ record_btrace_handle_async_inferior_event (gdb_client_data data) inferior_event_handler (INF_REG_EVENT, NULL); } -/* The to_open method of target record-btrace. */ +/* See record-btrace.h. */ + +void +record_btrace_push_target (void) +{ + const char *format; + + record_btrace_auto_enable (); + + push_target (&record_btrace_ops); + + record_btrace_async_inferior_event_handler + = create_async_event_handler (record_btrace_handle_async_inferior_event, + NULL); + record_btrace_generating_corefile = 0; + + format = btrace_format_short_string (record_btrace_conf.format); + gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format); +} + +/* Disable btrace on a set of threads on scope exit. 
*/ + +struct scoped_btrace_disable +{ + scoped_btrace_disable () = default; + + DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable); + + ~scoped_btrace_disable () + { + for (thread_info *tp : m_threads) + btrace_disable (tp); + } + + void add_thread (thread_info *thread) + { + m_threads.push_front (thread); + } + + void discard () + { + m_threads.clear (); + } + +private: + std::forward_list m_threads; +}; + +/* Open target record-btrace. */ static void -record_btrace_open (const char *args, int from_tty) +record_btrace_target_open (const char *args, int from_tty) { - struct cleanup *disable_chain; + /* If we fail to enable btrace for one thread, disable it for the threads for + which it was successfully enabled. */ + scoped_btrace_disable btrace_disable; struct thread_info *tp; DEBUG ("open"); @@ -215,38 +388,23 @@ record_btrace_open (const char *args, int from_tty) if (!target_has_execution) error (_("The program is not being run.")); - if (non_stop) - error (_("Record btrace can't debug inferior in non-stop mode.")); - - gdb_assert (record_btrace_thread_observer == NULL); - - disable_chain = make_cleanup (null_cleanup, NULL); ALL_NON_EXITED_THREADS (tp) - if (args == NULL || *args == 0 || number_is_in_list (args, tp->num)) + if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num)) { btrace_enable (tp, &record_btrace_conf); - make_cleanup (record_btrace_disable_callback, tp); + btrace_disable.add_thread (tp); } - record_btrace_auto_enable (); - - push_target (&record_btrace_ops); - - record_btrace_async_inferior_event_handler - = create_async_event_handler (record_btrace_handle_async_inferior_event, - NULL); - record_btrace_generating_corefile = 0; + record_btrace_push_target (); - observer_notify_record_changed (current_inferior (), 1); - - discard_cleanups (disable_chain); + btrace_disable.discard (); } -/* The to_stop_recording method of target record-btrace. */ +/* The stop_recording method of target record-btrace. 
*/ -static void -record_btrace_stop_recording (struct target_ops *self) +void +record_btrace_target::stop_recording () { struct thread_info *tp; @@ -259,10 +417,25 @@ record_btrace_stop_recording (struct target_ops *self) btrace_disable (tp); } -/* The to_close method of target record-btrace. */ +/* The disconnect method of target record-btrace. */ -static void -record_btrace_close (struct target_ops *self) +void +record_btrace_target::disconnect (const char *args, + int from_tty) +{ + struct target_ops *beneath = this->beneath (); + + /* Do not stop recording, just clean up GDB side. */ + unpush_target (this); + + /* Forward disconnect. */ + beneath->disconnect (args, from_tty); +} + +/* The close method of target record-btrace. */ + +void +record_btrace_target::close () { struct thread_info *tp; @@ -279,17 +452,17 @@ record_btrace_close (struct target_ops *self) btrace_teardown (tp); } -/* The to_async method of target record-btrace. */ +/* The async method of target record-btrace. */ -static void -record_btrace_async (struct target_ops *ops, int enable) +void +record_btrace_target::async (int enable) { if (enable) mark_async_event_handler (record_btrace_async_inferior_event_handler); else clear_async_event_handler (record_btrace_async_inferior_event_handler); - ops->beneath->to_async (ops->beneath, enable); + this->beneath ()->async (enable); } /* Adjusts the size and returns a human readable size suffix. */ @@ -336,7 +509,7 @@ record_btrace_print_bts_conf (const struct btrace_config_bts *conf) } } -/* Print an Intel(R) Processor Trace configuration. */ +/* Print an Intel Processor Trace configuration. */ static void record_btrace_print_pt_conf (const struct btrace_config_pt *conf) @@ -377,10 +550,10 @@ record_btrace_print_conf (const struct btrace_config *conf) internal_error (__FILE__, __LINE__, _("Unkown branch trace format.")); } -/* The to_info_record method of target record-btrace. */ +/* The info_record method of target record-btrace. 
*/ -static void -record_btrace_info (struct target_ops *self) +void +record_btrace_target::info_record () { struct btrace_thread_info *btinfo; const struct btrace_config *conf; @@ -393,13 +566,15 @@ record_btrace_info (struct target_ops *self) if (tp == NULL) error (_("No thread.")); + validate_registers_access (); + btinfo = &tp->btrace; - conf = btrace_conf (btinfo); + conf = ::btrace_conf (btinfo); if (conf != NULL) record_btrace_print_conf (conf); - btrace_fetch (tp); + btrace_fetch (tp, record_btrace_get_cpu ()); insns = 0; calls = 0; @@ -415,35 +590,19 @@ record_btrace_info (struct target_ops *self) calls = btrace_call_number (&call); btrace_insn_end (&insn, btinfo); - insns = btrace_insn_number (&insn); - if (insns != 0) - { - /* The last instruction does not really belong to the trace. */ - insns -= 1; - } - else - { - unsigned int steps; - - /* Skip gaps at the end. */ - do - { - steps = btrace_insn_prev (&insn, 1); - if (steps == 0) - break; - insns = btrace_insn_number (&insn); - } - while (insns == 0); - } + /* If the last instruction is not a gap, it is the current instruction + that is not actually part of the record. */ + if (btrace_insn_get (&insn) != NULL) + insns -= 1; gaps = btinfo->ngaps; } printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) " - "for thread %d (%s).\n"), insns, calls, gaps, - tp->num, target_pid_to_str (tp->ptid)); + "for thread %s (%s).\n"), insns, calls, gaps, + print_thread_id (tp), target_pid_to_str (tp->ptid)); if (btrace_is_replaying (tp)) printf_unfiltered (_("Replay in progress. At instruction %u.\n"), @@ -456,78 +615,158 @@ static void btrace_ui_out_decode_error (struct ui_out *uiout, int errcode, enum btrace_format format) { - const char *errstr; - int is_error; - - errstr = _("unknown"); - is_error = 1; + const char *errstr = btrace_decode_error (format, errcode); - switch (format) + uiout->text (_("[")); + /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. 
*/ + if (!(format == BTRACE_FORMAT_PT && errcode > 0)) { - default: - break; + uiout->text (_("decode error (")); + uiout->field_int ("errcode", errcode); + uiout->text (_("): ")); + } + uiout->text (errstr); + uiout->text (_("]\n")); +} - case BTRACE_FORMAT_BTS: - switch (errcode) - { - default: - break; +/* Print an unsigned int. */ + +static void +ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val) +{ + uiout->field_fmt (fld, "%u", val); +} - case BDE_BTS_OVERFLOW: - errstr = _("instruction overflow"); - break; +/* A range of source lines. */ - case BDE_BTS_INSN_SIZE: - errstr = _("unknown instruction"); - break; - } - break; +struct btrace_line_range +{ + /* The symtab this line is from. */ + struct symtab *symtab; -#if defined (HAVE_LIBIPT) - case BTRACE_FORMAT_PT: - switch (errcode) - { - case BDE_PT_USER_QUIT: - is_error = 0; - errstr = _("trace decode cancelled"); - break; - - case BDE_PT_DISABLED: - is_error = 0; - errstr = _("disabled"); - break; - - case BDE_PT_OVERFLOW: - is_error = 0; - errstr = _("overflow"); - break; - - default: - if (errcode < 0) - errstr = pt_errstr (pt_errcode (errcode)); - break; - } - break; -#endif /* defined (HAVE_LIBIPT) */ + /* The first line (inclusive). */ + int begin; + + /* The last line (exclusive). */ + int end; +}; + +/* Construct a line range. */ + +static struct btrace_line_range +btrace_mk_line_range (struct symtab *symtab, int begin, int end) +{ + struct btrace_line_range range; + + range.symtab = symtab; + range.begin = begin; + range.end = end; + + return range; +} + +/* Add a line to a line range. */ + +static struct btrace_line_range +btrace_line_range_add (struct btrace_line_range range, int line) +{ + if (range.end <= range.begin) + { + /* This is the first entry. 
*/ + range.begin = line; + range.end = line + 1; } + else if (line < range.begin) + range.begin = line; + else if (range.end < line) + range.end = line; + + return range; +} + +/* Return non-zero if RANGE is empty, zero otherwise. */ + +static int +btrace_line_range_is_empty (struct btrace_line_range range) +{ + return range.end <= range.begin; +} + +/* Return non-zero if LHS contains RHS, zero otherwise. */ + +static int +btrace_line_range_contains_range (struct btrace_line_range lhs, + struct btrace_line_range rhs) +{ + return ((lhs.symtab == rhs.symtab) + && (lhs.begin <= rhs.begin) + && (rhs.end <= lhs.end)); +} + +/* Find the line range associated with PC. */ + +static struct btrace_line_range +btrace_find_line_range (CORE_ADDR pc) +{ + struct btrace_line_range range; + struct linetable_entry *lines; + struct linetable *ltable; + struct symtab *symtab; + int nlines, i; + + symtab = find_pc_line_symtab (pc); + if (symtab == NULL) + return btrace_mk_line_range (NULL, 0, 0); + + ltable = SYMTAB_LINETABLE (symtab); + if (ltable == NULL) + return btrace_mk_line_range (symtab, 0, 0); + + nlines = ltable->nitems; + lines = ltable->item; + if (nlines <= 0) + return btrace_mk_line_range (symtab, 0, 0); - ui_out_text (uiout, _("[")); - if (is_error) + range = btrace_mk_line_range (symtab, 0, 0); + for (i = 0; i < nlines - 1; i++) { - ui_out_text (uiout, _("decode error (")); - ui_out_field_int (uiout, "errcode", errcode); - ui_out_text (uiout, _("): ")); + if ((lines[i].pc == pc) && (lines[i].line != 0)) + range = btrace_line_range_add (range, lines[i].line); } - ui_out_text (uiout, errstr); - ui_out_text (uiout, _("]\n")); + + return range; } -/* Print an unsigned int. */ +/* Print source lines in LINES to UIOUT. + + UI_ITEM_CHAIN is a cleanup chain for the last source line and the + instructions corresponding to that source line. When printing a new source + line, we do the cleanups for the open chain and open a new cleanup chain for + the new source line. 
If the source line range in LINES is not empty, this + function will leave the cleanup chain for the last printed source line open + so instructions can be added to it. */ static void -ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val) +btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout, + gdb::optional *src_and_asm_tuple, + gdb::optional *asm_list, + gdb_disassembly_flags flags) { - ui_out_field_fmt (uiout, fld, "%u", val); + print_source_lines_flags psl_flags; + + if (flags & DISASSEMBLY_FILENAME) + psl_flags |= PRINT_SOURCE_LINES_FILENAME; + + for (int line = lines.begin; line < lines.end; ++line) + { + asm_list->reset (); + + src_and_asm_tuple->emplace (uiout, "src_and_asm_line"); + + print_source_lines (lines.symtab, line, line + 1, psl_flags); + + asm_list->emplace (uiout, "line_asm_insn"); + } } /* Disassemble a section of the recorded instruction trace. */ @@ -536,17 +775,26 @@ static void btrace_insn_history (struct ui_out *uiout, const struct btrace_thread_info *btinfo, const struct btrace_insn_iterator *begin, - const struct btrace_insn_iterator *end, int flags) + const struct btrace_insn_iterator *end, + gdb_disassembly_flags flags) { - struct gdbarch *gdbarch; - struct btrace_insn_iterator it; + DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags, + btrace_insn_number (begin), btrace_insn_number (end)); + + flags |= DISASSEMBLY_SPECULATIVE; + + struct gdbarch *gdbarch = target_gdbarch (); + btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0); + + ui_out_emit_list list_emitter (uiout, "asm_insns"); - DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin), - btrace_insn_number (end)); + gdb::optional src_and_asm_tuple; + gdb::optional asm_list; - gdbarch = target_gdbarch (); + gdb_pretty_print_disassembler disasm (gdbarch); - for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1)) + for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0; + 
btrace_insn_next (&it, 1)) { const struct btrace_insn *insn; @@ -562,59 +810,68 @@ btrace_insn_history (struct ui_out *uiout, /* We have trace so we must have a configuration. */ gdb_assert (conf != NULL); - btrace_ui_out_decode_error (uiout, it.function->errcode, + uiout->field_fmt ("insn-number", "%u", + btrace_insn_number (&it)); + uiout->text ("\t"); + + btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it), conf->format); } else { - char prefix[4]; + struct disasm_insn dinsn; - /* We may add a speculation prefix later. We use the same space - that is used for the pc prefix. */ - if ((flags & DISASSEMBLY_OMIT_PC) == 0) - strncpy (prefix, pc_prefix (insn->pc), 3); - else + if ((flags & DISASSEMBLY_SOURCE) != 0) { - prefix[0] = ' '; - prefix[1] = ' '; - prefix[2] = ' '; + struct btrace_line_range lines; + + lines = btrace_find_line_range (insn->pc); + if (!btrace_line_range_is_empty (lines) + && !btrace_line_range_contains_range (last_lines, lines)) + { + btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list, + flags); + last_lines = lines; + } + else if (!src_and_asm_tuple.has_value ()) + { + gdb_assert (!asm_list.has_value ()); + + src_and_asm_tuple.emplace (uiout, "src_and_asm_line"); + + /* No source information. */ + asm_list.emplace (uiout, "line_asm_insn"); + } + + gdb_assert (src_and_asm_tuple.has_value ()); + gdb_assert (asm_list.has_value ()); } - prefix[3] = 0; - /* Print the instruction index. */ - ui_out_field_uint (uiout, "index", btrace_insn_number (&it)); - ui_out_text (uiout, "\t"); + memset (&dinsn, 0, sizeof (dinsn)); + dinsn.number = btrace_insn_number (&it); + dinsn.addr = insn->pc; - /* Indicate speculative execution by a leading '?'. */ if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0) - prefix[0] = '?'; + dinsn.is_speculative = 1; - /* Print the prefix; we tell gdb_disassembly below to omit it. 
*/ - ui_out_field_fmt (uiout, "prefix", "%s", prefix); - - /* Disassembly with '/m' flag may not produce the expected result. - See PR gdb/11833. */ - gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC, - 1, insn->pc, insn->pc + 1); + disasm.pretty_print_insn (uiout, &dinsn, flags); } } } -/* The to_insn_history method of target record-btrace. */ +/* The insn_history method of target record-btrace. */ -static void -record_btrace_insn_history (struct target_ops *self, int size, int flags) +void +record_btrace_target::insn_history (int size, gdb_disassembly_flags flags) { struct btrace_thread_info *btinfo; struct btrace_insn_history *history; struct btrace_insn_iterator begin, end; - struct cleanup *uiout_cleanup; struct ui_out *uiout; unsigned int context, covered; uiout = current_uiout; - uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout, - "insn history"); + ui_out_emit_tuple tuple_emitter (uiout, "insn history"); context = abs (size); if (context == 0) error (_("Bad record instruction-history-size.")); @@ -625,7 +882,7 @@ record_btrace_insn_history (struct target_ops *self, int size, int flags) { struct btrace_insn_iterator *replay; - DEBUG ("insn-history (0x%x): %d", flags, size); + DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size); /* If we're replaying, we start at the replay position. Otherwise, we start at the tail of the trace. */ @@ -657,7 +914,7 @@ record_btrace_insn_history (struct target_ops *self, int size, int flags) begin = history->begin; end = history->end; - DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size, + DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size, btrace_insn_number (&begin), btrace_insn_number (&end)); if (size < 0) @@ -683,30 +940,26 @@ record_btrace_insn_history (struct target_ops *self, int size, int flags) } btrace_set_insn_history (btinfo, &begin, &end); - do_cleanups (uiout_cleanup); } -/* The to_insn_history_range method of target record-btrace. 
*/ +/* The insn_history_range method of target record-btrace. */ -static void -record_btrace_insn_history_range (struct target_ops *self, - ULONGEST from, ULONGEST to, int flags) +void +record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to, + gdb_disassembly_flags flags) { struct btrace_thread_info *btinfo; - struct btrace_insn_history *history; struct btrace_insn_iterator begin, end; - struct cleanup *uiout_cleanup; struct ui_out *uiout; unsigned int low, high; int found; uiout = current_uiout; - uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout, - "insn history"); + ui_out_emit_tuple tuple_emitter (uiout, "insn history"); low = from; high = to; - DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high); + DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high); /* Check for wrap-arounds. */ if (low != from || high != to) @@ -735,15 +988,13 @@ record_btrace_insn_history_range (struct target_ops *self, btrace_insn_history (uiout, btinfo, &begin, &end, flags); btrace_set_insn_history (btinfo, &begin, &end); - - do_cleanups (uiout_cleanup); } -/* The to_insn_history_from method of target record-btrace. */ +/* The insn_history_from method of target record-btrace. */ -static void -record_btrace_insn_history_from (struct target_ops *self, - ULONGEST from, int size, int flags) +void +record_btrace_target::insn_history_from (ULONGEST from, int size, + gdb_disassembly_flags flags) { ULONGEST begin, end, context; @@ -770,7 +1021,7 @@ record_btrace_insn_history_from (struct target_ops *self, end = ULONGEST_MAX; } - record_btrace_insn_history_range (self, begin, end, flags); + insn_history_range (begin, end, flags); } /* Print the instruction number range for a function call history line. 
*/ @@ -781,14 +1032,14 @@ btrace_call_history_insn_range (struct ui_out *uiout, { unsigned int begin, end, size; - size = VEC_length (btrace_insn_s, bfun->insn); + size = bfun->insn.size (); gdb_assert (size > 0); begin = bfun->insn_offset; end = begin + size - 1; ui_out_field_uint (uiout, "insn begin", begin); - ui_out_text (uiout, ","); + uiout->text (","); ui_out_field_uint (uiout, "insn end", end); } @@ -801,10 +1052,8 @@ static void btrace_compute_src_line_range (const struct btrace_function *bfun, int *pbegin, int *pend) { - struct btrace_insn *insn; struct symtab *symtab; struct symbol *sym; - unsigned int idx; int begin, end; begin = INT_MAX; @@ -816,16 +1065,16 @@ btrace_compute_src_line_range (const struct btrace_function *bfun, symtab = symbol_symtab (sym); - for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx) + for (const btrace_insn &insn : bfun->insn) { struct symtab_and_line sal; - sal = find_pc_line (insn->pc, 0); + sal = find_pc_line (insn.pc, 0); if (sal.symtab != symtab || sal.line == 0) continue; - begin = min (begin, sal.line); - end = max (end, sal.line); + begin = std::min (begin, sal.line); + end = std::max (end, sal.line); } out: @@ -846,21 +1095,21 @@ btrace_call_history_src_line (struct ui_out *uiout, if (sym == NULL) return; - ui_out_field_string (uiout, "file", + uiout->field_string ("file", symtab_to_filename_for_display (symbol_symtab (sym))); btrace_compute_src_line_range (bfun, &begin, &end); if (end < begin) return; - ui_out_text (uiout, ":"); - ui_out_field_int (uiout, "min line", begin); + uiout->text (":"); + uiout->field_int ("min line", begin); if (end == begin) return; - ui_out_text (uiout, ","); - ui_out_field_int (uiout, "max line", end); + uiout->text (","); + uiout->field_int ("max line", end); } /* Get the name of a branch trace function. 
*/ @@ -892,11 +1141,12 @@ btrace_call_history (struct ui_out *uiout, const struct btrace_thread_info *btinfo, const struct btrace_call_iterator *begin, const struct btrace_call_iterator *end, - enum record_print_flag flags) + int int_flags) { struct btrace_call_iterator it; + record_print_flags flags = (enum record_print_flag) int_flags; - DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin), + DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin), btrace_call_number (end)); for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1)) @@ -911,7 +1161,7 @@ btrace_call_history (struct ui_out *uiout, /* Print the function index. */ ui_out_field_uint (uiout, "index", bfun->number); - ui_out_text (uiout, "\t"); + uiout->text ("\t"); /* Indicate gaps in the trace. */ if (bfun->errcode != 0) @@ -933,47 +1183,45 @@ btrace_call_history (struct ui_out *uiout, int level = bfun->level + btinfo->level, i; for (i = 0; i < level; ++i) - ui_out_text (uiout, " "); + uiout->text (" "); } if (sym != NULL) - ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym)); + uiout->field_string ("function", SYMBOL_PRINT_NAME (sym)); else if (msym != NULL) - ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym)); - else if (!ui_out_is_mi_like_p (uiout)) - ui_out_field_string (uiout, "function", "??"); + uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym)); + else if (!uiout->is_mi_like_p ()) + uiout->field_string ("function", "??"); if ((flags & RECORD_PRINT_INSN_RANGE) != 0) { - ui_out_text (uiout, _("\tinst ")); + uiout->text (_("\tinst ")); btrace_call_history_insn_range (uiout, bfun); } if ((flags & RECORD_PRINT_SRC_LINE) != 0) { - ui_out_text (uiout, _("\tat ")); + uiout->text (_("\tat ")); btrace_call_history_src_line (uiout, bfun); } - ui_out_text (uiout, "\n"); + uiout->text ("\n"); } } -/* The to_call_history method of target record-btrace. */ +/* The call_history method of target record-btrace. 
*/ -static void -record_btrace_call_history (struct target_ops *self, int size, int flags) +void +record_btrace_target::call_history (int size, record_print_flags flags) { struct btrace_thread_info *btinfo; struct btrace_call_history *history; struct btrace_call_iterator begin, end; - struct cleanup *uiout_cleanup; struct ui_out *uiout; unsigned int context, covered; uiout = current_uiout; - uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout, - "insn history"); + ui_out_emit_tuple tuple_emitter (uiout, "insn history"); context = abs (size); if (context == 0) error (_("Bad record function-call-history-size.")); @@ -984,15 +1232,15 @@ record_btrace_call_history (struct target_ops *self, int size, int flags) { struct btrace_insn_iterator *replay; - DEBUG ("call-history (0x%x): %d", flags, size); + DEBUG ("call-history (0x%x): %d", (int) flags, size); /* If we're replaying, we start at the replay position. Otherwise, we start at the tail of the trace. */ replay = btinfo->replay; if (replay != NULL) { - begin.function = replay->function; begin.btinfo = btinfo; + begin.index = replay->call_index; } else btrace_call_end (&begin, btinfo); @@ -1019,7 +1267,7 @@ record_btrace_call_history (struct target_ops *self, int size, int flags) begin = history->begin; end = history->end; - DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size, + DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size, btrace_call_number (&begin), btrace_call_number (&end)); if (size < 0) @@ -1045,30 +1293,26 @@ record_btrace_call_history (struct target_ops *self, int size, int flags) } btrace_set_call_history (btinfo, &begin, &end); - do_cleanups (uiout_cleanup); } -/* The to_call_history_range method of target record-btrace. */ +/* The call_history_range method of target record-btrace. 
*/ -static void -record_btrace_call_history_range (struct target_ops *self, - ULONGEST from, ULONGEST to, int flags) +void +record_btrace_target::call_history_range (ULONGEST from, ULONGEST to, + record_print_flags flags) { struct btrace_thread_info *btinfo; - struct btrace_call_history *history; struct btrace_call_iterator begin, end; - struct cleanup *uiout_cleanup; struct ui_out *uiout; unsigned int low, high; int found; uiout = current_uiout; - uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout, - "func history"); + ui_out_emit_tuple tuple_emitter (uiout, "func history"); low = from; high = to; - DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high); + DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high); /* Check for wrap-arounds. */ if (low != from || high != to) @@ -1097,15 +1341,13 @@ record_btrace_call_history_range (struct target_ops *self, btrace_call_history (uiout, btinfo, &begin, &end, flags); btrace_set_call_history (btinfo, &begin, &end); - - do_cleanups (uiout_cleanup); } -/* The to_call_history_from method of target record-btrace. */ +/* The call_history_from method of target record-btrace. */ -static void -record_btrace_call_history_from (struct target_ops *self, - ULONGEST from, int size, int flags) +void +record_btrace_target::call_history_from (ULONGEST from, int size, + record_print_flags flags) { ULONGEST begin, end, context; @@ -1132,37 +1374,59 @@ record_btrace_call_history_from (struct target_ops *self, end = ULONGEST_MAX; } - record_btrace_call_history_range (self, begin, end, flags); + call_history_range ( begin, end, flags); } -/* The to_record_is_replaying method of target record-btrace. */ +/* The record_method method of target record-btrace. 
*/ -static int -record_btrace_is_replaying (struct target_ops *self) +enum record_method +record_btrace_target::record_method (ptid_t ptid) +{ + struct thread_info * const tp = find_thread_ptid (ptid); + + if (tp == NULL) + error (_("No thread.")); + + if (tp->btrace.target == NULL) + return RECORD_METHOD_NONE; + + return RECORD_METHOD_BTRACE; +} + +/* The record_is_replaying method of target record-btrace. */ + +bool +record_btrace_target::record_is_replaying (ptid_t ptid) { struct thread_info *tp; ALL_NON_EXITED_THREADS (tp) - if (btrace_is_replaying (tp)) - return 1; + if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp)) + return true; - return 0; + return false; } -/* The to_xfer_partial method of target record-btrace. */ +/* The record_will_replay method of target record-btrace. */ -static enum target_xfer_status -record_btrace_xfer_partial (struct target_ops *ops, enum target_object object, - const char *annex, gdb_byte *readbuf, - const gdb_byte *writebuf, ULONGEST offset, - ULONGEST len, ULONGEST *xfered_len) +bool +record_btrace_target::record_will_replay (ptid_t ptid, int dir) { - struct target_ops *t; + return dir == EXEC_REVERSE || record_is_replaying (ptid); +} + +/* The xfer_partial method of target record-btrace. */ +enum target_xfer_status +record_btrace_target::xfer_partial (enum target_object object, + const char *annex, gdb_byte *readbuf, + const gdb_byte *writebuf, ULONGEST offset, + ULONGEST len, ULONGEST *xfered_len) +{ /* Filter out requests that don't make sense during replay. */ if (replay_memory_access == replay_memory_access_read_only && !record_btrace_generating_corefile - && record_btrace_is_replaying (ops)) + && record_is_replaying (inferior_ptid)) { switch (object) { @@ -1178,7 +1442,7 @@ record_btrace_xfer_partial (struct target_ops *ops, enum target_object object, } /* We allow reading readonly memory. 
*/ - section = target_section_by_addr (ops, offset); + section = target_section_by_addr (this, offset); if (section != NULL) { /* Check if the section we found is readonly. */ @@ -1187,7 +1451,7 @@ record_btrace_xfer_partial (struct target_ops *ops, enum target_object object, & SEC_READONLY) != 0) { /* Truncate the request to fit into this section. */ - len = min (len, section->endaddr - offset); + len = std::min (len, section->endaddr - offset); break; } } @@ -1199,17 +1463,15 @@ record_btrace_xfer_partial (struct target_ops *ops, enum target_object object, } /* Forward the request. */ - ops = ops->beneath; - return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf, - offset, len, xfered_len); + return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf, + offset, len, xfered_len); } -/* The to_insert_breakpoint method of target record-btrace. */ +/* The insert_breakpoint method of target record-btrace. */ -static int -record_btrace_insert_breakpoint (struct target_ops *ops, - struct gdbarch *gdbarch, - struct bp_target_info *bp_tgt) +int +record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch, + struct bp_target_info *bp_tgt) { const char *old; int ret; @@ -1222,7 +1484,7 @@ record_btrace_insert_breakpoint (struct target_ops *ops, ret = 0; TRY { - ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt); + ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt); } CATCH (except, RETURN_MASK_ALL) { @@ -1235,12 +1497,12 @@ record_btrace_insert_breakpoint (struct target_ops *ops, return ret; } -/* The to_remove_breakpoint method of target record-btrace. */ +/* The remove_breakpoint method of target record-btrace. 
*/ -static int -record_btrace_remove_breakpoint (struct target_ops *ops, - struct gdbarch *gdbarch, - struct bp_target_info *bp_tgt) +int +record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch, + struct bp_target_info *bp_tgt, + enum remove_bp_reason reason) { const char *old; int ret; @@ -1253,7 +1515,7 @@ record_btrace_remove_breakpoint (struct target_ops *ops, ret = 0; TRY { - ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt); + ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason); } CATCH (except, RETURN_MASK_ALL) { @@ -1266,16 +1528,15 @@ record_btrace_remove_breakpoint (struct target_ops *ops, return ret; } -/* The to_fetch_registers method of target record-btrace. */ +/* The fetch_registers method of target record-btrace. */ -static void -record_btrace_fetch_registers (struct target_ops *ops, - struct regcache *regcache, int regno) +void +record_btrace_target::fetch_registers (struct regcache *regcache, int regno) { struct btrace_insn_iterator *replay; struct thread_info *tp; - tp = find_thread_ptid (inferior_ptid); + tp = find_thread_ptid (regcache->ptid ()); gdb_assert (tp != NULL); replay = tp->btrace.replay; @@ -1285,7 +1546,7 @@ record_btrace_fetch_registers (struct target_ops *ops, struct gdbarch *gdbarch; int pcreg; - gdbarch = get_regcache_arch (regcache); + gdbarch = regcache->arch (); pcreg = gdbarch_pc_regnum (gdbarch); if (pcreg < 0) return; @@ -1297,46 +1558,38 @@ record_btrace_fetch_registers (struct target_ops *ops, insn = btrace_insn_get (replay); gdb_assert (insn != NULL); - regcache_raw_supply (regcache, regno, &insn->pc); + regcache->raw_supply (regno, &insn->pc); } else - { - struct target_ops *t = ops->beneath; - - t->to_fetch_registers (t, regcache, regno); - } + this->beneath ()->fetch_registers (regcache, regno); } -/* The to_store_registers method of target record-btrace. */ +/* The store_registers method of target record-btrace. 
*/ -static void -record_btrace_store_registers (struct target_ops *ops, - struct regcache *regcache, int regno) +void +record_btrace_target::store_registers (struct regcache *regcache, int regno) { struct target_ops *t; - if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops)) - error (_("This record target does not allow writing registers.")); + if (!record_btrace_generating_corefile + && record_is_replaying (regcache->ptid ())) + error (_("Cannot write registers while replaying.")); gdb_assert (may_write_registers != 0); - t = ops->beneath; - t->to_store_registers (t, regcache, regno); + this->beneath ()->store_registers (regcache, regno); } -/* The to_prepare_to_store method of target record-btrace. */ +/* The prepare_to_store method of target record-btrace. */ -static void -record_btrace_prepare_to_store (struct target_ops *ops, - struct regcache *regcache) +void +record_btrace_target::prepare_to_store (struct regcache *regcache) { - struct target_ops *t; - - if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops)) + if (!record_btrace_generating_corefile + && record_is_replaying (regcache->ptid ())) return; - t = ops->beneath; - t->to_prepare_to_store (t, regcache); + this->beneath ()->prepare_to_store (regcache); } /* The branch trace frame cache. 
*/ @@ -1362,7 +1615,8 @@ static htab_t bfcache; static hashval_t bfcache_hash (const void *arg) { - const struct btrace_frame_cache *cache = arg; + const struct btrace_frame_cache *cache + = (const struct btrace_frame_cache *) arg; return htab_hash_pointer (cache->frame); } @@ -1372,8 +1626,10 @@ bfcache_hash (const void *arg) static int bfcache_eq (const void *arg1, const void *arg2) { - const struct btrace_frame_cache *cache1 = arg1; - const struct btrace_frame_cache *cache2 = arg2; + const struct btrace_frame_cache *cache1 + = (const struct btrace_frame_cache *) arg1; + const struct btrace_frame_cache *cache2 + = (const struct btrace_frame_cache *) arg2; return cache1->frame == cache2->frame; } @@ -1402,7 +1658,6 @@ static const struct btrace_function * btrace_get_frame_function (struct frame_info *frame) { const struct btrace_frame_cache *cache; - const struct btrace_function *bfun; struct btrace_frame_cache pattern; void **slot; @@ -1412,7 +1667,7 @@ btrace_get_frame_function (struct frame_info *frame) if (slot == NULL) return NULL; - cache = *slot; + cache = (const struct btrace_frame_cache *) *slot; return cache->bfun; } @@ -1425,11 +1680,11 @@ record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame, const struct btrace_frame_cache *cache; const struct btrace_function *bfun; - cache = *this_cache; + cache = (const struct btrace_frame_cache *) *this_cache; bfun = cache->bfun; gdb_assert (bfun != NULL); - if (bfun->up == NULL) + if (bfun->up == 0) return UNWIND_UNAVAILABLE; return UNWIND_NO_REASON; @@ -1443,15 +1698,16 @@ record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache, { const struct btrace_frame_cache *cache; const struct btrace_function *bfun; + struct btrace_call_iterator it; CORE_ADDR code, special; - cache = *this_cache; + cache = (const struct btrace_frame_cache *) *this_cache; bfun = cache->bfun; gdb_assert (bfun != NULL); - while (bfun->segment.prev != NULL) - bfun = bfun->segment.prev; + while 
(btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0) + bfun = btrace_call_get (&it); code = get_frame_func (this_frame); special = bfun->number; @@ -1473,7 +1729,7 @@ record_btrace_frame_prev_register (struct frame_info *this_frame, { const struct btrace_frame_cache *cache; const struct btrace_function *bfun, *caller; - const struct btrace_insn *insn; + struct btrace_call_iterator it; struct gdbarch *gdbarch; CORE_ADDR pc; int pcreg; @@ -1484,25 +1740,21 @@ record_btrace_frame_prev_register (struct frame_info *this_frame, throw_error (NOT_AVAILABLE_ERROR, _("Registers are not available in btrace record history")); - cache = *this_cache; + cache = (const struct btrace_frame_cache *) *this_cache; bfun = cache->bfun; gdb_assert (bfun != NULL); - caller = bfun->up; - if (caller == NULL) + if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0) throw_error (NOT_AVAILABLE_ERROR, _("No caller in btrace record history")); + caller = btrace_call_get (&it); + if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0) - { - insn = VEC_index (btrace_insn_s, caller->insn, 0); - pc = insn->pc; - } + pc = caller->insn.front ().pc; else { - insn = VEC_last (btrace_insn_s, caller->insn); - pc = insn->pc; - + pc = caller->insn.back ().pc; pc += gdb_insn_length (gdbarch, pc); } @@ -1526,8 +1778,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self, struct frame_info *next; /* THIS_FRAME does not contain a reference to its thread. 
*/ - tp = find_thread_ptid (inferior_ptid); - gdb_assert (tp != NULL); + tp = inferior_thread (); bfun = NULL; next = get_next_frame (this_frame); @@ -1537,15 +1788,21 @@ record_btrace_frame_sniffer (const struct frame_unwind *self, replay = tp->btrace.replay; if (replay != NULL) - bfun = replay->function; + bfun = &replay->btinfo->functions[replay->call_index]; } else { const struct btrace_function *callee; + struct btrace_call_iterator it; callee = btrace_get_frame_function (next); - if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0) - bfun = callee->up; + if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0) + return 0; + + if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0) + return 0; + + bfun = btrace_call_get (&it); } if (bfun == NULL) @@ -1572,7 +1829,9 @@ record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self, { const struct btrace_function *bfun, *callee; struct btrace_frame_cache *cache; + struct btrace_call_iterator it; struct frame_info *next; + struct thread_info *tinfo; next = get_next_frame (this_frame); if (next == NULL) @@ -1585,16 +1844,18 @@ record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self, if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0) return 0; - bfun = callee->up; - if (bfun == NULL) + tinfo = inferior_thread (); + if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0) return 0; + bfun = btrace_call_get (&it); + DEBUG ("[frame] sniffed tailcall frame for %s on level %d", btrace_get_bfun_name (bfun), bfun->level); /* This is our frame. Initialize the frame cache. 
*/ cache = bfcache_new (this_frame); - cache->tp = find_thread_ptid (inferior_ptid); + cache->tp = tinfo; cache->bfun = bfun; *this_cache = cache; @@ -1607,7 +1868,7 @@ record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache) struct btrace_frame_cache *cache; void **slot; - cache = this_cache; + cache = (struct btrace_frame_cache *) this_cache; slot = htab_find_slot (bfcache, cache, NO_INSERT); gdb_assert (slot != NULL); @@ -1643,18 +1904,18 @@ const struct frame_unwind record_btrace_tailcall_frame_unwind = record_btrace_frame_dealloc_cache }; -/* Implement the to_get_unwinder method. */ +/* Implement the get_unwinder method. */ -static const struct frame_unwind * -record_btrace_to_get_unwinder (struct target_ops *self) +const struct frame_unwind * +record_btrace_target::get_unwinder () { return &record_btrace_frame_unwind; } -/* Implement the to_get_tailcall_unwinder method. */ +/* Implement the get_tailcall_unwinder method. */ -static const struct frame_unwind * -record_btrace_to_get_tailcall_unwinder (struct target_ops *self) +const struct frame_unwind * +record_btrace_target::get_tailcall_unwinder () { return &record_btrace_tailcall_frame_unwind; } @@ -1693,34 +1954,62 @@ record_btrace_resume_thread (struct thread_info *tp, { struct btrace_thread_info *btinfo; - DEBUG ("resuming thread %d (%s): %x (%s)", tp->num, + DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp), target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag)); btinfo = &tp->btrace; - if ((btinfo->flags & BTHR_MOVE) != 0) - error (_("Thread already moving.")); - /* Fetch the latest branch trace. */ - btrace_fetch (tp); + btrace_fetch (tp, record_btrace_get_cpu ()); - /* A resume request overwrites a preceding stop request. */ - btinfo->flags &= ~BTHR_STOP; + /* A resume request overwrites a preceding resume or stop request. */ + btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP); btinfo->flags |= flag; } -/* Find the thread to resume given a PTID. 
*/ +/* Get the current frame for TP. */ -static struct thread_info * -record_btrace_find_resume_thread (ptid_t ptid) +static struct frame_info * +get_thread_current_frame (struct thread_info *tp) { - struct thread_info *tp; + struct frame_info *frame; + ptid_t old_inferior_ptid; + int executing; + + /* Set current thread, which is implicitly used by + get_current_frame. */ + scoped_restore_current_thread restore_thread; + + switch_to_thread (tp); + + /* Clear the executing flag to allow changes to the current frame. + We are not actually running, yet. We just started a reverse execution + command or a record goto command. + For the latter, EXECUTING is false and this has no effect. + For the former, EXECUTING is true and we're in wait, about to + move the thread. Since we need to recompute the stack, we temporarily + set EXECUTING to flase. */ + executing = tp->executing; + set_executing (inferior_ptid, false); + + frame = NULL; + TRY + { + frame = get_current_frame (); + } + CATCH (except, RETURN_MASK_ALL) + { + /* Restore the previous execution state. */ + set_executing (inferior_ptid, executing); + + throw_exception (except); + } + END_CATCH - /* When asked to resume everything, we pick the current thread. */ - if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid)) - ptid = inferior_ptid; + /* Restore the previous execution state. */ + set_executing (inferior_ptid, executing); - return find_thread_ptid (ptid); + return frame; } /* Start replaying a thread. */ @@ -1730,25 +2019,14 @@ record_btrace_start_replaying (struct thread_info *tp) { struct btrace_insn_iterator *replay; struct btrace_thread_info *btinfo; - int executing; btinfo = &tp->btrace; replay = NULL; /* We can't start replaying without trace. */ - if (btinfo->begin == NULL) + if (btinfo->functions.empty ()) return NULL; - /* Clear the executing flag to allow changes to the current frame. - We are not actually running, yet. We just started a reverse execution - command or a record goto command. 
- For the latter, EXECUTING is false and this has no effect. - For the former, EXECUTING is true and we're in to_wait, about to - move the thread. Since we need to recompute the stack, we temporarily - set EXECUTING to flase. */ - executing = is_executing (tp->ptid); - set_executing (tp->ptid, 0); - /* GDB stores the current frame_id when stepping in order to detects steps into subroutines. Since frames are computed differently when we're replaying, we need to @@ -1761,7 +2039,7 @@ record_btrace_start_replaying (struct thread_info *tp) int upd_step_frame_id, upd_step_stack_frame_id; /* The current frame without replaying - computed via normal unwind. */ - frame = get_current_frame (); + frame = get_thread_current_frame (tp); frame_id = get_frame_id (frame); /* Check if we need to update any stepping-related frame id's. */ @@ -1790,10 +2068,10 @@ record_btrace_start_replaying (struct thread_info *tp) btinfo->replay = replay; /* Make sure we're not using any stale registers. */ - registers_changed_ptid (tp->ptid); + registers_changed_thread (tp); /* The current frame with replaying - computed via btrace unwind. */ - frame = get_current_frame (); + frame = get_thread_current_frame (tp); frame_id = get_frame_id (frame); /* Replace stepping related frames where necessary. */ @@ -1804,21 +2082,15 @@ record_btrace_start_replaying (struct thread_info *tp) } CATCH (except, RETURN_MASK_ALL) { - /* Restore the previous execution state. */ - set_executing (tp->ptid, executing); - xfree (btinfo->replay); btinfo->replay = NULL; - registers_changed_ptid (tp->ptid); + registers_changed_thread (tp); throw_exception (except); } END_CATCH - /* Restore the previous execution state. */ - set_executing (tp->ptid, executing); - return replay; } @@ -1835,56 +2107,94 @@ record_btrace_stop_replaying (struct thread_info *tp) btinfo->replay = NULL; /* Make sure we're not leaving any stale registers. 
*/ - registers_changed_ptid (tp->ptid); + registers_changed_thread (tp); } -/* The to_resume method of target record-btrace. */ +/* Stop replaying TP if it is at the end of its execution history. */ static void -record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step, - enum gdb_signal signal) +record_btrace_stop_replaying_at_end (struct thread_info *tp) { - struct thread_info *tp, *other; - enum btrace_thread_flag flag; + struct btrace_insn_iterator *replay, end; + struct btrace_thread_info *btinfo; + + btinfo = &tp->btrace; + replay = btinfo->replay; + + if (replay == NULL) + return; + + btrace_insn_end (&end, btinfo); + + if (btrace_insn_cmp (replay, &end) == 0) + record_btrace_stop_replaying (tp); +} + +/* The resume method of target record-btrace. */ + +void +record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal) +{ + struct thread_info *tp; + enum btrace_thread_flag flag, cflag; DEBUG ("resume %s: %s%s", target_pid_to_str (ptid), - execution_direction == EXEC_REVERSE ? "reverse-" : "", + ::execution_direction == EXEC_REVERSE ? "reverse-" : "", step ? "step" : "cont"); - /* Store the execution direction of the last resume. */ - record_btrace_resume_exec_dir = execution_direction; + /* Store the execution direction of the last resume. - tp = record_btrace_find_resume_thread (ptid); - if (tp == NULL) - error (_("Cannot find thread to resume.")); + If there is more than one resume call, we have to rely on infrun + to not change the execution direction in-between. */ + record_btrace_resume_exec_dir = ::execution_direction; - /* Stop replaying other threads if the thread to resume is not replaying. */ - if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE) - ALL_NON_EXITED_THREADS (other) - record_btrace_stop_replaying (other); + /* As long as we're not replaying, just forward the request. - /* As long as we're not replaying, just forward the request. 
*/ - if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE) + For non-stop targets this means that no thread is replaying. In order to + make progress, we may need to explicitly move replaying threads to the end + of their execution history. */ + if ((::execution_direction != EXEC_REVERSE) + && !record_is_replaying (minus_one_ptid)) { - ops = ops->beneath; - return ops->to_resume (ops, ptid, step, signal); + this->beneath ()->resume (ptid, step, signal); + return; } /* Compute the btrace thread flag for the requested move. */ - if (step == 0) - flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT; + if (::execution_direction == EXEC_REVERSE) + { + flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP; + cflag = BTHR_RCONT; + } else - flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP; - - /* At the moment, we only move a single thread. We could also move - all threads in parallel by single-stepping each resumed thread - until the first runs into an event. - When we do that, we would want to continue all other threads. - For now, just resume one thread to not confuse to_wait. */ - record_btrace_resume_thread (tp, flag); + { + flag = step == 0 ? BTHR_CONT : BTHR_STEP; + cflag = BTHR_CONT; + } /* We just indicate the resume intent here. The actual stepping happens in - record_btrace_wait below. */ + record_btrace_wait below. + + For all-stop targets, we only step INFERIOR_PTID and continue others. */ + if (!target_is_non_stop_p ()) + { + gdb_assert (ptid_match (inferior_ptid, ptid)); + + ALL_NON_EXITED_THREADS (tp) + if (ptid_match (tp->ptid, ptid)) + { + if (ptid_match (tp->ptid, inferior_ptid)) + record_btrace_resume_thread (tp, flag); + else + record_btrace_resume_thread (tp, cflag); + } + } + else + { + ALL_NON_EXITED_THREADS (tp) + if (ptid_match (tp->ptid, ptid)) + record_btrace_resume_thread (tp, flag); + } /* Async support. 
*/ if (target_can_async_p ()) @@ -1894,6 +2204,16 @@ record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step, } } +/* The commit_resume method of target record-btrace. */ + +void +record_btrace_target::commit_resume () +{ + if ((::execution_direction != EXEC_REVERSE) + && !record_is_replaying (minus_one_ptid)) + beneath ()->commit_resume (); +} + /* Cancel resuming TP. */ static void @@ -1905,31 +2225,13 @@ record_btrace_cancel_resume (struct thread_info *tp) if (flags == 0) return; - DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num, + DEBUG ("cancel resume thread %s (%s): %x (%s)", + print_thread_id (tp), target_pid_to_str (tp->ptid), flags, btrace_thread_flag_to_str (flags)); tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP); -} - -/* Find a thread to move. */ - -static struct thread_info * -record_btrace_find_thread_to_move (ptid_t ptid) -{ - struct thread_info *tp; - - /* First check the parameter thread. */ - tp = find_thread_ptid (ptid); - if (tp != NULL && (tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0) - return tp; - - /* Otherwise, find one other thread that has been resumed. */ - ALL_NON_EXITED_THREADS (tp) - if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0) - return tp; - - return NULL; + record_btrace_stop_replaying_at_end (tp); } /* Return a target_waitstatus indicating that we ran out of history. */ @@ -1971,6 +2273,42 @@ btrace_step_stopped_on_request (void) return status; } +/* Return a target_waitstatus indicating a spurious stop. */ + +static struct target_waitstatus +btrace_step_spurious (void) +{ + struct target_waitstatus status; + + status.kind = TARGET_WAITKIND_SPURIOUS; + + return status; +} + +/* Return a target_waitstatus indicating that the thread was not resumed. */ + +static struct target_waitstatus +btrace_step_no_resumed (void) +{ + struct target_waitstatus status; + + status.kind = TARGET_WAITKIND_NO_RESUMED; + + return status; +} + +/* Return a target_waitstatus indicating that we should wait again. 
*/ + +static struct target_waitstatus +btrace_step_again (void) +{ + struct target_waitstatus status; + + status.kind = TARGET_WAITKIND_IGNORE; + + return status; +} + /* Clear the record histories. */ static void @@ -1991,7 +2329,6 @@ record_btrace_replay_at_breakpoint (struct thread_info *tp) struct btrace_insn_iterator *replay; struct btrace_thread_info *btinfo; const struct btrace_insn *insn; - struct inferior *inf; btinfo = &tp->btrace; replay = btinfo->replay; @@ -2003,32 +2340,121 @@ record_btrace_replay_at_breakpoint (struct thread_info *tp) if (insn == NULL) return 0; - inf = find_inferior_ptid (tp->ptid); - if (inf == NULL) - return 0; - - return record_check_stopped_by_breakpoint (inf->aspace, insn->pc, + return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc, &btinfo->stop_reason); } +/* Step one instruction in forward direction. */ + +static struct target_waitstatus +record_btrace_single_step_forward (struct thread_info *tp) +{ + struct btrace_insn_iterator *replay, end, start; + struct btrace_thread_info *btinfo; + + btinfo = &tp->btrace; + replay = btinfo->replay; + + /* We're done if we're not replaying. */ + if (replay == NULL) + return btrace_step_no_history (); + + /* Check if we're stepping a breakpoint. */ + if (record_btrace_replay_at_breakpoint (tp)) + return btrace_step_stopped (); + + /* Skip gaps during replay. If we end up at a gap (at the end of the trace), + jump back to the instruction at which we started. */ + start = *replay; + do + { + unsigned int steps; + + /* We will bail out here if we continue stepping after reaching the end + of the execution history. */ + steps = btrace_insn_next (replay, 1); + if (steps == 0) + { + *replay = start; + return btrace_step_no_history (); + } + } + while (btrace_insn_get (replay) == NULL); + + /* Determine the end of the instruction trace. */ + btrace_insn_end (&end, btinfo); + + /* The execution trace contains (and ends with) the current instruction. 
+ This instruction has not been executed, yet, so the trace really ends + one instruction earlier. */ + if (btrace_insn_cmp (replay, &end) == 0) + return btrace_step_no_history (); + + return btrace_step_spurious (); +} + +/* Step one instruction in backward direction. */ + +static struct target_waitstatus +record_btrace_single_step_backward (struct thread_info *tp) +{ + struct btrace_insn_iterator *replay, start; + struct btrace_thread_info *btinfo; + + btinfo = &tp->btrace; + replay = btinfo->replay; + + /* Start replaying if we're not already doing so. */ + if (replay == NULL) + replay = record_btrace_start_replaying (tp); + + /* If we can't step any further, we reached the end of the history. + Skip gaps during replay. If we end up at a gap (at the beginning of + the trace), jump back to the instruction at which we started. */ + start = *replay; + do + { + unsigned int steps; + + steps = btrace_insn_prev (replay, 1); + if (steps == 0) + { + *replay = start; + return btrace_step_no_history (); + } + } + while (btrace_insn_get (replay) == NULL); + + /* Check if we're stepping a breakpoint. + + For reverse-stepping, this check is after the step. There is logic in + infrun.c that handles reverse-stepping separately. See, for example, + proceed and adjust_pc_after_break. + + This code assumes that for reverse-stepping, PC points to the last + de-executed instruction, whereas for forward-stepping PC points to the + next to-be-executed instruction. */ + if (record_btrace_replay_at_breakpoint (tp)) + return btrace_step_stopped (); + + return btrace_step_spurious (); +} + /* Step a single thread. 
*/ static struct target_waitstatus record_btrace_step_thread (struct thread_info *tp) { - struct btrace_insn_iterator *replay, end; struct btrace_thread_info *btinfo; + struct target_waitstatus status; enum btrace_thread_flag flags; - unsigned int steps; - btinfo = &tp->btrace; - replay = btinfo->replay; flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP); btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP); - DEBUG ("stepping thread %d (%s): %x (%s)", tp->num, + DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp), target_pid_to_str (tp->ptid), flags, btrace_thread_flag_to_str (flags)); @@ -2045,177 +2471,211 @@ record_btrace_step_thread (struct thread_info *tp) return btrace_step_stopped_on_request (); case BTHR_STEP: - /* We're done if we're not replaying. */ - if (replay == NULL) - return btrace_step_no_history (); - - /* Skip gaps during replay. */ - do - { - steps = btrace_insn_next (replay, 1); - if (steps == 0) - { - record_btrace_stop_replaying (tp); - return btrace_step_no_history (); - } - } - while (btrace_insn_get (replay) == NULL); - - /* Determine the end of the instruction trace. */ - btrace_insn_end (&end, btinfo); - - /* We stop replaying if we reached the end of the trace. */ - if (btrace_insn_cmp (replay, &end) == 0) - record_btrace_stop_replaying (tp); + status = record_btrace_single_step_forward (tp); + if (status.kind != TARGET_WAITKIND_SPURIOUS) + break; return btrace_step_stopped (); case BTHR_RSTEP: - /* Start replaying if we're not already doing so. */ - if (replay == NULL) - replay = record_btrace_start_replaying (tp); - - /* If we can't step any further, we reached the end of the history. - Skip gaps during replay. 
*/ - do - { - steps = btrace_insn_prev (replay, 1); - if (steps == 0) - return btrace_step_no_history (); - - } - while (btrace_insn_get (replay) == NULL); + status = record_btrace_single_step_backward (tp); + if (status.kind != TARGET_WAITKIND_SPURIOUS) + break; return btrace_step_stopped (); case BTHR_CONT: - /* We're done if we're not replaying. */ - if (replay == NULL) - return btrace_step_no_history (); - - /* Determine the end of the instruction trace. */ - btrace_insn_end (&end, btinfo); + status = record_btrace_single_step_forward (tp); + if (status.kind != TARGET_WAITKIND_SPURIOUS) + break; - for (;;) - { - const struct btrace_insn *insn; - - /* Skip gaps during replay. */ - do - { - steps = btrace_insn_next (replay, 1); - if (steps == 0) - { - record_btrace_stop_replaying (tp); - return btrace_step_no_history (); - } + btinfo->flags |= flags; + return btrace_step_again (); - insn = btrace_insn_get (replay); - } - while (insn == NULL); + case BTHR_RCONT: + status = record_btrace_single_step_backward (tp); + if (status.kind != TARGET_WAITKIND_SPURIOUS) + break; - /* We stop replaying if we reached the end of the trace. */ - if (btrace_insn_cmp (replay, &end) == 0) - { - record_btrace_stop_replaying (tp); - return btrace_step_no_history (); - } + btinfo->flags |= flags; + return btrace_step_again (); + } - DEBUG ("stepping %d (%s) ... %s", tp->num, - target_pid_to_str (tp->ptid), - core_addr_to_string_nz (insn->pc)); + /* We keep threads moving at the end of their execution history. The wait + method will stop the thread for whom the event is reported. */ + if (status.kind == TARGET_WAITKIND_NO_HISTORY) + btinfo->flags |= flags; - if (record_btrace_replay_at_breakpoint (tp)) - return btrace_step_stopped (); - } + return status; +} - case BTHR_RCONT: - /* Start replaying if we're not already doing so. */ - if (replay == NULL) - replay = record_btrace_start_replaying (tp); +/* Announce further events if necessary. 
*/ - for (;;) - { - const struct btrace_insn *insn; +static void +record_btrace_maybe_mark_async_event + (const std::vector &moving, + const std::vector &no_history) +{ + bool more_moving = !moving.empty (); + bool more_no_history = !no_history.empty ();; - /* If we can't step any further, we reached the end of the history. - Skip gaps during replay. */ - do - { - steps = btrace_insn_prev (replay, 1); - if (steps == 0) - return btrace_step_no_history (); + if (!more_moving && !more_no_history) + return; - insn = btrace_insn_get (replay); - } - while (insn == NULL); + if (more_moving) + DEBUG ("movers pending"); - DEBUG ("reverse-stepping %d (%s) ... %s", tp->num, - target_pid_to_str (tp->ptid), - core_addr_to_string_nz (insn->pc)); + if (more_no_history) + DEBUG ("no-history pending"); - if (record_btrace_replay_at_breakpoint (tp)) - return btrace_step_stopped (); - } - } + mark_async_event_handler (record_btrace_async_inferior_event_handler); } -/* The to_wait method of target record-btrace. */ +/* The wait method of target record-btrace. */ -static ptid_t -record_btrace_wait (struct target_ops *ops, ptid_t ptid, - struct target_waitstatus *status, int options) +ptid_t +record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status, + int options) { - struct thread_info *tp, *other; + std::vector moving; + std::vector no_history; DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options); /* As long as we're not replaying, just forward the request. */ - if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE) + if ((::execution_direction != EXEC_REVERSE) + && !record_is_replaying (minus_one_ptid)) { - ops = ops->beneath; - return ops->to_wait (ops, ptid, status, options); + return this->beneath ()->wait (ptid, status, options); } - /* Let's find a thread to move. */ - tp = record_btrace_find_thread_to_move (ptid); - if (tp == NULL) + /* Keep a work list of moving threads. 
*/ + { + thread_info *tp; + + ALL_NON_EXITED_THREADS (tp) + { + if (ptid_match (tp->ptid, ptid) + && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)) + moving.push_back (tp); + } + } + + if (moving.empty ()) { - DEBUG ("wait %s: no thread", target_pid_to_str (ptid)); + *status = btrace_step_no_resumed (); - status->kind = TARGET_WAITKIND_IGNORE; - return minus_one_ptid; + DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid), + target_waitstatus_to_string (status).c_str ()); + + return null_ptid; } - /* We only move a single thread. We're not able to correlate threads. */ - *status = record_btrace_step_thread (tp); + /* Step moving threads one by one, one step each, until either one thread + reports an event or we run out of threads to step. + + When stepping more than one thread, chances are that some threads reach + the end of their execution history earlier than others. If we reported + this immediately, all-stop on top of non-stop would stop all threads and + resume the same threads next time. And we would report the same thread + having reached the end of its execution history again. + + In the worst case, this would starve the other threads. But even if other + threads would be allowed to make progress, this would result in far too + many intermediate stops. + + We therefore delay the reporting of "no execution history" until we have + nothing else to report. By this time, all threads should have moved to + either the beginning or the end of their execution history. There will + be a single user-visible stop. 
*/ + struct thread_info *eventing = NULL; + while ((eventing == NULL) && !moving.empty ()) + { + for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();) + { + thread_info *tp = moving[ix]; + + *status = record_btrace_step_thread (tp); + + switch (status->kind) + { + case TARGET_WAITKIND_IGNORE: + ix++; + break; + + case TARGET_WAITKIND_NO_HISTORY: + no_history.push_back (ordered_remove (moving, ix)); + break; + + default: + eventing = unordered_remove (moving, ix); + break; + } + } + } + + if (eventing == NULL) + { + /* We started with at least one moving thread. This thread must have + either stopped or reached the end of its execution history. + + In the former case, EVENTING must not be NULL. + In the latter case, NO_HISTORY must not be empty. */ + gdb_assert (!no_history.empty ()); + + /* We kept threads moving at the end of their execution history. Stop + EVENTING now that we are going to report its stop. */ + eventing = unordered_remove (no_history, 0); + eventing->btrace.flags &= ~BTHR_MOVE; + + *status = btrace_step_no_history (); + } + + gdb_assert (eventing != NULL); + + /* We kept threads replaying at the end of their execution history. Stop + replaying EVENTING now that we are going to report its stop. */ + record_btrace_stop_replaying_at_end (eventing); /* Stop all other threads. */ if (!target_is_non_stop_p ()) - ALL_NON_EXITED_THREADS (other) - record_btrace_cancel_resume (other); + { + thread_info *tp; + + ALL_NON_EXITED_THREADS (tp) + record_btrace_cancel_resume (tp); + } + + /* In async mode, we need to announce further events. */ + if (target_is_async_p ()) + record_btrace_maybe_mark_async_event (moving, no_history); /* Start record histories anew from the current position. */ - record_btrace_clear_histories (&tp->btrace); + record_btrace_clear_histories (&eventing->btrace); /* We moved the replay position but did not update registers. 
*/ - registers_changed_ptid (tp->ptid); + registers_changed_thread (eventing); + + DEBUG ("wait ended by thread %s (%s): %s", + print_thread_id (eventing), + target_pid_to_str (eventing->ptid), + target_waitstatus_to_string (status).c_str ()); - return tp->ptid; + return eventing->ptid; } -/* The to_stop method of target record-btrace. */ +/* The stop method of target record-btrace. */ -static void -record_btrace_stop (struct target_ops *ops, ptid_t ptid) +void +record_btrace_target::stop (ptid_t ptid) { DEBUG ("stop %s", target_pid_to_str (ptid)); /* As long as we're not replaying, just forward the request. */ - if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE) + if ((::execution_direction != EXEC_REVERSE) + && !record_is_replaying (minus_one_ptid)) { - ops = ops->beneath; - ops->to_stop (ops, ptid); + this->beneath ()->stop (ptid); } else { @@ -2230,94 +2690,92 @@ record_btrace_stop (struct target_ops *ops, ptid_t ptid) } } -/* The to_can_execute_reverse method of target record-btrace. */ +/* The can_execute_reverse method of target record-btrace. */ -static int -record_btrace_can_execute_reverse (struct target_ops *self) +bool +record_btrace_target::can_execute_reverse () { - return 1; + return true; } -/* The to_stopped_by_sw_breakpoint method of target record-btrace. */ +/* The stopped_by_sw_breakpoint method of target record-btrace. */ -static int -record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops) +bool +record_btrace_target::stopped_by_sw_breakpoint () { - if (record_btrace_is_replaying (ops)) + if (record_is_replaying (minus_one_ptid)) { struct thread_info *tp = inferior_thread (); return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT; } - return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath); + return this->beneath ()->stopped_by_sw_breakpoint (); } -/* The to_supports_stopped_by_sw_breakpoint method of target +/* The supports_stopped_by_sw_breakpoint method of target record-btrace. 
*/ -static int -record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops) +bool +record_btrace_target::supports_stopped_by_sw_breakpoint () { - if (record_btrace_is_replaying (ops)) - return 1; + if (record_is_replaying (minus_one_ptid)) + return true; - return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath); + return this->beneath ()->supports_stopped_by_sw_breakpoint (); } -/* The to_stopped_by_sw_breakpoint method of target record-btrace. */ +/* The stopped_by_sw_breakpoint method of target record-btrace. */ -static int -record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops) +bool +record_btrace_target::stopped_by_hw_breakpoint () { - if (record_btrace_is_replaying (ops)) + if (record_is_replaying (minus_one_ptid)) { struct thread_info *tp = inferior_thread (); return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT; } - return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath); + return this->beneath ()->stopped_by_hw_breakpoint (); } -/* The to_supports_stopped_by_hw_breakpoint method of target +/* The supports_stopped_by_hw_breakpoint method of target record-btrace. */ -static int -record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops) +bool +record_btrace_target::supports_stopped_by_hw_breakpoint () { - if (record_btrace_is_replaying (ops)) - return 1; + if (record_is_replaying (minus_one_ptid)) + return true; - return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath); + return this->beneath ()->supports_stopped_by_hw_breakpoint (); } -/* The to_update_thread_list method of target record-btrace. */ +/* The update_thread_list method of target record-btrace. */ -static void -record_btrace_update_thread_list (struct target_ops *ops) +void +record_btrace_target::update_thread_list () { /* We don't add or remove threads during replay. */ - if (record_btrace_is_replaying (ops)) + if (record_is_replaying (minus_one_ptid)) return; /* Forward the request. 
*/ - ops = ops->beneath; - ops->to_update_thread_list (ops); + this->beneath ()->update_thread_list (); } -/* The to_thread_alive method of target record-btrace. */ +/* The thread_alive method of target record-btrace. */ -static int -record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid) +bool +record_btrace_target::thread_alive (ptid_t ptid) { /* We don't add or remove threads during replay. */ - if (record_btrace_is_replaying (ops)) - return find_thread_ptid (ptid) != NULL; + if (record_is_replaying (minus_one_ptid)) + return true; /* Forward the request. */ - ops = ops->beneath; - return ops->to_thread_alive (ops, ptid); + return this->beneath ()->thread_alive (ptid); } /* Set the replay branch trace instruction iterator. If IT is NULL, replay @@ -2331,7 +2789,7 @@ record_btrace_set_replay (struct thread_info *tp, btinfo = &tp->btrace; - if (it == NULL || it->function == NULL) + if (it == NULL) record_btrace_stop_replaying (tp); else { @@ -2341,20 +2799,21 @@ record_btrace_set_replay (struct thread_info *tp, return; *btinfo->replay = *it; - registers_changed_ptid (tp->ptid); + registers_changed_thread (tp); } /* Start anew from the new replay position. */ record_btrace_clear_histories (btinfo); - stop_pc = regcache_read_pc (get_current_regcache ()); + inferior_thread ()->suspend.stop_pc + = regcache_read_pc (get_current_regcache ()); print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1); } -/* The to_goto_record_begin method of target record-btrace. */ +/* The goto_record_begin method of target record-btrace. */ -static void -record_btrace_goto_begin (struct target_ops *self) +void +record_btrace_target::goto_record_begin () { struct thread_info *tp; struct btrace_insn_iterator begin; @@ -2362,13 +2821,24 @@ record_btrace_goto_begin (struct target_ops *self) tp = require_btrace_thread (); btrace_insn_begin (&begin, &tp->btrace); + + /* Skip gaps at the beginning of the trace. 
*/ + while (btrace_insn_get (&begin) == NULL) + { + unsigned int steps; + + steps = btrace_insn_next (&begin, 1); + if (steps == 0) + error (_("No trace.")); + } + record_btrace_set_replay (tp, &begin); } -/* The to_goto_record_end method of target record-btrace. */ +/* The goto_record_end method of target record-btrace. */ -static void -record_btrace_goto_end (struct target_ops *ops) +void +record_btrace_target::goto_record_end () { struct thread_info *tp; @@ -2377,10 +2847,10 @@ record_btrace_goto_end (struct target_ops *ops) record_btrace_set_replay (tp, NULL); } -/* The to_goto_record method of target record-btrace. */ +/* The goto_record method of target record-btrace. */ -static void -record_btrace_goto (struct target_ops *self, ULONGEST insn) +void +record_btrace_target::goto_record (ULONGEST insn) { struct thread_info *tp; struct btrace_insn_iterator it; @@ -2396,97 +2866,53 @@ record_btrace_goto (struct target_ops *self, ULONGEST insn) tp = require_btrace_thread (); found = btrace_find_insn_by_number (&it, &tp->btrace, number); - if (found == 0) + + /* Check if the instruction could not be found or is a gap. */ + if (found == 0 || btrace_insn_get (&it) == NULL) error (_("No such instruction.")); record_btrace_set_replay (tp, &it); } -/* The to_execution_direction target method. */ +/* The record_stop_replaying method of target record-btrace. */ -static enum exec_direction_kind -record_btrace_execution_direction (struct target_ops *self) +void +record_btrace_target::record_stop_replaying () { - return record_btrace_resume_exec_dir; + struct thread_info *tp; + + ALL_NON_EXITED_THREADS (tp) + record_btrace_stop_replaying (tp); } -/* The to_prepare_to_generate_core target method. */ +/* The execution_direction target method. 
*/ -static void -record_btrace_prepare_to_generate_core (struct target_ops *self) +enum exec_direction_kind +record_btrace_target::execution_direction () { - record_btrace_generating_corefile = 1; + return record_btrace_resume_exec_dir; } -/* The to_done_generating_core target method. */ +/* The prepare_to_generate_core target method. */ -static void -record_btrace_done_generating_core (struct target_ops *self) +void +record_btrace_target::prepare_to_generate_core () { - record_btrace_generating_corefile = 0; + record_btrace_generating_corefile = 1; } -/* Initialize the record-btrace target ops. */ +/* The done_generating_core target method. */ -static void -init_record_btrace_ops (void) -{ - struct target_ops *ops; - - ops = &record_btrace_ops; - ops->to_shortname = "record-btrace"; - ops->to_longname = "Branch tracing target"; - ops->to_doc = "Collect control-flow trace and provide the execution history."; - ops->to_open = record_btrace_open; - ops->to_close = record_btrace_close; - ops->to_async = record_btrace_async; - ops->to_detach = record_detach; - ops->to_disconnect = record_disconnect; - ops->to_mourn_inferior = record_mourn_inferior; - ops->to_kill = record_kill; - ops->to_stop_recording = record_btrace_stop_recording; - ops->to_info_record = record_btrace_info; - ops->to_insn_history = record_btrace_insn_history; - ops->to_insn_history_from = record_btrace_insn_history_from; - ops->to_insn_history_range = record_btrace_insn_history_range; - ops->to_call_history = record_btrace_call_history; - ops->to_call_history_from = record_btrace_call_history_from; - ops->to_call_history_range = record_btrace_call_history_range; - ops->to_record_is_replaying = record_btrace_is_replaying; - ops->to_xfer_partial = record_btrace_xfer_partial; - ops->to_remove_breakpoint = record_btrace_remove_breakpoint; - ops->to_insert_breakpoint = record_btrace_insert_breakpoint; - ops->to_fetch_registers = record_btrace_fetch_registers; - ops->to_store_registers = 
record_btrace_store_registers; - ops->to_prepare_to_store = record_btrace_prepare_to_store; - ops->to_get_unwinder = &record_btrace_to_get_unwinder; - ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder; - ops->to_resume = record_btrace_resume; - ops->to_wait = record_btrace_wait; - ops->to_stop = record_btrace_stop; - ops->to_update_thread_list = record_btrace_update_thread_list; - ops->to_thread_alive = record_btrace_thread_alive; - ops->to_goto_record_begin = record_btrace_goto_begin; - ops->to_goto_record_end = record_btrace_goto_end; - ops->to_goto_record = record_btrace_goto; - ops->to_can_execute_reverse = record_btrace_can_execute_reverse; - ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint; - ops->to_supports_stopped_by_sw_breakpoint - = record_btrace_supports_stopped_by_sw_breakpoint; - ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint; - ops->to_supports_stopped_by_hw_breakpoint - = record_btrace_supports_stopped_by_hw_breakpoint; - ops->to_execution_direction = record_btrace_execution_direction; - ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core; - ops->to_done_generating_core = record_btrace_done_generating_core; - ops->to_stratum = record_stratum; - ops->to_magic = OPS_MAGIC; +void +record_btrace_target::done_generating_core () +{ + record_btrace_generating_corefile = 0; } /* Start recording in BTS format. */ static void -cmd_record_btrace_bts_start (char *args, int from_tty) +cmd_record_btrace_bts_start (const char *args, int from_tty) { if (args != NULL && *args != 0) error (_("Invalid argument.")); @@ -2505,10 +2931,10 @@ cmd_record_btrace_bts_start (char *args, int from_tty) END_CATCH } -/* Start recording Intel(R) Processor Trace. */ +/* Start recording in Intel Processor Trace format. 
*/ static void -cmd_record_btrace_pt_start (char *args, int from_tty) +cmd_record_btrace_pt_start (const char *args, int from_tty) { if (args != NULL && *args != 0) error (_("Invalid argument.")); @@ -2530,7 +2956,7 @@ cmd_record_btrace_pt_start (char *args, int from_tty) /* Alias for "target record". */ static void -cmd_record_btrace_start (char *args, int from_tty) +cmd_record_btrace_start (const char *args, int from_tty) { if (args != NULL && *args != 0) error (_("Invalid argument.")); @@ -2562,15 +2988,18 @@ cmd_record_btrace_start (char *args, int from_tty) /* The "set record btrace" command. */ static void -cmd_set_record_btrace (char *args, int from_tty) +cmd_set_record_btrace (const char *args, int from_tty) { - cmd_show_list (set_record_btrace_cmdlist, from_tty, ""); + printf_unfiltered (_("\"set record btrace\" must be followed " + "by an appropriate subcommand.\n")); + help_list (set_record_btrace_cmdlist, "set record btrace ", + all_commands, gdb_stdout); } /* The "show record btrace" command. */ static void -cmd_show_record_btrace (char *args, int from_tty) +cmd_show_record_btrace (const char *args, int from_tty) { cmd_show_list (show_record_btrace_cmdlist, from_tty, ""); } @@ -2585,10 +3014,116 @@ cmd_show_replay_memory_access (struct ui_file *file, int from_tty, replay_memory_access); } -/* The "set record btrace bts" command. */ +/* The "set record btrace cpu none" command. */ + +static void +cmd_set_record_btrace_cpu_none (const char *args, int from_tty) +{ + if (args != nullptr && *args != 0) + error (_("Trailing junk: '%s'."), args); + + record_btrace_cpu_state = CS_NONE; +} + +/* The "set record btrace cpu auto" command. */ + +static void +cmd_set_record_btrace_cpu_auto (const char *args, int from_tty) +{ + if (args != nullptr && *args != 0) + error (_("Trailing junk: '%s'."), args); + + record_btrace_cpu_state = CS_AUTO; +} + +/* The "set record btrace cpu" command. 
*/ static void -cmd_set_record_btrace_bts (char *args, int from_tty) +cmd_set_record_btrace_cpu (const char *args, int from_tty) +{ + if (args == nullptr) + args = ""; + + /* We use a hard-coded vendor string for now. */ + unsigned int family, model, stepping; + int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family, + &model, &l1, &stepping, &l2); + if (matches == 3) + { + if (strlen (args) != l2) + error (_("Trailing junk: '%s'."), args + l2); + } + else if (matches == 2) + { + if (strlen (args) != l1) + error (_("Trailing junk: '%s'."), args + l1); + + stepping = 0; + } + else + error (_("Bad format. See \"help set record btrace cpu\".")); + + if (USHRT_MAX < family) + error (_("Cpu family too big.")); + + if (UCHAR_MAX < model) + error (_("Cpu model too big.")); + + if (UCHAR_MAX < stepping) + error (_("Cpu stepping too big.")); + + record_btrace_cpu.vendor = CV_INTEL; + record_btrace_cpu.family = family; + record_btrace_cpu.model = model; + record_btrace_cpu.stepping = stepping; + + record_btrace_cpu_state = CS_CPU; +} + +/* The "show record btrace cpu" command. */ + +static void +cmd_show_record_btrace_cpu (const char *args, int from_tty) +{ + const char *cpu; + + if (args != nullptr && *args != 0) + error (_("Trailing junk: '%s'."), args); + + switch (record_btrace_cpu_state) + { + case CS_AUTO: + printf_unfiltered (_("btrace cpu is 'auto'.\n")); + return; + + case CS_NONE: + printf_unfiltered (_("btrace cpu is 'none'.\n")); + return; + + case CS_CPU: + switch (record_btrace_cpu.vendor) + { + case CV_INTEL: + if (record_btrace_cpu.stepping == 0) + printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"), + record_btrace_cpu.family, + record_btrace_cpu.model); + else + printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"), + record_btrace_cpu.family, + record_btrace_cpu.model, + record_btrace_cpu.stepping); + return; + } + } + + error (_("Internal error: bad cpu state.")); +} + +/* The "set record btrace bts" command. 
*/ + +static void +cmd_set_record_btrace_bts (const char *args, int from_tty) { printf_unfiltered (_("\"set record btrace bts\" must be followed " "by an appropriate subcommand.\n")); @@ -2599,7 +3134,7 @@ cmd_set_record_btrace_bts (char *args, int from_tty) /* The "show record btrace bts" command. */ static void -cmd_show_record_btrace_bts (char *args, int from_tty) +cmd_show_record_btrace_bts (const char *args, int from_tty) { cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, ""); } @@ -2607,7 +3142,7 @@ cmd_show_record_btrace_bts (char *args, int from_tty) /* The "set record btrace pt" command. */ static void -cmd_set_record_btrace_pt (char *args, int from_tty) +cmd_set_record_btrace_pt (const char *args, int from_tty) { printf_unfiltered (_("\"set record btrace pt\" must be followed " "by an appropriate subcommand.\n")); @@ -2618,7 +3153,7 @@ cmd_set_record_btrace_pt (char *args, int from_tty) /* The "show record btrace pt" command. */ static void -cmd_show_record_btrace_pt (char *args, int from_tty) +cmd_show_record_btrace_pt (const char *args, int from_tty) { cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, ""); } @@ -2645,8 +3180,6 @@ show_record_pt_buffer_size_value (struct ui_file *file, int from_tty, value); } -void _initialize_record_btrace (void); - /* Initialize btrace commands. 
*/ void @@ -2667,7 +3200,7 @@ This format may not be available on all processors."), add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start, _("\ -Start branch trace recording in Intel(R) Processor Trace format.\n\n\ +Start branch trace recording in Intel Processor Trace format.\n\n\ This format may not be available on all processors."), &record_btrace_cmdlist); add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist); @@ -2695,6 +3228,32 @@ replay."), &set_record_btrace_cmdlist, &show_record_btrace_cmdlist); + add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu, + _("\ +Set the cpu to be used for trace decode.\n\n\ +The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\ +For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\ +When decoding branch trace, enable errata workarounds for the specified cpu.\n\ +The default is \"auto\", which uses the cpu on which the trace was recorded.\n\ +When GDB does not support that cpu, this option can be used to enable\n\ +workarounds for a similar cpu that GDB supports.\n\n\ +When set to \"none\", errata workarounds are disabled."), + &set_record_btrace_cpu_cmdlist, + _("set record btrace cpu "), 1, + &set_record_btrace_cmdlist); + + add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\ +Automatically determine the cpu to be used for trace decode."), + &set_record_btrace_cpu_cmdlist); + + add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\ +Do not enable errata workarounds for trace decode."), + &set_record_btrace_cpu_cmdlist); + + add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\ +Show the cpu to be used for trace decode."), + &show_record_btrace_cmdlist); + add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts, _("Set record btrace bts options"), &set_record_btrace_bts_cmdlist, @@ -2740,8 +3299,7 @@ to see the actual buffer size."), NULL, show_record_pt_buffer_size_value, &set_record_btrace_pt_cmdlist, 
&show_record_btrace_pt_cmdlist); - init_record_btrace_ops (); - add_target (&record_btrace_ops); + add_target (record_btrace_target_info, record_btrace_target_open); bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL, xcalloc, xfree);