X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Frecord-btrace.c;h=102e0ebe94ed1e68b5b1084d48efaf2319dfb5f9;hb=31fd9caad9fa8e13bbc132dce264f0c3bc53412f;hp=ae2befff2ac06d794c8b95d31f89d4c8fb932d8b;hpb=7a6c5609f76c156a95e314da790b331eace4137a;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c index ae2befff2a..102e0ebe94 100644 --- a/gdb/record-btrace.c +++ b/gdb/record-btrace.c @@ -1,6 +1,6 @@ /* Branch trace support for GDB, the GNU debugger. - Copyright (C) 2013-2014 Free Software Foundation, Inc. + Copyright (C) 2013-2015 Free Software Foundation, Inc. Contributed by Intel Corp. @@ -26,7 +26,6 @@ #include "gdbcmd.h" #include "disasm.h" #include "observer.h" -#include "exceptions.h" #include "cli/cli-utils.h" #include "source.h" #include "ui-out.h" @@ -35,6 +34,9 @@ #include "regcache.h" #include "frame-unwind.h" #include "hashtab.h" +#include "infrun.h" +#include "event-loop.h" +#include "inf-loop.h" /* The target_ops of record-btrace. */ static struct target_ops record_btrace_ops; @@ -42,8 +44,45 @@ static struct target_ops record_btrace_ops; /* A new thread observer enabling branch tracing for the new thread. */ static struct observer *record_btrace_thread_observer; -/* Temporarily allow memory accesses. */ -static int record_btrace_allow_memory_access; +/* Memory access types used in set/show record btrace replay-memory-access. */ +static const char replay_memory_access_read_only[] = "read-only"; +static const char replay_memory_access_read_write[] = "read-write"; +static const char *const replay_memory_access_types[] = +{ + replay_memory_access_read_only, + replay_memory_access_read_write, + NULL +}; + +/* The currently allowed replay memory access type. */ +static const char *replay_memory_access = replay_memory_access_read_only; + +/* Command lists for "set/show record btrace". */ +static struct cmd_list_element *set_record_btrace_cmdlist; +static struct cmd_list_element *show_record_btrace_cmdlist; + +/* The execution direction of the last resume we got. See record-full.c. */ +static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD; + +/* The async event handler for reverse/replay execution. */ +static struct async_event_handler *record_btrace_async_inferior_event_handler; + +/* A flag indicating that we are currently generating a core file. */ +static int record_btrace_generating_corefile; + +/* The current branch trace configuration. */ +static struct btrace_config record_btrace_conf; + +/* Command list for "record btrace". */ +static struct cmd_list_element *record_btrace_cmdlist; + +/* Command lists for "set/show record btrace". */ +static struct cmd_list_element *set_record_btrace_cmdlist; +static struct cmd_list_element *show_record_btrace_cmdlist; + +/* Command lists for "set/show record btrace bts". */ +static struct cmd_list_element *set_record_btrace_bts_cmdlist; +static struct cmd_list_element *show_record_btrace_bts_cmdlist; /* Print a record-btrace debug message. Use do ... while (0) to avoid ambiguities when used in if statements. */ @@ -107,7 +146,7 @@ record_btrace_enable_warn (struct thread_info *tp) volatile struct gdb_exception error; TRY_CATCH (error, RETURN_MASK_ERROR) - btrace_enable (tp); + btrace_enable (tp, &record_btrace_conf); if (error.message != NULL) warning ("%s", error.message); @@ -151,10 +190,18 @@ record_btrace_auto_disable (void) record_btrace_thread_observer = NULL; } +/* The record-btrace async event handler function. 
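   It forwards the event to the standard inferior event handler.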
*/ + +static void +record_btrace_handle_async_inferior_event (gdb_client_data data) +{ + inferior_event_handler (INF_REG_EVENT, NULL); +} + /* The to_open method of target record-btrace. */ static void -record_btrace_open (char *args, int from_tty) +record_btrace_open (const char *args, int from_tty) { struct cleanup *disable_chain; struct thread_info *tp; @@ -166,19 +213,16 @@ record_btrace_open (char *args, int from_tty) if (!target_has_execution) error (_("The program is not being run.")); - if (!target_supports_btrace ()) - error (_("Target does not support branch tracing.")); - if (non_stop) error (_("Record btrace can't debug inferior in non-stop mode.")); gdb_assert (record_btrace_thread_observer == NULL); disable_chain = make_cleanup (null_cleanup, NULL); - ALL_THREADS (tp) + ALL_NON_EXITED_THREADS (tp) if (args == NULL || *args == 0 || number_is_in_list (args, tp->num)) { - btrace_enable (tp); + btrace_enable (tp, &record_btrace_conf); make_cleanup (record_btrace_disable_callback, tp); } @@ -187,6 +231,11 @@ record_btrace_open (char *args, int from_tty) push_target (&record_btrace_ops); + record_btrace_async_inferior_event_handler + = create_async_event_handler (record_btrace_handle_async_inferior_event, + NULL); + record_btrace_generating_corefile = 0; + observer_notify_record_changed (current_inferior (), 1); discard_cleanups (disable_chain); @@ -203,7 +252,7 @@ record_btrace_stop_recording (struct target_ops *self) record_btrace_auto_disable (); - ALL_THREADS (tp) + ALL_NON_EXITED_THREADS (tp) if (tp->btrace.target != NULL) btrace_disable (tp); } @@ -215,24 +264,109 @@ record_btrace_close (struct target_ops *self) { struct thread_info *tp; + if (record_btrace_async_inferior_event_handler != NULL) + delete_async_event_handler (&record_btrace_async_inferior_event_handler); + /* Make sure automatic recording gets disabled even if we did not stop recording before closing the record-btrace target. */ record_btrace_auto_disable (); /* We should have already stopped recording. Tear down btrace in case we have not. */ - ALL_THREADS (tp) + ALL_NON_EXITED_THREADS (tp) btrace_teardown (tp); } +/* The to_async method of target record-btrace. */ + +static void +record_btrace_async (struct target_ops *ops, + void (*callback) (enum inferior_event_type event_type, + void *context), + void *context) +{ + if (callback != NULL) + mark_async_event_handler (record_btrace_async_inferior_event_handler); + else + clear_async_event_handler (record_btrace_async_inferior_event_handler); + + ops->beneath->to_async (ops->beneath, callback, context); +} + +/* Adjusts the size and returns a human readable size suffix. */ + +static const char * +record_btrace_adjust_size (unsigned int *size) +{ + unsigned int sz; + + sz = *size; + + if ((sz & ((1u << 30) - 1)) == 0) + { + *size = sz >> 30; + return "GB"; + } + else if ((sz & ((1u << 20) - 1)) == 0) + { + *size = sz >> 20; + return "MB"; + } + else if ((sz & ((1u << 10) - 1)) == 0) + { + *size = sz >> 10; + return "kB"; + } + else + return ""; +} + +/* Print a BTS configuration. */ + +static void +record_btrace_print_bts_conf (const struct btrace_config_bts *conf) +{ + const char *suffix; + unsigned int size; + + size = conf->size; + if (size > 0) + { + suffix = record_btrace_adjust_size (&size); + printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix); + } +} + +/* Print a branch tracing configuration. 
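   Prints the recording format and any format-specific settings.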
*/ + +static void +record_btrace_print_conf (const struct btrace_config *conf) +{ + printf_unfiltered (_("Recording format: %s.\n"), + btrace_format_string (conf->format)); + + switch (conf->format) + { + case BTRACE_FORMAT_NONE: + return; + + case BTRACE_FORMAT_BTS: + record_btrace_print_bts_conf (&conf->bts); + return; + } + + internal_error (__FILE__, __LINE__, _("Unkown branch trace format.")); +} + /* The to_info_record method of target record-btrace. */ static void record_btrace_info (struct target_ops *self) { struct btrace_thread_info *btinfo; + const struct btrace_config *conf; struct thread_info *tp; - unsigned int insns, calls; + unsigned int insns, calls, gaps; DEBUG ("info"); @@ -240,12 +374,17 @@ record_btrace_info (struct target_ops *self) if (tp == NULL) error (_("No thread.")); + btinfo = &tp->btrace; + + conf = btrace_conf (btinfo); + if (conf != NULL) + record_btrace_print_conf (conf); + btrace_fetch (tp); insns = 0; calls = 0; - - btinfo = &tp->btrace; + gaps = 0; if (!btrace_is_empty (tp)) { @@ -257,19 +396,86 @@ record_btrace_info (struct target_ops *self) calls = btrace_call_number (&call); btrace_insn_end (&insn, btinfo); - btrace_insn_prev (&insn, 1); + insns = btrace_insn_number (&insn); + if (insns != 0) + { + /* The last instruction does not really belong to the trace. */ + insns -= 1; + } + else + { + unsigned int steps; + + /* Skip gaps at the end. */ + do + { + steps = btrace_insn_prev (&insn, 1); + if (steps == 0) + break; + + insns = btrace_insn_number (&insn); + } + while (insns == 0); + } + + gaps = btinfo->ngaps; } - printf_unfiltered (_("Recorded %u instructions in %u functions for thread " - "%d (%s).\n"), insns, calls, tp->num, - target_pid_to_str (tp->ptid)); + printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) " + "for thread %d (%s).\n"), insns, calls, gaps, + tp->num, target_pid_to_str (tp->ptid)); if (btrace_is_replaying (tp)) printf_unfiltered (_("Replay in progress. At instruction %u.\n"), btrace_insn_number (btinfo->replay)); } +/* Print a decode error. */ + +static void +btrace_ui_out_decode_error (struct ui_out *uiout, int errcode, + enum btrace_format format) +{ + const char *errstr; + int is_error; + + errstr = _("unknown"); + is_error = 1; + + switch (format) + { + default: + break; + + case BTRACE_FORMAT_BTS: + switch (errcode) + { + default: + break; + + case BDE_BTS_OVERFLOW: + errstr = _("instruction overflow"); + break; + + case BDE_BTS_INSN_SIZE: + errstr = _("unknown instruction"); + break; + } + break; + } + + ui_out_text (uiout, _("[")); + if (is_error) + { + ui_out_text (uiout, _("decode error (")); + ui_out_field_int (uiout, "errcode", errcode); + ui_out_text (uiout, _("): ")); + } + ui_out_text (uiout, errstr); + ui_out_text (uiout, _("]\n")); +} + /* Print an unsigned int. */ static void @@ -282,6 +488,7 @@ ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val) static void btrace_insn_history (struct ui_out *uiout, + const struct btrace_thread_info *btinfo, const struct btrace_insn_iterator *begin, const struct btrace_insn_iterator *end, int flags) { @@ -299,13 +506,30 @@ btrace_insn_history (struct ui_out *uiout, insn = btrace_insn_get (&it); - /* Print the instruction index. */ - ui_out_field_uint (uiout, "index", btrace_insn_number (&it)); - ui_out_text (uiout, "\t"); + /* A NULL instruction indicates a gap in the trace. 
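         The decode error that caused the gap is printed in place of an
         instruction.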
*/ + if (insn == NULL) + { + const struct btrace_config *conf; + + conf = btrace_conf (btinfo); - /* Disassembly with '/m' flag may not produce the expected result. - See PR gdb/11833. */ - gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1); + /* We have trace so we must have a configuration. */ + gdb_assert (conf != NULL); + + btrace_ui_out_decode_error (uiout, it.function->errcode, + conf->format); + } + else + { + /* Print the instruction index. */ + ui_out_field_uint (uiout, "index", btrace_insn_number (&it)); + ui_out_text (uiout, "\t"); + + /* Disassembly with '/m' flag may not produce the expected result. + See PR gdb/11833. */ + gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, + insn->pc + 1); + } } } @@ -382,7 +606,7 @@ record_btrace_insn_history (struct target_ops *self, int size, int flags) } if (covered > 0) - btrace_insn_history (uiout, &begin, &end, flags); + btrace_insn_history (uiout, btinfo, &begin, &end, flags); else { if (size < 0) @@ -398,7 +622,8 @@ record_btrace_insn_history (struct target_ops *self, int size, int flags) /* The to_insn_history_range method of target record-btrace. */ static void -record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags) +record_btrace_insn_history_range (struct target_ops *self, + ULONGEST from, ULONGEST to, int flags) { struct btrace_thread_info *btinfo; struct btrace_insn_history *history; @@ -441,7 +666,7 @@ record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags) btrace_insn_next (&end, 1); } - btrace_insn_history (uiout, &begin, &end, flags); + btrace_insn_history (uiout, btinfo, &begin, &end, flags); btrace_set_insn_history (btinfo, &begin, &end); do_cleanups (uiout_cleanup); @@ -450,7 +675,8 @@ record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags) /* The to_insn_history_from method of target record-btrace. */ static void -record_btrace_insn_history_from (ULONGEST from, int size, int flags) +record_btrace_insn_history_from (struct target_ops *self, + ULONGEST from, int size, int flags) { ULONGEST begin, end, context; @@ -477,7 +703,7 @@ record_btrace_insn_history_from (ULONGEST from, int size, int flags) end = ULONGEST_MAX; } - record_btrace_insn_history_range (begin, end, flags); + record_btrace_insn_history_range (self, begin, end, flags); } /* Print the instruction number range for a function call history line. */ @@ -513,7 +739,7 @@ btrace_call_history_src_line (struct ui_out *uiout, return; ui_out_field_string (uiout, "file", - symtab_to_filename_for_display (sym->symtab)); + symtab_to_filename_for_display (symbol_symtab (sym))); begin = bfun->lbegin; end = bfun->lend; @@ -548,7 +774,7 @@ btrace_get_bfun_name (const struct btrace_function *bfun) if (sym != NULL) return SYMBOL_PRINT_NAME (sym); else if (msym != NULL) - return SYMBOL_PRINT_NAME (msym); + return MSYMBOL_PRINT_NAME (msym); else return "??"; } @@ -581,6 +807,21 @@ btrace_call_history (struct ui_out *uiout, ui_out_field_uint (uiout, "index", bfun->number); ui_out_text (uiout, "\t"); + /* Indicate gaps in the trace. */ + if (bfun->errcode != 0) + { + const struct btrace_config *conf; + + conf = btrace_conf (btinfo); + + /* We have trace so we must have a configuration. 
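	     The decode error is printed in place of the function call for
	     this gap.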
*/ + gdb_assert (conf != NULL); + + btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format); + + continue; + } + if ((flags & RECORD_PRINT_INDENT_CALLS) != 0) { int level = bfun->level + btinfo->level, i; @@ -592,7 +833,7 @@ btrace_call_history (struct ui_out *uiout, if (sym != NULL) ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym)); else if (msym != NULL) - ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym)); + ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym)); else if (!ui_out_is_mi_like_p (uiout)) ui_out_field_string (uiout, "function", "??"); @@ -615,7 +856,7 @@ btrace_call_history (struct ui_out *uiout, /* The to_call_history method of target record-btrace. */ static void -record_btrace_call_history (int size, int flags) +record_btrace_call_history (struct target_ops *self, int size, int flags) { struct btrace_thread_info *btinfo; struct btrace_call_history *history; @@ -704,7 +945,8 @@ record_btrace_call_history (int size, int flags) /* The to_call_history_range method of target record-btrace. */ static void -record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags) +record_btrace_call_history_range (struct target_ops *self, + ULONGEST from, ULONGEST to, int flags) { struct btrace_thread_info *btinfo; struct btrace_call_history *history; @@ -756,7 +998,8 @@ record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags) /* The to_call_history_from method of target record-btrace. */ static void -record_btrace_call_history_from (ULONGEST from, int size, int flags) +record_btrace_call_history_from (struct target_ops *self, + ULONGEST from, int size, int flags) { ULONGEST begin, end, context; @@ -783,7 +1026,7 @@ record_btrace_call_history_from (ULONGEST from, int size, int flags) end = ULONGEST_MAX; } - record_btrace_call_history_range (begin, end, flags); + record_btrace_call_history_range (self, begin, end, flags); } /* The to_record_is_replaying method of target record-btrace. */ @@ -793,7 +1036,7 @@ record_btrace_is_replaying (struct target_ops *self) { struct thread_info *tp; - ALL_THREADS (tp) + ALL_NON_EXITED_THREADS (tp) if (btrace_is_replaying (tp)) return 1; @@ -811,7 +1054,9 @@ record_btrace_xfer_partial (struct target_ops *ops, enum target_object object, struct target_ops *t; /* Filter out requests that don't make sense during replay. */ - if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops)) + if (replay_memory_access == replay_memory_access_read_only + && !record_btrace_generating_corefile + && record_btrace_is_replaying (ops)) { switch (object) { @@ -823,7 +1068,7 @@ record_btrace_xfer_partial (struct target_ops *ops, enum target_object object, if (writebuf != NULL) { *xfered_len = len; - return TARGET_XFER_E_UNAVAILABLE; + return TARGET_XFER_UNAVAILABLE; } /* We allow reading readonly memory. */ @@ -842,19 +1087,15 @@ record_btrace_xfer_partial (struct target_ops *ops, enum target_object object, } *xfered_len = len; - return TARGET_XFER_E_UNAVAILABLE; + return TARGET_XFER_UNAVAILABLE; } } } /* Forward the request. */ - for (ops = ops->beneath; ops != NULL; ops = ops->beneath) - if (ops->to_xfer_partial != NULL) - return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf, - offset, len, xfered_len); - - *xfered_len = len; - return TARGET_XFER_E_UNAVAILABLE; + ops = ops->beneath; + return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf, + offset, len, xfered_len); } /* The to_insert_breakpoint method of target record-btrace. 
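   Replay memory access is temporarily switched to read-write so the
   breakpoint can be written even while replaying.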
*/ @@ -865,18 +1106,19 @@ record_btrace_insert_breakpoint (struct target_ops *ops, struct bp_target_info *bp_tgt) { volatile struct gdb_exception except; - int old, ret; + const char *old; + int ret; /* Inserting breakpoints requires accessing memory. Allow it for the duration of this function. */ - old = record_btrace_allow_memory_access; - record_btrace_allow_memory_access = 1; + old = replay_memory_access; + replay_memory_access = replay_memory_access_read_write; ret = 0; TRY_CATCH (except, RETURN_MASK_ALL) ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt); - record_btrace_allow_memory_access = old; + replay_memory_access = old; if (except.reason < 0) throw_exception (except); @@ -892,18 +1134,19 @@ record_btrace_remove_breakpoint (struct target_ops *ops, struct bp_target_info *bp_tgt) { volatile struct gdb_exception except; - int old, ret; + const char *old; + int ret; /* Removing breakpoints requires accessing memory. Allow it for the duration of this function. */ - old = record_btrace_allow_memory_access; - record_btrace_allow_memory_access = 1; + old = replay_memory_access; + replay_memory_access = replay_memory_access_read_write; ret = 0; TRY_CATCH (except, RETURN_MASK_ALL) ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt); - record_btrace_allow_memory_access = old; + replay_memory_access = old; if (except.reason < 0) throw_exception (except); @@ -924,7 +1167,7 @@ record_btrace_fetch_registers (struct target_ops *ops, gdb_assert (tp != NULL); replay = tp->btrace.replay; - if (replay != NULL) + if (replay != NULL && !record_btrace_generating_corefile) { const struct btrace_insn *insn; struct gdbarch *gdbarch; @@ -946,14 +1189,9 @@ record_btrace_fetch_registers (struct target_ops *ops, } else { - struct target_ops *t; + struct target_ops *t = ops->beneath; - for (t = ops->beneath; t != NULL; t = t->beneath) - if (t->to_fetch_registers != NULL) - { - t->to_fetch_registers (t, regcache, regno); - break; - } + t->to_fetch_registers (t, regcache, regno); } } @@ -965,19 +1203,13 @@ record_btrace_store_registers (struct target_ops *ops, { struct target_ops *t; - if (record_btrace_is_replaying (ops)) + if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops)) error (_("This record target does not allow writing registers.")); gdb_assert (may_write_registers != 0); - for (t = ops->beneath; t != NULL; t = t->beneath) - if (t->to_store_registers != NULL) - { - t->to_store_registers (t, regcache, regno); - return; - } - - noprocess (); + t = ops->beneath; + t->to_store_registers (t, regcache, regno); } /* The to_prepare_to_store method of target record-btrace. */ @@ -988,15 +1220,11 @@ record_btrace_prepare_to_store (struct target_ops *ops, { struct target_ops *t; - if (record_btrace_is_replaying (ops)) + if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops)) return; - for (t = ops->beneath; t != NULL; t = t->beneath) - if (t->to_prepare_to_store != NULL) - { - t->to_prepare_to_store (t, regcache); - return; - } + t = ops->beneath; + t->to_prepare_to_store (t, regcache); } /* The branch trace frame cache. */ @@ -1303,6 +1531,22 @@ const struct frame_unwind record_btrace_tailcall_frame_unwind = record_btrace_frame_dealloc_cache }; +/* Implement the to_get_unwinder method. */ + +static const struct frame_unwind * +record_btrace_to_get_unwinder (struct target_ops *self) +{ + return &record_btrace_frame_unwind; +} + +/* Implement the to_get_tailcall_unwinder method. 
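   Returns the record-btrace tail call frame unwinder.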
*/ + +static const struct frame_unwind * +record_btrace_to_get_tailcall_unwinder (struct target_ops *self) +{ + return &record_btrace_tailcall_frame_unwind; +} + /* Indicate that TP should be resumed according to FLAG. */ static void @@ -1391,6 +1635,16 @@ record_btrace_start_replaying (struct thread_info *tp) replay = xmalloc (sizeof (*replay)); btrace_insn_end (replay, btinfo); + /* Skip gaps at the end of the trace. */ + while (btrace_insn_get (replay) == NULL) + { + unsigned int steps; + + steps = btrace_insn_prev (replay, 1); + if (steps == 0) + error (_("No trace.")); + } + /* We're not replaying, yet. */ gdb_assert (btinfo->replay == NULL); btinfo->replay = replay; @@ -1452,23 +1706,23 @@ record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step, DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont"); + /* Store the execution direction of the last resume. */ + record_btrace_resume_exec_dir = execution_direction; + tp = record_btrace_find_resume_thread (ptid); if (tp == NULL) error (_("Cannot find thread to resume.")); /* Stop replaying other threads if the thread to resume is not replaying. */ if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE) - ALL_THREADS (other) + ALL_NON_EXITED_THREADS (other) record_btrace_stop_replaying (other); /* As long as we're not replaying, just forward the request. */ if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE) { - for (ops = ops->beneath; ops != NULL; ops = ops->beneath) - if (ops->to_resume != NULL) - return ops->to_resume (ops, ptid, step, signal); - - error (_("Cannot find target for stepping.")); + ops = ops->beneath; + return ops->to_resume (ops, ptid, step, signal); } /* Compute the btrace thread flag for the requested move. */ @@ -1486,6 +1740,13 @@ record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step, /* We just indicate the resume intent here. The actual stepping happens in record_btrace_wait below. */ + + /* Async support. */ + if (target_can_async_p ()) + { + target_async (inferior_event_handler, 0); + mark_async_event_handler (record_btrace_async_inferior_event_handler); + } } /* Find a thread to move. */ @@ -1501,7 +1762,7 @@ record_btrace_find_thread_to_move (ptid_t ptid) return tp; /* Otherwise, find one other thread that has been resumed. */ - ALL_THREADS (tp) + ALL_NON_EXITED_THREADS (tp) if ((tp->btrace.flags & BTHR_MOVE) != 0) return tp; @@ -1557,6 +1818,10 @@ record_btrace_step_thread (struct thread_info *tp) enum btrace_thread_flag flags; unsigned int steps; + /* We can't step without an execution history. */ + if (btrace_is_empty (tp)) + return btrace_step_no_history (); + btinfo = &tp->btrace; replay = btinfo->replay; @@ -1575,9 +1840,17 @@ record_btrace_step_thread (struct thread_info *tp) if (replay == NULL) return btrace_step_no_history (); - /* We are always able to step at least once. */ - steps = btrace_insn_next (replay, 1); - gdb_assert (steps == 1); + /* Skip gaps during replay. */ + do + { + steps = btrace_insn_next (replay, 1); + if (steps == 0) + { + record_btrace_stop_replaying (tp); + return btrace_step_no_history (); + } + } + while (btrace_insn_get (replay) == NULL); /* Determine the end of the instruction trace. */ btrace_insn_end (&end, btinfo); @@ -1593,10 +1866,16 @@ record_btrace_step_thread (struct thread_info *tp) if (replay == NULL) replay = record_btrace_start_replaying (tp); - /* If we can't step any further, we reached the end of the history. 
*/ - steps = btrace_insn_prev (replay, 1); - if (steps == 0) - return btrace_step_no_history (); + /* If we can't step any further, we reached the end of the history. + Skip gaps during replay. */ + do + { + steps = btrace_insn_prev (replay, 1); + if (steps == 0) + return btrace_step_no_history (); + + } + while (btrace_insn_get (replay) == NULL); return btrace_step_stopped (); @@ -1605,7 +1884,7 @@ record_btrace_step_thread (struct thread_info *tp) if (replay == NULL) return btrace_step_no_history (); - inf = find_inferior_pid (ptid_get_pid (tp->ptid)); + inf = find_inferior_ptid (tp->ptid); aspace = inf->aspace; /* Determine the end of the instruction trace. */ @@ -1615,9 +1894,19 @@ record_btrace_step_thread (struct thread_info *tp) { const struct btrace_insn *insn; - /* We are always able to step at least once. */ - steps = btrace_insn_next (replay, 1); - gdb_assert (steps == 1); + /* Skip gaps during replay. */ + do + { + steps = btrace_insn_next (replay, 1); + if (steps == 0) + { + record_btrace_stop_replaying (tp); + return btrace_step_no_history (); + } + + insn = btrace_insn_get (replay); + } + while (insn == NULL); /* We stop replaying if we reached the end of the trace. */ if (btrace_insn_cmp (replay, &end) == 0) @@ -1626,9 +1915,6 @@ record_btrace_step_thread (struct thread_info *tp) return btrace_step_no_history (); } - insn = btrace_insn_get (replay); - gdb_assert (insn); - DEBUG ("stepping %d (%s) ... %s", tp->num, target_pid_to_str (tp->ptid), core_addr_to_string_nz (insn->pc)); @@ -1642,20 +1928,24 @@ record_btrace_step_thread (struct thread_info *tp) if (replay == NULL) replay = record_btrace_start_replaying (tp); - inf = find_inferior_pid (ptid_get_pid (tp->ptid)); + inf = find_inferior_ptid (tp->ptid); aspace = inf->aspace; for (;;) { const struct btrace_insn *insn; - /* If we can't step any further, we're done. */ - steps = btrace_insn_prev (replay, 1); - if (steps == 0) - return btrace_step_no_history (); + /* If we can't step any further, we reached the end of the history. + Skip gaps during replay. */ + do + { + steps = btrace_insn_prev (replay, 1); + if (steps == 0) + return btrace_step_no_history (); - insn = btrace_insn_get (replay); - gdb_assert (insn); + insn = btrace_insn_get (replay); + } + while (insn == NULL); DEBUG ("reverse-stepping %d (%s) ... %s", tp->num, target_pid_to_str (tp->ptid), @@ -1680,11 +1970,8 @@ record_btrace_wait (struct target_ops *ops, ptid_t ptid, /* As long as we're not replaying, just forward the request. */ if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE) { - for (ops = ops->beneath; ops != NULL; ops = ops->beneath) - if (ops->to_wait != NULL) - return ops->to_wait (ops, ptid, status, options); - - error (_("Cannot find target for waiting.")); + ops = ops->beneath; + return ops->to_wait (ops, ptid, status, options); } /* Let's find a thread to move. */ @@ -1702,7 +1989,7 @@ record_btrace_wait (struct target_ops *ops, ptid_t ptid, /* Stop all other threads. */ if (!non_stop) - ALL_THREADS (other) + ALL_NON_EXITED_THREADS (other) other->btrace.flags &= ~BTHR_MOVE; /* Start record histories anew from the current position. */ @@ -1733,25 +2020,21 @@ record_btrace_decr_pc_after_break (struct target_ops *ops, if (record_btrace_is_replaying (ops)) return 0; - return forward_target_decr_pc_after_break (ops->beneath, gdbarch); + return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch); } -/* The to_find_new_threads method of target record-btrace. 
*/ +/* The to_update_thread_list method of target record-btrace. */ static void -record_btrace_find_new_threads (struct target_ops *ops) +record_btrace_update_thread_list (struct target_ops *ops) { - /* Don't expect new threads if we're replaying. */ + /* We don't add or remove threads during replay. */ if (record_btrace_is_replaying (ops)) return; /* Forward the request. */ - for (ops = ops->beneath; ops != NULL; ops = ops->beneath) - if (ops->to_find_new_threads != NULL) - { - ops->to_find_new_threads (ops); - break; - } + ops = ops->beneath; + ops->to_update_thread_list (ops); } /* The to_thread_alive method of target record-btrace. */ @@ -1764,11 +2047,8 @@ record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid) return find_thread_ptid (ptid) != NULL; /* Forward the request. */ - for (ops = ops->beneath; ops != NULL; ops = ops->beneath) - if (ops->to_thread_alive != NULL) - return ops->to_thread_alive (ops, ptid); - - return 0; + ops = ops->beneath; + return ops->to_thread_alive (ops, ptid); } /* Set the replay branch trace instruction iterator. If IT is NULL, replay @@ -1856,6 +2136,30 @@ record_btrace_goto (struct target_ops *self, ULONGEST insn) print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1); } +/* The to_execution_direction target method. */ + +static enum exec_direction_kind +record_btrace_execution_direction (struct target_ops *self) +{ + return record_btrace_resume_exec_dir; +} + +/* The to_prepare_to_generate_core target method. */ + +static void +record_btrace_prepare_to_generate_core (struct target_ops *self) +{ + record_btrace_generating_corefile = 1; +} + +/* The to_done_generating_core target method. */ + +static void +record_btrace_done_generating_core (struct target_ops *self) +{ + record_btrace_generating_corefile = 0; +} + /* Initialize the record-btrace target ops. 
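   Fills in the target method vector of the record-btrace target.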
*/ static void @@ -1869,11 +2173,11 @@ init_record_btrace_ops (void) ops->to_doc = "Collect control-flow trace and provide the execution history."; ops->to_open = record_btrace_open; ops->to_close = record_btrace_close; + ops->to_async = record_btrace_async; ops->to_detach = record_detach; ops->to_disconnect = record_disconnect; ops->to_mourn_inferior = record_mourn_inferior; ops->to_kill = record_kill; - ops->to_create_inferior = find_default_create_inferior; ops->to_stop_recording = record_btrace_stop_recording; ops->to_info_record = record_btrace_info; ops->to_insn_history = record_btrace_insn_history; @@ -1889,30 +2193,111 @@ init_record_btrace_ops (void) ops->to_fetch_registers = record_btrace_fetch_registers; ops->to_store_registers = record_btrace_store_registers; ops->to_prepare_to_store = record_btrace_prepare_to_store; - ops->to_get_unwinder = &record_btrace_frame_unwind; - ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind; + ops->to_get_unwinder = &record_btrace_to_get_unwinder; + ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder; ops->to_resume = record_btrace_resume; ops->to_wait = record_btrace_wait; - ops->to_find_new_threads = record_btrace_find_new_threads; + ops->to_update_thread_list = record_btrace_update_thread_list; ops->to_thread_alive = record_btrace_thread_alive; ops->to_goto_record_begin = record_btrace_goto_begin; ops->to_goto_record_end = record_btrace_goto_end; ops->to_goto_record = record_btrace_goto; ops->to_can_execute_reverse = record_btrace_can_execute_reverse; ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break; + ops->to_execution_direction = record_btrace_execution_direction; + ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core; + ops->to_done_generating_core = record_btrace_done_generating_core; ops->to_stratum = record_stratum; ops->to_magic = OPS_MAGIC; } +/* Start recording in BTS format. */ + +static void +cmd_record_btrace_bts_start (char *args, int from_tty) +{ + volatile struct gdb_exception exception; + + if (args != NULL && *args != 0) + error (_("Invalid argument.")); + + record_btrace_conf.format = BTRACE_FORMAT_BTS; + + TRY_CATCH (exception, RETURN_MASK_ALL) + execute_command ("target record-btrace", from_tty); + + if (exception.error != 0) + { + record_btrace_conf.format = BTRACE_FORMAT_NONE; + throw_exception (exception); + } +} + /* Alias for "target record". */ static void cmd_record_btrace_start (char *args, int from_tty) { + volatile struct gdb_exception exception; + if (args != NULL && *args != 0) error (_("Invalid argument.")); - execute_command ("target record-btrace", from_tty); + record_btrace_conf.format = BTRACE_FORMAT_BTS; + + TRY_CATCH (exception, RETURN_MASK_ALL) + execute_command ("target record-btrace", from_tty); + + if (exception.error == 0) + return; + + record_btrace_conf.format = BTRACE_FORMAT_NONE; + throw_exception (exception); +} + +/* The "set record btrace" command. */ + +static void +cmd_set_record_btrace (char *args, int from_tty) +{ + cmd_show_list (set_record_btrace_cmdlist, from_tty, ""); +} + +/* The "show record btrace" command. */ + +static void +cmd_show_record_btrace (char *args, int from_tty) +{ + cmd_show_list (show_record_btrace_cmdlist, from_tty, ""); +} + +/* The "show record btrace replay-memory-access" command. 
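   Prints the currently allowed replay memory access type.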
*/ + +static void +cmd_show_replay_memory_access (struct ui_file *file, int from_tty, + struct cmd_list_element *c, const char *value) +{ + fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"), + replay_memory_access); +} + +/* The "set record btrace bts" command. */ + +static void +cmd_set_record_btrace_bts (char *args, int from_tty) +{ + printf_unfiltered (_("\"set record btrace bts\" must be followed " + "by an apporpriate subcommand.\n")); + help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ", + all_commands, gdb_stdout); +} + +/* The "show record btrace bts" command. */ + +static void +cmd_show_record_btrace_bts (char *args, int from_tty) +{ + cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, ""); } void _initialize_record_btrace (void); @@ -1922,14 +2307,70 @@ void _initialize_record_btrace (void); void _initialize_record_btrace (void) { - add_cmd ("btrace", class_obscure, cmd_record_btrace_start, - _("Start branch trace recording."), - &record_cmdlist); + add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start, + _("Start branch trace recording."), &record_btrace_cmdlist, + "record btrace ", 0, &record_cmdlist); add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist); + add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start, + _("\ +Start branch trace recording in Branch Trace Store (BTS) format.\n\n\ +The processor stores a from/to record for each branch into a cyclic buffer.\n\ +This format may not be available on all processors."), + &record_btrace_cmdlist); + add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist); + + add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace, + _("Set record options"), &set_record_btrace_cmdlist, + "set record btrace ", 0, &set_record_cmdlist); + + add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace, + _("Show record options"), &show_record_btrace_cmdlist, + "show record btrace ", 0, &show_record_cmdlist); + + add_setshow_enum_cmd ("replay-memory-access", no_class, + replay_memory_access_types, &replay_memory_access, _("\ +Set what memory accesses are allowed during replay."), _("\ +Show what memory accesses are allowed during replay."), + _("Default is READ-ONLY.\n\n\ +The btrace record target does not trace data.\n\ +The memory therefore corresponds to the live target and not \ +to the current replay position.\n\n\ +When READ-ONLY, allow accesses to read-only memory during replay.\n\ +When READ-WRITE, allow accesses to read-only and read-write memory during \ +replay."), + NULL, cmd_show_replay_memory_access, + &set_record_btrace_cmdlist, + &show_record_btrace_cmdlist); + + add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts, + _("Set record btrace bts options"), + &set_record_btrace_bts_cmdlist, + "set record btrace bts ", 0, &set_record_btrace_cmdlist); + + add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts, + _("Show record btrace bts options"), + &show_record_btrace_bts_cmdlist, + "show record btrace bts ", 0, &show_record_btrace_cmdlist); + + add_setshow_uinteger_cmd ("buffer-size", no_class, + &record_btrace_conf.bts.size, + _("Set the record/replay bts buffer size."), + _("Show the record/replay bts buffer size."), _("\ +When starting recording request a trace buffer of this size. \ +The actual buffer size may differ from the requested size. 
\ +Use \"info record\" to see the actual buffer size.\n\n\ +Bigger buffers allow longer recording but also take more time to process \ +the recorded execution trace.\n\n\ +The trace buffer size may not be changed while recording."), NULL, NULL, + &set_record_btrace_bts_cmdlist, + &show_record_btrace_bts_cmdlist); + init_record_btrace_ops (); add_target (&record_btrace_ops); bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL, xcalloc, xfree); + + record_btrace_conf.bts.size = 64 * 1024; }
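
For reference, "info record" reports the requested BTS buffer size through
record_btrace_adjust_size from an earlier hunk, which only scales the value
down when it is an exact multiple of 1 kB, 1 MB, or 1 GB.  The following
standalone sketch (not part of this patch; adjust_size is a hypothetical
stand-in) mirrors that rounding for the 64 * 1024 byte default registered
above:

#include <stdio.h>

/* Sketch of the size-suffix rounding: divide by a power-of-two unit only
   when the size is an exact multiple of that unit.  */

static const char *
adjust_size (unsigned int *size)
{
  unsigned int sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

int
main (void)
{
  unsigned int size = 64 * 1024;	/* Default BTS buffer size set above.  */
  const char *suffix = adjust_size (&size);

  /* Prints "Buffer size: 64kB.", matching record_btrace_print_bts_conf.  */
  printf ("Buffer size: %u%s.\n", size, suffix);
  return 0;
}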