#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
+#include "event-loop.h"
+#include "thread-fsm.h"
/* Prototypes for local functions */
static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);
+/* Asynchronous signal handler registered as event loop source for
+ when we have pending events ready to be passed to the core. */
+static struct async_event_handler *infrun_async_inferior_event_token;
+
+/* Stores whether infrun_async was previously enabled or disabled.
+ Starts off as -1, indicating "never enabled/disabled". */
+static int infrun_is_async = -1;
+
+/* See infrun.h. */
+
+void
+infrun_async (int enable)
+{
+ /* Only act on actual transitions; repeated calls with the same
+ value are no-ops (infrun_is_async starts at -1, so the very
+ first call always takes effect). */
+ if (infrun_is_async != enable)
+ {
+ infrun_is_async = enable;
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: infrun_async(%d)\n",
+ enable);
+
+ if (enable)
+ /* Mark the handler so the event loop processes any events
+ already pending for the core (see the comment on
+ infrun_async_inferior_event_token above). */
+ mark_async_event_handler (infrun_async_inferior_event_token);
+ else
+ clear_async_event_handler (infrun_async_inferior_event_token);
+ }
+}
+
+/* See infrun.h. */
+
+void
+mark_infrun_async_event_handler (void)
+{
+ /* Poke the event loop: flag that there are inferior events ready
+ to be passed to the core on the next event-loop iteration. */
+ mark_async_event_handler (infrun_async_inferior_event_token);
+}
+
/* When set, stop the 'step' command if we enter a function which has
no line number information. The normal behavior is that we step
over such function. */
issued is most likely not applicable to the
child, so just warn, and refuse to resume. */
warning (_("Not resuming: switched threads "
- "before following fork child.\n"));
+ "before following fork child."));
}
/* Reset breakpoints in the child as appropriate. */
struct thread_info *th, *tmp;
struct inferior *inf = current_inferior ();
int pid = ptid_get_pid (ptid);
+ ptid_t process_ptid;
/* This is an exec event that we actually wish to pay attention to.
Refresh our symbol table to the newly exec'd program, remove any
update_breakpoints_after_exec ();
/* What is this a.out's name? */
+ process_ptid = pid_to_ptid (pid);
printf_unfiltered (_("%s is executing new program: %s\n"),
- target_pid_to_str (inferior_ptid),
+ target_pid_to_str (process_ptid),
execd_pathname);
/* We've followed the inferior through an exec. Therefore, the
{
char *name = exec_file_find (execd_pathname, NULL);
- execd_pathname = alloca (strlen (name) + 1);
+ execd_pathname = (char *) alloca (strlen (name) + 1);
strcpy (execd_pathname, name);
xfree (name);
}
if (follow_exec_mode_string == follow_exec_mode_new)
{
- struct program_space *pspace;
-
/* The user wants to keep the old inferior and program spaces
around. Create a new fresh one, and switch to it. */
- inf = add_inferior (current_inferior ()->pid);
- pspace = add_program_space (maybe_new_address_space ());
- inf->pspace = pspace;
- inf->aspace = pspace->aspace;
-
+ /* Do exit processing for the original inferior before adding
+ the new inferior so we don't have two active inferiors with
+ the same ptid, which can confuse find_inferior_ptid. */
exit_inferior_num_silent (current_inferior ()->num);
+ inf = add_inferior_with_spaces ();
+ inf->pid = pid;
+ target_follow_exec (inf, execd_pathname);
+
set_current_inferior (inf);
- set_current_program_space (pspace);
+ set_current_program_space (inf->pspace);
+ add_thread (ptid);
}
else
{
static void
clear_step_over_info (void)
{
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: clear_step_over_info\n");
step_over_info.aspace = NULL;
step_over_info.address = 0;
step_over_info.nonsteppable_watchpoint_p = 0;
/* The process this displaced step state refers to. */
int pid;
+ /* True if preparing a displaced step ever failed. If so, we won't
+ try displaced stepping for this inferior again. */
+ int failed_before;
+
/* If this is not null_ptid, this is the thread carrying out a
displaced single-step in process PID. This thread's state will
require fixing up once it has completed its step. */
return NULL;
}
+/* Returns true if any inferior has a thread doing a displaced
+ step. */
+
+static int
+displaced_step_in_progress_any_inferior (void)
+{
+ struct displaced_step_inferior_state *state;
+
+ /* Walk the per-inferior displaced-stepping states; a non-null
+ step_ptid means a thread of that inferior is in the middle of a
+ displaced step (see the step_ptid field's comment above). */
+ for (state = displaced_step_inferior_states;
+ state != NULL;
+ state = state->next)
+ if (!ptid_equal (state->step_ptid, null_ptid))
+ return 1;
+
+ return 0;
+}
+
/* Return true if process PID has a thread doing a displaced step. */
static int
if (state->pid == pid)
return state;
- state = xcalloc (1, sizeof (*state));
+ state = XCNEW (struct displaced_step_inferior_state);
state->pid = pid;
state->next = displaced_step_inferior_states;
displaced_step_inferior_states = state;
fprintf_filtered (file,
_("Debugger's willingness to use displaced stepping "
"to step over breakpoints is %s (currently %s).\n"),
- value, non_stop ? "on" : "off");
+ value, target_is_non_stop_p () ? "on" : "off");
else
fprintf_filtered (file,
_("Debugger's willingness to use displaced stepping "
}
/* Return non-zero if displaced stepping can/should be used to step
- over breakpoints. */
+ over breakpoints of thread TP. */
static int
-use_displaced_stepping (struct gdbarch *gdbarch)
+use_displaced_stepping (struct thread_info *tp)
{
- return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
+ struct regcache *regcache = get_thread_regcache (tp->ptid);
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ struct displaced_step_inferior_state *displaced_state;
+
+ displaced_state = get_displaced_stepping_state (ptid_get_pid (tp->ptid));
+
+ return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
+ && target_is_non_stop_p ())
|| can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
&& gdbarch_displaced_step_copy_insn_p (gdbarch)
- && find_record_target () == NULL);
+ && find_record_target () == NULL
+ && (displaced_state == NULL
+ || !displaced_state->failed_before));
}
/* Clean out any stray displaced stepping state. */
static void
displaced_step_clear_cleanup (void *arg)
{
- struct displaced_step_inferior_state *state = arg;
+ struct displaced_step_inferior_state *state
+ = (struct displaced_step_inferior_state *) arg;
displaced_step_clear (state);
}
explain how we handle this case instead.
Returns 1 if preparing was successful -- this thread is going to be
- stepped now; or 0 if displaced stepping this thread got queued. */
+ stepped now; 0 if displaced stepping this thread got queued; or -1
+ if this instruction can't be displaced stepped. */
+
static int
-displaced_step_prepare (ptid_t ptid)
+displaced_step_prepare_throw (ptid_t ptid)
{
struct cleanup *old_cleanups, *ignore_cleanups;
struct thread_info *tp = find_thread_ptid (ptid);
len = gdbarch_max_insn_length (gdbarch);
/* Save the original contents of the copy area. */
- displaced->step_saved_copy = xmalloc (len);
+ displaced->step_saved_copy = (gdb_byte *) xmalloc (len);
ignore_cleanups = make_cleanup (free_current_contents,
&displaced->step_saved_copy);
status = target_read_memory (copy, displaced->step_saved_copy, len);
closure = gdbarch_displaced_step_copy_insn (gdbarch,
original, copy, regcache);
-
- /* We don't support the fully-simulated case at present. */
- gdb_assert (closure);
+ if (closure == NULL)
+ {
+ /* The architecture doesn't know how or want to displaced step
+ this instruction or instruction sequence. Fallback to
+ stepping over the breakpoint in-line. */
+ do_cleanups (old_cleanups);
+ return -1;
+ }
/* Save the information we need to fix things up if the step
succeeds. */
return 1;
}
+/* Wrapper for displaced_step_prepare_throw that disables further
+ attempts at displaced stepping if we get a memory error. */
+
+static int
+displaced_step_prepare (ptid_t ptid)
+{
+ /* -1 is the "can't displaced-step this instruction" result; it is
+ also what we return if the TRY block throws before PREPARED is
+ assigned. */
+ int prepared = -1;
+
+ TRY
+ {
+ prepared = displaced_step_prepare_throw (ptid);
+ }
+ CATCH (ex, RETURN_MASK_ERROR)
+ {
+ struct displaced_step_inferior_state *displaced_state;
+
+ /* Only memory errors disable displaced stepping for this
+ inferior; any other error is propagated to the caller. */
+ if (ex.error != MEMORY_ERROR)
+ throw_exception (ex);
+
+ if (debug_infrun)
+ {
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: disabling displaced stepping: %s\n",
+ ex.message);
+ }
+
+ /* Be verbose if "set displaced-stepping" is "on", silent if
+ "auto". */
+ if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
+ {
+ warning (_("disabling displaced stepping: %s"),
+ ex.message);
+ }
+
+ /* Disable further displaced stepping attempts. */
+ displaced_state
+ = get_displaced_stepping_state (ptid_get_pid (ptid));
+ displaced_state->failed_before = 1;
+ }
+ END_CATCH
+
+ return prepared;
+}
+
static void
write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
const gdb_byte *myaddr, int len)
displaced->step_copy));
}
-static void
+/* If we displaced stepped an instruction successfully, adjust
+ registers and memory to yield the same effect the instruction would
+ have had if we had executed it at its original address, and return
+ 1. If the instruction didn't complete, relocate the PC and return
+ -1. If the thread wasn't displaced stepping, return 0. */
+
+static int
displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
{
struct cleanup *old_cleanups;
struct displaced_step_inferior_state *displaced
= get_displaced_stepping_state (ptid_get_pid (event_ptid));
+ int ret;
/* Was any thread of this process doing a displaced step? */
if (displaced == NULL)
- return;
+ return 0;
/* Was this event for the pid we displaced? */
if (ptid_equal (displaced->step_ptid, null_ptid)
|| ! ptid_equal (displaced->step_ptid, event_ptid))
- return;
+ return 0;
old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
displaced->step_original,
displaced->step_copy,
get_thread_regcache (displaced->step_ptid));
+ ret = 1;
}
else
{
pc = displaced->step_original + (pc - displaced->step_copy);
regcache_write_pc (regcache, pc);
+ ret = -1;
}
do_cleanups (old_cleanups);
displaced->step_ptid = null_ptid;
+
+ return ret;
}
/* Data to be passed around while handling an event. This data is
static void keep_going_pass_signal (struct execution_control_state *ecs);
static void prepare_to_wait (struct execution_control_state *ecs);
+static int keep_going_stepped_thread (struct thread_info *tp);
static int thread_still_needs_step_over (struct thread_info *tp);
+static void stop_all_threads (void);
/* Are there any pending step-over requests? If so, run all we can
now and return true. Otherwise, return false. */
{
struct thread_info *tp, *next;
+ /* Don't start a new step-over if we already have an in-line
+ step-over operation ongoing. */
+ if (step_over_info_valid_p ())
+ return 0;
+
for (tp = step_over_queue_head; tp != NULL; tp = next)
{
struct execution_control_state ecss;
struct execution_control_state *ecs = &ecss;
+ enum step_over_what step_what;
+ int must_be_in_line;
next = thread_step_over_chain_next (tp);
if (displaced_step_in_progress (ptid_get_pid (tp->ptid)))
continue;
+ step_what = thread_still_needs_step_over (tp);
+ must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
+ || ((step_what & STEP_OVER_BREAKPOINT)
+ && !use_displaced_stepping (tp)));
+
+ /* We currently stop all threads of all processes to step-over
+ in-line. If we need to start a new in-line step-over, let
+ any pending displaced steps finish first. */
+ if (must_be_in_line && displaced_step_in_progress_any_inferior ())
+ return 0;
+
thread_step_over_chain_remove (tp);
if (step_over_queue_head == NULL)
"infrun: step-over queue now empty\n");
}
- if (tp->control.trap_expected || tp->executing)
+ if (tp->control.trap_expected
+ || tp->resumed
+ || tp->executing)
{
internal_error (__FILE__, __LINE__,
"[%s] has inconsistent state: "
- "trap_expected=%d, executing=%d\n",
+ "trap_expected=%d, resumed=%d, executing=%d\n",
target_pid_to_str (tp->ptid),
tp->control.trap_expected,
+ tp->resumed,
tp->executing);
}
because we wouldn't be able to resume anything else until the
target stops again. In non-stop, the resume always resumes
only TP, so it's OK to let the thread resume freely. */
- if (!non_stop && !thread_still_needs_step_over (tp))
+ if (!target_is_non_stop_p () && !step_what)
continue;
switch_to_thread (tp->ptid);
if (!ecs->wait_some_more)
error (_("Command aborted."));
- if (!non_stop)
+ gdb_assert (tp->resumed);
+
+ /* If we started a new in-line step-over, we're done. */
+ if (step_over_info_valid_p ())
+ {
+ gdb_assert (tp->control.trap_expected);
+ return 1;
+ }
+
+ if (!target_is_non_stop_p ())
{
/* On all-stop, shouldn't have resumed unless we needed a
step over. */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
+static const char schedlock_replay[] = "replay";
static const char *const scheduler_enums[] = {
schedlock_off,
schedlock_on,
schedlock_step,
+ schedlock_replay,
NULL
};
-static const char *scheduler_mode = schedlock_off;
+static const char *scheduler_mode = schedlock_replay;
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
resume. */
resume_ptid = inferior_ptid;
}
+ else if ((scheduler_mode == schedlock_replay)
+ && target_record_will_replay (minus_one_ptid, execution_direction))
+ {
+ /* User-settable 'scheduler' mode requires solo thread resume in replay
+ mode. */
+ resume_ptid = inferior_ptid;
+ }
else if (!sched_multi && target_supports_multi_process ())
{
/* Resume all threads of the current process (and none of other
return resume_ptid;
}
+/* Return a ptid representing the set of threads that we will resume,
+ in the perspective of the target, assuming run control handling
+ does not require leaving some threads stopped (e.g., stepping past
+ breakpoint). USER_STEP indicates whether we're about to start the
+ target for a stepping command. */
+
+static ptid_t
+internal_resume_ptid (int user_step)
+{
+ /* In non-stop, we always control threads individually. Note that
+ the target may always work in non-stop mode even with "set
+ non-stop off", in which case user_visible_resume_ptid could
+ return a wildcard ptid. */
+ if (target_is_non_stop_p ())
+ return inferior_ptid;
+ else
+ /* All-stop target: defer to the user-visible resume set, which
+ applies the user's "scheduler" settings. */
+ return user_visible_resume_ptid (user_step);
+}
+
/* Wrapper for target_resume, that handles infrun-specific
bookkeeping. */
single-step). */
int step;
- tp->stepped_breakpoint = 0;
-
gdb_assert (!thread_is_in_step_over_chain (tp));
QUIT;
+ if (tp->suspend.waitstatus_pending_p)
+ {
+ if (debug_infrun)
+ {
+ char *statstr;
+
+ statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: resume: thread %s has pending wait status %s "
+ "(currently_stepping=%d).\n",
+ target_pid_to_str (tp->ptid), statstr,
+ currently_stepping (tp));
+ xfree (statstr);
+ }
+
+ tp->resumed = 1;
+
+ /* FIXME: What should we do if we are supposed to resume this
+ thread with a signal? Maybe we should maintain a queue of
+ pending signals to deliver. */
+ if (sig != GDB_SIGNAL_0)
+ {
+ warning (_("Couldn't deliver signal %s to %s."),
+ gdb_signal_to_name (sig), target_pid_to_str (tp->ptid));
+ }
+
+ tp->suspend.stop_signal = GDB_SIGNAL_0;
+ discard_cleanups (old_cleanups);
+
+ if (target_can_async_p ())
+ target_async (1);
+ return;
+ }
+
+ tp->stepped_breakpoint = 0;
+
/* Depends on stepped_breakpoint. */
step = currently_stepping (tp);
insert_single_step_breakpoint (gdbarch, aspace, pc);
insert_breakpoints ();
- resume_ptid = user_visible_resume_ptid (user_step);
+ resume_ptid = internal_resume_ptid (user_step);
do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
discard_cleanups (old_cleanups);
+ tp->resumed = 1;
return;
}
}
We can't use displaced stepping when we are waiting for vfork_done
event, displaced stepping breaks the vfork child similarly as single
step software breakpoint. */
- if (use_displaced_stepping (gdbarch)
- && tp->control.trap_expected
+ if (tp->control.trap_expected
+ && use_displaced_stepping (tp)
&& !step_over_info_valid_p ()
&& sig == GDB_SIGNAL_0
&& !current_inferior ()->waiting_for_vfork_done)
{
- struct displaced_step_inferior_state *displaced;
+ int prepared = displaced_step_prepare (inferior_ptid);
- if (!displaced_step_prepare (inferior_ptid))
+ if (prepared == 0)
{
if (debug_infrun)
fprintf_unfiltered (gdb_stdlog,
discard_cleanups (old_cleanups);
return;
}
+ else if (prepared < 0)
+ {
+ /* Fallback to stepping over the breakpoint in-line. */
+
+ if (target_is_non_stop_p ())
+ stop_all_threads ();
+
+ set_step_over_info (get_regcache_aspace (regcache),
+ regcache_read_pc (regcache), 0);
+
+ step = maybe_software_singlestep (gdbarch, pc);
+
+ insert_breakpoints ();
+ }
+ else if (prepared > 0)
+ {
+ struct displaced_step_inferior_state *displaced;
- /* Update pc to reflect the new address from which we will execute
- instructions due to displaced stepping. */
- pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
+ /* Update pc to reflect the new address from which we will
+ execute instructions due to displaced stepping. */
+ pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
- displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
- step = gdbarch_displaced_step_hw_singlestep (gdbarch,
- displaced->step_closure);
+ displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
+ step = gdbarch_displaced_step_hw_singlestep (gdbarch,
+ displaced->step_closure);
+ }
}
/* Do we need to do it the hard way, w/temp breakpoints? */
use singlestep breakpoint. */
gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
- /* Decide the set of threads to ask the target to resume. Start
- by assuming everything will be resumed, than narrow the set
- by applying increasingly restricting conditions. */
- resume_ptid = user_visible_resume_ptid (user_step);
-
- /* Maybe resume a single thread after all. */
+ /* Decide the set of threads to ask the target to resume. */
if ((step || thread_has_single_step_breakpoints_set (tp))
&& tp->control.trap_expected)
{
breakpoint if allowed to run. */
resume_ptid = inferior_ptid;
}
+ else
+ resume_ptid = internal_resume_ptid (user_step);
if (execution_direction != EXEC_REVERSE
&& step && breakpoint_inserted_here_p (aspace, pc))
{
- /* The only case we currently need to step a breakpoint
- instruction is when we have a signal to deliver. See
- handle_signal_stop where we handle random signals that could
- take out us out of the stepping range. Normally, in that
- case we end up continuing (instead of stepping) over the
+ /* There are two cases where we currently need to step a
+ breakpoint instruction when we have a signal to deliver:
+
+ - See handle_signal_stop where we handle random signals that
+ could take us out of the stepping range. Normally, in
+ that case we end up continuing (instead of stepping) over the
signal handler with a breakpoint at PC, but there are cases
where we should _always_ single-step, even if we have a
step-resume breakpoint, like when a software watchpoint is
recurses and executes PC again, it'll miss the breakpoint.
So we leave the breakpoint inserted anyway, but we need to
record that we tried to step a breakpoint instruction, so
- that adjust_pc_after_break doesn't end up confused. */
- gdb_assert (sig != GDB_SIGNAL_0);
+ that adjust_pc_after_break doesn't end up confused.
+
+ - In non-stop if we insert a breakpoint (e.g., a step-resume)
+ in one thread after another thread that was stepping had been
+ momentarily paused for a step-over. When we re-resume the
+ stepping thread, it may be resumed from that address with a
+ breakpoint that hasn't trapped yet. Seen with
+ gdb.threads/non-stop-fair-events.exp, on targets that don't
+ do displaced stepping. */
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: resume: [%s] stepped breakpoint\n",
+ target_pid_to_str (tp->ptid));
tp->stepped_breakpoint = 1;
}
if (debug_displaced
- && use_displaced_stepping (gdbarch)
&& tp->control.trap_expected
+ && use_displaced_stepping (tp)
&& !step_over_info_valid_p ())
{
struct regcache *resume_regcache = get_thread_regcache (tp->ptid);
}
do_target_resume (resume_ptid, step, sig);
+ tp->resumed = 1;
discard_cleanups (old_cleanups);
}
\f
/* Proceeding. */
+/* See infrun.h. */
+
+/* Counter that tracks number of user visible stops. This can be used
+ to tell whether a command has proceeded the inferior past the
+ current location. This allows e.g., inferior function calls in
+ breakpoint commands to not interrupt the command list. When the
+ call finishes successfully, the inferior is standing at the same
+ breakpoint as if nothing happened (and so we don't call
+ normal_stop). */
+static ULONGEST current_stop_id;
+
+/* See infrun.h. */
+
+ULONGEST
+get_stop_id (void)
+{
+ /* Plain accessor; see the comment on current_stop_id above for
+ the counter's semantics. */
+ return current_stop_id;
+}
+
+/* Called when we report a user visible stop. */
+
+static void
+new_stop_id (void)
+{
+ /* NOTE(review): nothing in this chunk ever resets the counter, so
+ it is assumed to be monotonically increasing for the life of the
+ session. */
+ current_stop_id++;
+}
+
/* Clear out all variables saying what to do when inferior is continued.
First do this, then set the ones you want, then call `proceed'. */
"infrun: clear_proceed_status_thread (%s)\n",
target_pid_to_str (tp->ptid));
+ /* If we're starting a new sequence, then the previous finished
+ single-step is no longer relevant. */
+ if (tp->suspend.waitstatus_pending_p)
+ {
+ if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: clear_proceed_status: pending "
+ "event of %s was a finished step. "
+ "Discarding.\n",
+ target_pid_to_str (tp->ptid));
+
+ tp->suspend.waitstatus_pending_p = 0;
+ tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
+ }
+ else if (debug_infrun)
+ {
+ char *statstr;
+
+ statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: clear_proceed_status_thread: thread %s "
+ "has pending wait status %s "
+ "(currently_stepping=%d).\n",
+ target_pid_to_str (tp->ptid), statstr,
+ currently_stepping (tp));
+ xfree (statstr);
+ }
+ }
+
/* If this signal should not be seen by program, give it zero.
Used for debugging signals. */
if (!signal_pass_state (tp->suspend.stop_signal))
tp->suspend.stop_signal = GDB_SIGNAL_0;
+ thread_fsm_delete (tp->thread_fsm);
+ tp->thread_fsm = NULL;
+
tp->control.trap_expected = 0;
tp->control.step_range_start = 0;
tp->control.step_range_end = 0;
void
clear_proceed_status (int step)
{
+ /* With scheduler-locking replay, stop replaying other threads if we're
+ not replaying the user-visible resume ptid.
+
+ This is a convenience feature to not require the user to explicitly
+ stop replaying the other threads. We're assuming that the user's
+ intent is to resume tracing the recorded process. */
+ if (!non_stop && scheduler_mode == schedlock_replay
+ && target_record_is_replaying (minus_one_ptid)
+ && !target_record_will_replay (user_visible_resume_ptid (step),
+ execution_direction))
+ target_record_stop_replaying ();
+
if (!non_stop)
{
struct thread_info *tp;
stop_after_trap = 0;
- clear_step_over_info ();
-
observer_notify_about_to_proceed ();
}
{
return (scheduler_mode == schedlock_on
|| (scheduler_mode == schedlock_step
- && tp->control.stepping_command));
+ && tp->control.stepping_command)
+ || (scheduler_mode == schedlock_replay
+ && target_record_will_replay (minus_one_ptid,
+ execution_direction)));
}
/* Basic routine for continuing the program in various fashions.
other thread was already doing one. In either case, don't
resume anything else until the step-over is finished. */
}
- else if (started && !non_stop)
+ else if (started && !target_is_non_stop_p ())
{
/* A new displaced stepping sequence was started. In all-stop,
we can't talk to the target anymore until it next stops. */
}
- else if (!tp->executing && !thread_is_in_step_over_chain (tp))
+ else if (!non_stop && target_is_non_stop_p ())
+ {
+ /* In all-stop, but the target is always in non-stop mode.
+ Start all other threads that are implicitly resumed too. */
+ ALL_NON_EXITED_THREADS (tp)
+ {
+ /* Ignore threads of processes we're not resuming. */
+ if (!ptid_match (tp->ptid, resume_ptid))
+ continue;
+
+ if (tp->resumed)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: proceed: [%s] resumed\n",
+ target_pid_to_str (tp->ptid));
+ gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
+ continue;
+ }
+
+ if (thread_is_in_step_over_chain (tp))
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: proceed: [%s] needs step-over\n",
+ target_pid_to_str (tp->ptid));
+ continue;
+ }
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: proceed: resuming %s\n",
+ target_pid_to_str (tp->ptid));
+
+ reset_ecs (ecs, tp);
+ switch_to_thread (tp->ptid);
+ keep_going_pass_signal (ecs);
+ if (!ecs->wait_some_more)
+ error (_("Command aborted."));
+ }
+ }
+ else if (!tp->resumed && !thread_is_in_step_over_chain (tp))
{
/* The thread wasn't started, and isn't queued, run it now. */
reset_ecs (ecs, tp);
switch_to_thread (tp->ptid);
keep_going_pass_signal (ecs);
if (!ecs->wait_some_more)
- error ("Command aborted.");
+ error (_("Command aborted."));
}
discard_cleanups (old_chain);
- /* Wait for it to stop (if not standalone)
- and in any case decode why it stopped, and act accordingly. */
- /* Do this only if we are not using the event loop, or if the target
- does not support asynchronous execution. */
+ /* Tell the event loop to wait for it to stop. If the target
+ supports asynchronous execution, it'll do this from within
+ target_resume. */
if (!target_can_async_p ())
- {
- wait_for_inferior ();
- normal_stop ();
- }
+ mark_async_event_handler (infrun_async_inferior_event_token);
}
\f
have consistent output as if the stop event had been
reported. */
ecs->ptid = info->ptid;
- ecs->event_thread = find_thread_ptid (info->ptid);
+ ecs->event_thread = info;
ecs->ws.kind = TARGET_WAITKIND_STOPPED;
ecs->ws.value.sig = GDB_SIGNAL_0;
if (!ecs->wait_some_more)
{
- struct thread_info *tp;
+ /* Cancel any running execution command. */
+ thread_cancel_execution_command (info);
normal_stop ();
-
- /* Finish off the continuations. */
- tp = inferior_thread ();
- do_all_intermediate_continuations_thread (tp, 1);
- do_all_continuations_thread (tp, 1);
}
do_cleanups (old_chain);
if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
return;
- if (non_stop)
+ if (target_is_non_stop_p ())
{
/* If in non-stop mode, only the current thread stopped. */
func (inferior_thread ());
delete_just_stopped_threads_infrun_breakpoints ();
}
-/* Pretty print the results of target_wait, for debugging purposes. */
+/* See infrun.h. */
-static void
+void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
const struct target_waitstatus *ws)
{
ui_file_delete (tmp_stream);
}
-/* Prepare and stabilize the inferior for detaching it. E.g.,
- detaching while a thread is displaced stepping is a recipe for
- crashing it, as nothing would readjust the PC out of the scratch
- pad. */
+/* Select a thread at random, out of those which are resumed and have
+ had events. */
-void
-prepare_for_detach (void)
+static struct thread_info *
+random_pending_event_thread (ptid_t waiton_ptid)
{
- struct inferior *inf = current_inferior ();
- ptid_t pid_ptid = pid_to_ptid (inf->pid);
- struct cleanup *old_chain_1;
- struct displaced_step_inferior_state *displaced;
+ struct thread_info *event_tp;
+ int num_events = 0;
+ int random_selector;
- displaced = get_displaced_stepping_state (inf->pid);
-
- /* Is any thread of this process displaced stepping? If not,
- there's nothing else to do. */
- if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
- return;
+ /* First see how many events we have. Count only resumed threads
+ that have an event pending. */
+ ALL_NON_EXITED_THREADS (event_tp)
+ if (ptid_match (event_tp->ptid, waiton_ptid)
+ && event_tp->resumed
+ && event_tp->suspend.waitstatus_pending_p)
+ num_events++;
- if (debug_infrun)
- fprintf_unfiltered (gdb_stdlog,
- "displaced-stepping in-process while detaching");
+ if (num_events == 0)
+ return NULL;
- old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
- inf->detaching = 1;
+ /* Now randomly pick a thread out of those that have had events.
+ Scales rand () into [0, num_events); not meant to be a strong
+ RNG, just fair-ish selection among event threads. */
+ random_selector = (int)
+ ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
- while (!ptid_equal (displaced->step_ptid, null_ptid))
- {
- struct cleanup *old_chain_2;
- struct execution_control_state ecss;
- struct execution_control_state *ecs;
+ if (debug_infrun && num_events > 1)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: Found %d events, selecting #%d\n",
+ num_events, random_selector);
+
+ /* Select the Nth thread that has had an event. NOTE(review):
+ this second walk assumes the thread list and each thread's
+ pending status are unchanged since the counting walk above. */
+ ALL_NON_EXITED_THREADS (event_tp)
+ if (ptid_match (event_tp->ptid, waiton_ptid)
+ && event_tp->resumed
+ && event_tp->suspend.waitstatus_pending_p)
+ if (random_selector-- == 0)
+ break;
- ecs = &ecss;
- memset (ecs, 0, sizeof (*ecs));
+ return event_tp;
+}
- overlay_cache_invalid = 1;
- /* Flush target cache before starting to handle each event.
- Target was running and cache could be stale. This is just a
- heuristic. Running threads may modify target memory, but we
- don't get any event. */
- target_dcache_invalidate ();
+/* Wrapper for target_wait that first checks whether threads have
+ pending statuses to report before actually asking the target for
+ more events. */
- if (deprecated_target_wait_hook)
- ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
- else
- ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
+static ptid_t
+do_target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
+{
+ ptid_t event_ptid;
+ struct thread_info *tp;
+ /* First check if there is a resumed thread with a wait status
+ pending. */
+ if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
+ {
+ tp = random_pending_event_thread (ptid);
+ }
+ else
+ {
 if (debug_infrun)
- print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: Waiting for specific thread %s.\n",
+ target_pid_to_str (ptid));
- /* If an error happens while handling the event, propagate GDB's
- knowledge of the executing state to the frontend/user running
- state. */
+ /* We have a specific thread to check. */
+ tp = find_thread_ptid (ptid);
+ gdb_assert (tp != NULL);
+ if (!tp->suspend.waitstatus_pending_p)
+ tp = NULL;
+ }
+
+ /* A pending breakpoint event may be stale: re-validate it against
+ the thread's current PC and the breakpoints inserted there. */
+ if (tp != NULL
+ && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
+ || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
+ {
+ struct regcache *regcache = get_thread_regcache (tp->ptid);
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ CORE_ADDR pc;
+ int discard = 0;
+
+ pc = regcache_read_pc (regcache);
+
+ if (pc != tp->suspend.stop_pc)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: PC of %s changed. was=%s, now=%s\n",
+ target_pid_to_str (tp->ptid),
+ /* Print the recorded stop PC the comparison above
+ used, not prev_pc, which is unrelated here. */
+ paddress (gdbarch, tp->suspend.stop_pc),
+ paddress (gdbarch, pc));
+ discard = 1;
+ }
+ else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: previous breakpoint of %s, at %s gone\n",
+ target_pid_to_str (tp->ptid),
+ paddress (gdbarch, pc));
+
+ discard = 1;
+ }
+
+ if (discard)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: pending event of %s cancelled.\n",
+ target_pid_to_str (tp->ptid));
+
+ tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
+ tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
+ }
+ }
+
+ if (tp != NULL)
+ {
+ if (debug_infrun)
+ {
+ char *statstr;
+
+ statstr = target_waitstatus_to_string (&tp->suspend.waitstatus);
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: Using pending wait status %s for %s.\n",
+ statstr,
+ target_pid_to_str (tp->ptid));
+ xfree (statstr);
+ }
+
+ /* Now that we've selected our final event LWP, un-adjust its PC
+ if it was a software breakpoint (and the target doesn't
+ always adjust the PC itself). */
+ if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
+ && !target_supports_stopped_by_sw_breakpoint ())
+ {
+ struct regcache *regcache;
+ struct gdbarch *gdbarch;
+ int decr_pc;
+
+ regcache = get_thread_regcache (tp->ptid);
+ gdbarch = get_regcache_arch (regcache);
+
+ decr_pc = gdbarch_decr_pc_after_break (gdbarch);
+ if (decr_pc != 0)
+ {
+ CORE_ADDR pc;
+
+ pc = regcache_read_pc (regcache);
+ regcache_write_pc (regcache, pc + decr_pc);
+ }
+ }
+
+ tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
+ *status = tp->suspend.waitstatus;
+ tp->suspend.waitstatus_pending_p = 0;
+
+ /* Wake up the event loop again, until all pending events are
+ processed. */
+ if (target_is_async_p ())
+ mark_async_event_handler (infrun_async_inferior_event_token);
+ return tp->ptid;
+ }
+
+ /* But if we don't find one, we'll have to wait. */
+
+ if (deprecated_target_wait_hook)
+ event_ptid = deprecated_target_wait_hook (ptid, status, options);
+ else
+ event_ptid = target_wait (ptid, status, options);
+
+ return event_ptid;
+}
+
+/* Prepare and stabilize the inferior for detaching it. E.g.,
+ detaching while a thread is displaced stepping is a recipe for
+ crashing it, as nothing would readjust the PC out of the scratch
+ pad. */
+
+void
+prepare_for_detach (void)
+{
+ struct inferior *inf = current_inferior ();
+ ptid_t pid_ptid = pid_to_ptid (inf->pid);
+ struct cleanup *old_chain_1;
+ struct displaced_step_inferior_state *displaced;
+
+ displaced = get_displaced_stepping_state (inf->pid);
+
+ /* Is any thread of this process displaced stepping? If not,
+ there's nothing else to do. */
+ if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
+ return;
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "displaced-stepping in-process while detaching");
+
+ old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
+ inf->detaching = 1;
+
+ while (!ptid_equal (displaced->step_ptid, null_ptid))
+ {
+ struct cleanup *old_chain_2;
+ struct execution_control_state ecss;
+ struct execution_control_state *ecs;
+
+ ecs = &ecss;
+ memset (ecs, 0, sizeof (*ecs));
+
+ overlay_cache_invalid = 1;
+ /* Flush target cache before starting to handle each event.
+ Target was running and cache could be stale. This is just a
+ heuristic. Running threads may modify target memory, but we
+ don't get any event. */
+ target_dcache_invalidate ();
+
+ ecs->ptid = do_target_wait (pid_ptid, &ecs->ws, 0);
+
+ if (debug_infrun)
+ print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
+
+ /* If an error happens while handling the event, propagate GDB's
+ knowledge of the executing state to the frontend/user running
+ state. */
old_chain_2 = make_cleanup (finish_thread_state_cleanup,
&minus_one_ptid);
don't get any event. */
target_dcache_invalidate ();
- if (deprecated_target_wait_hook)
- ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
- else
- ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
+ ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws, 0);
if (debug_infrun)
print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
gdb_rl_callback_handler_reinstall ();
}
+/* Clean up the FSMs of threads that are now stopped. In non-stop,
+ that's just the event thread. In all-stop, that's all threads. */
+
+static void
+clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
+{
+ struct thread_info *thr = ecs->event_thread;
+
+ /* The event thread's FSM first, if it has one. */
+ if (thr != NULL && thr->thread_fsm != NULL)
+ thread_fsm_clean_up (thr->thread_fsm);
+
+ if (!non_stop)
+ {
+ /* All-stop: every other non-exited thread is stopped too, so
+ clean up its FSM as well. Switch to each thread before
+ calling thread_fsm_clean_up; it apparently depends on the
+ thread being current (note the switch back below). */
+ ALL_NON_EXITED_THREADS (thr)
+ {
+ if (thr->thread_fsm == NULL)
+ continue;
+ if (thr == ecs->event_thread)
+ continue;
+
+ switch_to_thread (thr->ptid);
+ thread_fsm_clean_up (thr->thread_fsm);
+ }
+
+ /* Restore the event thread as the selected thread. */
+ if (ecs->event_thread != NULL)
+ switch_to_thread (ecs->event_thread->ptid);
+ }
+}
+
/* Asynchronous version of wait_for_inferior. It is called by the
event loop whenever a change of state is detected on the file
descriptor corresponding to the target. It can be called more than
make_cleanup_restore_integer (&execution_direction);
execution_direction = target_execution_direction ();
- if (deprecated_target_wait_hook)
- ecs->ptid =
- deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
- else
- ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
+ ecs->ptid = do_target_wait (waiton_ptid, &ecs->ws,
+ target_can_async_p () ? TARGET_WNOHANG : 0);
if (debug_infrun)
print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
/* If an error happens while handling the event, propagate GDB's
knowledge of the executing state to the frontend/user running
state. */
- if (!non_stop)
+ if (!target_is_non_stop_p ())
ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
else
ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
if (!ecs->wait_some_more)
{
struct inferior *inf = find_inferior_ptid (ecs->ptid);
+ int should_stop = 1;
+ struct thread_info *thr = ecs->event_thread;
+ int should_notify_stop = 1;
delete_just_stopped_threads_infrun_breakpoints ();
- /* We may not find an inferior if this was a process exit. */
- if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
- normal_stop ();
-
- if (target_has_execution
- && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
- && ecs->ws.kind != TARGET_WAITKIND_EXITED
- && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
- && ecs->event_thread->step_multi
- && ecs->event_thread->control.stop_step)
- inferior_event_handler (INF_EXEC_CONTINUE, NULL);
+ if (thr != NULL)
+ {
+ struct thread_fsm *thread_fsm = thr->thread_fsm;
+
+ if (thread_fsm != NULL)
+ should_stop = thread_fsm_should_stop (thread_fsm);
+ }
+
+ if (!should_stop)
+ {
+ keep_going (ecs);
+ }
else
{
- inferior_event_handler (INF_EXEC_COMPLETE, NULL);
- cmd_done = 1;
+ clean_up_just_stopped_threads_fsms (ecs);
+
+ if (thr != NULL && thr->thread_fsm != NULL)
+ {
+ should_notify_stop
+ = thread_fsm_should_notify_stop (thr->thread_fsm);
+ }
+
+ if (should_notify_stop)
+ {
+ int proceeded = 0;
+
+ /* We may not find an inferior if this was a process exit. */
+ if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
+ proceeded = normal_stop ();
+
+ if (!proceeded)
+ {
+ inferior_event_handler (INF_EXEC_COMPLETE, NULL);
+ cmd_done = 1;
+ }
+ }
}
}
to get the "stopped by SW BP and needs adjustment" info out of
the target/kernel (and thus never reach here; see above). */
if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
- || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
+ || (target_is_non_stop_p ()
+ && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
{
struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
return inf->control.stop_soon;
}
+/* Wait for one event from any thread (minus_one_ptid), blocking
+ until one arrives (no TARGET_WNOHANG). Store the resulting
+ waitstatus in WS, and return the event ptid. */
+
+static ptid_t
+wait_one (struct target_waitstatus *ws)
+{
+ ptid_t event_ptid;
+ ptid_t wait_ptid = minus_one_ptid;
+
+ overlay_cache_invalid = 1;
+
+ /* Flush target cache before starting to handle each event.
+ Target was running and cache could be stale. This is just a
+ heuristic. Running threads may modify target memory, but we
+ don't get any event. */
+ target_dcache_invalidate ();
+
+ /* Honor the deprecated wait hook, like the other target_wait call
+ sites in this file do. */
+ if (deprecated_target_wait_hook)
+ event_ptid = deprecated_target_wait_hook (wait_ptid, ws, 0);
+ else
+ event_ptid = target_wait (wait_ptid, ws, 0);
+
+ if (debug_infrun)
+ print_target_wait_results (wait_ptid, event_ptid, ws);
+
+ return event_ptid;
+}
+
+/* Generate a wrapper for target_stopped_by_REASON that works on PTID
+ instead of the current thread. The target_stopped_by_* queries
+ implicitly operate on the current thread (inferior_ptid), so the
+ generated function temporarily points inferior_ptid at PTID,
+ queries the target, and restores inferior_ptid via a cleanup. */
+#define THREAD_STOPPED_BY(REASON) \
+static int \
+thread_stopped_by_ ## REASON (ptid_t ptid) \
+{ \
+ struct cleanup *old_chain; \
+ int res; \
+ \
+ old_chain = save_inferior_ptid (); \
+ inferior_ptid = ptid; \
+ \
+ res = target_stopped_by_ ## REASON (); \
+ \
+ do_cleanups (old_chain); \
+ \
+ return res; \
+}
+
+/* Generate thread_stopped_by_watchpoint. */
+THREAD_STOPPED_BY (watchpoint)
+/* Generate thread_stopped_by_sw_breakpoint. */
+THREAD_STOPPED_BY (sw_breakpoint)
+/* Generate thread_stopped_by_hw_breakpoint. */
+THREAD_STOPPED_BY (hw_breakpoint)
+
+/* Cleanup callback that switches the current thread to the PTID
+ pointed at by PTID_P. Registered with make_cleanup to restore the
+ originally-selected thread on the caller's exit paths. */
+
+static void
+switch_to_thread_cleanup (void *ptid_p)
+{
+ ptid_t ptid = *(ptid_t *) ptid_p;
+
+ switch_to_thread (ptid);
+}
+
+/* Save the thread's event and stop reason to process it later. The
+ status is recorded in TP->suspend; for SIGTRAP stops a stop reason
+ is also classified (watchpoint, SW/HW breakpoint, or single-step),
+ other events are saved as-is. */
+
+static void
+save_waitstatus (struct thread_info *tp, struct target_waitstatus *ws)
+{
+ struct regcache *regcache;
+ struct address_space *aspace;
+
+ if (debug_infrun)
+ {
+ char *statstr;
+
+ statstr = target_waitstatus_to_string (ws);
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: saving status %s for %d.%ld.%ld\n",
+ statstr,
+ ptid_get_pid (tp->ptid),
+ ptid_get_lwp (tp->ptid),
+ ptid_get_tid (tp->ptid));
+ xfree (statstr);
+ }
+
+ /* Record for later. */
+ tp->suspend.waitstatus = *ws;
+ tp->suspend.waitstatus_pending_p = 1;
+
+ regcache = get_thread_regcache (tp->ptid);
+ aspace = get_regcache_aspace (regcache);
+
+ /* Only classify a stop reason for SIGTRAP stops. */
+ if (ws->kind == TARGET_WAITKIND_STOPPED
+ && ws->value.sig == GDB_SIGNAL_TRAP)
+ {
+ CORE_ADDR pc = regcache_read_pc (regcache);
+
+ adjust_pc_after_break (tp, &tp->suspend.waitstatus);
+
+ /* Check the target-reported stop reasons first (watchpoint,
+ then breakpoints, when the target supports reporting
+ them)... */
+ if (thread_stopped_by_watchpoint (tp->ptid))
+ {
+ tp->suspend.stop_reason
+ = TARGET_STOPPED_BY_WATCHPOINT;
+ }
+ else if (target_supports_stopped_by_sw_breakpoint ()
+ && thread_stopped_by_sw_breakpoint (tp->ptid))
+ {
+ tp->suspend.stop_reason
+ = TARGET_STOPPED_BY_SW_BREAKPOINT;
+ }
+ else if (target_supports_stopped_by_hw_breakpoint ()
+ && thread_stopped_by_hw_breakpoint (tp->ptid))
+ {
+ tp->suspend.stop_reason
+ = TARGET_STOPPED_BY_HW_BREAKPOINT;
+ }
+ /* ... otherwise fall back to inferring breakpoint hits from
+ the breakpoints GDB itself has inserted at PC. */
+ else if (!target_supports_stopped_by_hw_breakpoint ()
+ && hardware_breakpoint_inserted_here_p (aspace,
+ pc))
+ {
+ tp->suspend.stop_reason
+ = TARGET_STOPPED_BY_HW_BREAKPOINT;
+ }
+ else if (!target_supports_stopped_by_sw_breakpoint ()
+ && software_breakpoint_inserted_here_p (aspace,
+ pc))
+ {
+ tp->suspend.stop_reason
+ = TARGET_STOPPED_BY_SW_BREAKPOINT;
+ }
+ /* A stepping thread that trapped with no breakpoint involved
+ presumably finished its single-step. */
+ else if (!thread_has_single_step_breakpoints_set (tp)
+ && currently_stepping (tp))
+ {
+ tp->suspend.stop_reason
+ = TARGET_STOPPED_BY_SINGLE_STEP;
+ }
+ }
+}
+
+/* Stop all threads. Only valid when the target is in non-stop mode
+ (see the assert below); used e.g. when user-visible all-stop mode
+ presents a stop to the user (see stop_waiting). */
+
+static void
+stop_all_threads (void)
+{
+ /* We may need multiple passes to discover all threads. */
+ int pass;
+ int iterations = 0;
+ ptid_t entry_ptid;
+ struct cleanup *old_chain;
+
+ gdb_assert (target_is_non_stop_p ());
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads\n");
+
+ /* Restore the originally-selected thread on exit. */
+ entry_ptid = inferior_ptid;
+ old_chain = make_cleanup (switch_to_thread_cleanup, &entry_ptid);
+
+ /* Request threads to stop, and then wait for the stops. Because
+ threads we already know about can spawn more threads while we're
+ trying to stop them, and we only learn about new threads when we
+ update the thread list, do this in a loop, and keep iterating
+ until two passes find no threads that need to be stopped. */
+ for (pass = 0; pass < 2; pass++, iterations++)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: stop_all_threads, pass=%d, "
+ "iterations=%d\n", pass, iterations);
+ while (1)
+ {
+ ptid_t event_ptid;
+ struct target_waitstatus ws;
+ int need_wait = 0;
+ struct thread_info *t;
+
+ update_thread_list ();
+
+ /* Go through all threads looking for threads that we need
+ to tell the target to stop. */
+ ALL_NON_EXITED_THREADS (t)
+ {
+ if (t->executing)
+ {
+ /* If already stopping, don't request a stop again.
+ We just haven't seen the notification yet. */
+ if (!t->stop_requested)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: %s executing, "
+ "need stop\n",
+ target_pid_to_str (t->ptid));
+ target_stop (t->ptid);
+ t->stop_requested = 1;
+ }
+ else
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: %s executing, "
+ "already stopping\n",
+ target_pid_to_str (t->ptid));
+ }
+
+ if (t->stop_requested)
+ need_wait = 1;
+ }
+ else
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: %s not executing\n",
+ target_pid_to_str (t->ptid));
+
+ /* The thread may be not executing, but still be
+ resumed with a pending status to process. */
+ t->resumed = 0;
+ }
+ }
+
+ if (!need_wait)
+ break;
+
+ /* If we find new threads on the second iteration, restart
+ over. We want to see two iterations in a row with all
+ threads stopped. (pass is bumped back to 0 by the outer
+ for-loop's pass++.) */
+ if (pass > 0)
+ pass = -1;
+
+ event_ptid = wait_one (&ws);
+ if (ws.kind == TARGET_WAITKIND_NO_RESUMED)
+ {
+ /* All resumed threads exited. */
+ }
+ else if (ws.kind == TARGET_WAITKIND_EXITED
+ || ws.kind == TARGET_WAITKIND_SIGNALLED)
+ {
+ if (debug_infrun)
+ {
+ ptid_t ptid = pid_to_ptid (ws.value.integer);
+
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: %s exited while "
+ "stopping threads\n",
+ target_pid_to_str (ptid));
+ }
+ }
+ else
+ {
+ t = find_thread_ptid (event_ptid);
+ if (t == NULL)
+ t = add_thread (event_ptid);
+
+ /* The thread is stopped now, whatever the event was. */
+ t->stop_requested = 0;
+ t->executing = 0;
+ t->resumed = 0;
+ t->control.may_range_step = 0;
+
+ if (ws.kind == TARGET_WAITKIND_STOPPED
+ && ws.value.sig == GDB_SIGNAL_0)
+ {
+ /* We caught the event that we intended to catch, so
+ there's no event pending. */
+ t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
+ t->suspend.waitstatus_pending_p = 0;
+
+ if (displaced_step_fixup (t->ptid, GDB_SIGNAL_0) < 0)
+ {
+ /* Add it back to the step-over queue. */
+ if (debug_infrun)
+ {
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: displaced-step of %s "
+ "canceled: adding back to the "
+ "step-over queue\n",
+ target_pid_to_str (t->ptid));
+ }
+ t->control.trap_expected = 0;
+ thread_step_over_chain_enqueue (t);
+ }
+ }
+ else
+ {
+ /* Some other event raced with our stop request:
+ classify and save it for later processing. */
+ enum gdb_signal sig;
+ struct regcache *regcache;
+ struct address_space *aspace;
+
+ /* NOTE(review): ASPACE is declared but unused in this
+ branch — candidate for removal in a code change. */
+
+ if (debug_infrun)
+ {
+ char *statstr;
+
+ statstr = target_waitstatus_to_string (&ws);
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: target_wait %s, saving "
+ "status for %d.%ld.%ld\n",
+ statstr,
+ ptid_get_pid (t->ptid),
+ ptid_get_lwp (t->ptid),
+ ptid_get_tid (t->ptid));
+ xfree (statstr);
+ }
+
+ /* Record for later. */
+ save_waitstatus (t, &ws);
+
+ sig = (ws.kind == TARGET_WAITKIND_STOPPED
+ ? ws.value.sig : GDB_SIGNAL_0);
+
+ if (displaced_step_fixup (t->ptid, sig) < 0)
+ {
+ /* Add it back to the step-over queue. */
+ t->control.trap_expected = 0;
+ thread_step_over_chain_enqueue (t);
+ }
+
+ regcache = get_thread_regcache (t->ptid);
+ t->suspend.stop_pc = regcache_read_pc (regcache);
+
+ if (debug_infrun)
+ {
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: saved stop_pc=%s for %s "
+ "(currently_stepping=%d)\n",
+ paddress (target_gdbarch (),
+ t->suspend.stop_pc),
+ target_pid_to_str (t->ptid),
+ currently_stepping (t));
+ }
+ }
+ }
+ }
+ }
+
+ do_cleanups (old_chain);
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog, "infrun: stop_all_threads done\n");
+}
+
/* Given an execution control state that has been freshly filled in by
an event from the inferior, figure out what it means and take
appropriate action.
/* Mark the non-executing threads accordingly. In all-stop, all
threads of all processes are stopped when we get any event
reported. In non-stop mode, only the event thread stops. */
- if (!non_stop)
- set_executing (minus_one_ptid, 0);
- else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
- || ecs->ws.kind == TARGET_WAITKIND_EXITED)
- {
- ptid_t pid_ptid;
-
- /* If we're handling a process exit in non-stop mode, even
- though threads haven't been deleted yet, one would think that
- there is nothing to do, as threads of the dead process will
- be soon deleted, and threads of any other process were left
- running. However, on some targets, threads survive a process
- exit event. E.g., for the "checkpoint" command, when the
- current checkpoint/fork exits, linux-fork.c automatically
- switches to another fork from within target_mourn_inferior,
- by associating the same inferior/thread to another fork. We
- haven't mourned yet at this point, but we must mark any
- threads left in the process as not-executing so that
- finish_thread_state marks them stopped (in the user's
- perspective) if/when we present the stop to the user. */
- pid_ptid = pid_to_ptid (ptid_get_pid (ecs->ptid));
- set_executing (pid_ptid, 0);
- }
- else
- set_executing (ecs->ptid, 0);
+ {
+ ptid_t mark_ptid;
+
+ if (!target_is_non_stop_p ())
+ mark_ptid = minus_one_ptid;
+ else if (ecs->ws.kind == TARGET_WAITKIND_SIGNALLED
+ || ecs->ws.kind == TARGET_WAITKIND_EXITED)
+ {
+ /* If we're handling a process exit in non-stop mode, even
+ though threads haven't been deleted yet, one would think
+ that there is nothing to do, as threads of the dead process
+ will be soon deleted, and threads of any other process were
+ left running. However, on some targets, threads survive a
+ process exit event. E.g., for the "checkpoint" command,
+ when the current checkpoint/fork exits, linux-fork.c
+ automatically switches to another fork from within
+ target_mourn_inferior, by associating the same
+ inferior/thread to another fork. We haven't mourned yet at
+ this point, but we must mark any threads left in the
+ process as not-executing so that finish_thread_state marks
+ them stopped (in the user's perspective) if/when we present
+ the stop to the user. */
+ mark_ptid = pid_to_ptid (ptid_get_pid (ecs->ptid));
+ }
+ else
+ mark_ptid = ecs->ptid;
+
+ set_executing (mark_ptid, 0);
+
+ /* Likewise the resumed flag. */
+ set_resumed (mark_ptid, 0);
+ }
switch (ecs->ws.kind)
{
child = ecs->ws.value.related_pid;
/* In non-stop mode, also resume the other branch. */
- if (non_stop && !detach_fork)
+ if (!detach_fork && (non_stop
+ || (sched_multi && target_is_non_stop_p ())))
{
if (follow_child)
switch_to_thread (parent);
stop. */
follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
+ /* In follow_exec we may have deleted the original thread and
+ created a new one. Make sure that the event thread is the
+ execd thread for that case (this is a nop otherwise). */
+ ecs->event_thread = inferior_thread ();
+
ecs->event_thread->control.stop_bpstat
= bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
stop_pc, ecs->ptid, &ecs->ws);
fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
/* Reverse execution: target ran out of history info. */
+ /* Switch to the stopped thread. */
+ if (!ptid_equal (ecs->ptid, inferior_ptid))
+ context_switch (ecs->ptid);
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
+
delete_just_stopped_threads_single_step_breakpoints ();
- stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
+ stop_pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
observer_notify_no_history ();
stop_waiting (ecs);
return;
value_free_to_mark (mark);
}
-/* Called when we get an event that may finish an in-line or
- out-of-line (displaced stepping) step-over started previously. */
+/* Restart threads back to what they were trying to do back when we
+ paused them for an in-line step-over. The EVENT_THREAD thread is
+ ignored (the caller deals with it). */
+
static void
+restart_threads (struct thread_info *event_thread)
+{
+ struct thread_info *tp;
+ struct thread_info *step_over = NULL;
+
+ /* NOTE(review): STEP_OVER is declared but never used in this
+ function — candidate for removal in a code change. */
+
+ /* In case the instruction just stepped spawned a new thread. */
+ update_thread_list ();
+
+ ALL_NON_EXITED_THREADS (tp)
+ {
+ if (tp == event_thread)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: restart threads: "
+ "[%s] is event thread\n",
+ target_pid_to_str (tp->ptid));
+ continue;
+ }
+
+ if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: restart threads: "
+ "[%s] not meant to be running\n",
+ target_pid_to_str (tp->ptid));
+ continue;
+ }
+
+ if (tp->resumed)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: restart threads: [%s] resumed\n",
+ target_pid_to_str (tp->ptid));
+ /* A resumed thread must either really be executing, or have
+ an event already collected for later processing. */
+ gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
+ continue;
+ }
+
+ if (thread_is_in_step_over_chain (tp))
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: restart threads: "
+ "[%s] needs step-over\n",
+ target_pid_to_str (tp->ptid));
+ gdb_assert (!tp->resumed);
+ continue;
+ }
+
+
+ if (tp->suspend.waitstatus_pending_p)
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: restart threads: "
+ "[%s] has pending status\n",
+ target_pid_to_str (tp->ptid));
+ /* Don't resume it for real; just mark it resumed so that
+ do_target_wait considers its pending event. */
+ tp->resumed = 1;
+ continue;
+ }
+
+ /* If some thread needs to start a step-over at this point, it
+ should still be in the step-over queue, and thus skipped
+ above. */
+ if (thread_still_needs_step_over (tp))
+ {
+ internal_error (__FILE__, __LINE__,
+ "thread [%s] needs a step-over, but not in "
+ "step-over queue\n",
+ target_pid_to_str (tp->ptid));
+ }
+
+ if (currently_stepping (tp))
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: restart threads: [%s] was stepping\n",
+ target_pid_to_str (tp->ptid));
+ keep_going_stepped_thread (tp);
+ }
+ else
+ {
+ struct execution_control_state ecss;
+ struct execution_control_state *ecs = &ecss;
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: restart threads: [%s] continuing\n",
+ target_pid_to_str (tp->ptid));
+ reset_ecs (ecs, tp);
+ switch_to_thread (tp->ptid);
+ keep_going_pass_signal (ecs);
+ }
+ }
+}
+
+/* Callback for iterate_over_threads. Find a resumed thread that has
+ a pending waitstatus. Returns nonzero (stop iterating) when TP
+ qualifies; ARG is unused. */
+
+static int
+resumed_thread_with_pending_status (struct thread_info *tp,
+ void *arg)
+{
+ return (tp->resumed
+ && tp->suspend.waitstatus_pending_p);
+}
+
+/* Called when we get an event that may finish an in-line or
+ out-of-line (displaced stepping) step-over started previously.
+ Return true if the event is processed and we should go back to the
+ event loop; false if the caller should continue processing the
+ event. */
+
+static int
finish_step_over (struct execution_control_state *ecs)
{
+ int had_step_over_info;
+
displaced_step_fixup (ecs->ptid,
ecs->event_thread->suspend.stop_signal);
- if (step_over_info_valid_p ())
+ had_step_over_info = step_over_info_valid_p ();
+
+ if (had_step_over_info)
{
/* If we're stepping over a breakpoint with all threads locked,
then only the thread that was stepped should be reporting
clear_step_over_info ();
}
- if (!non_stop)
- return;
+ if (!target_is_non_stop_p ())
+ return 0;
+
+ /* Start a new step-over in another thread if there's one that
+ needs it. */
+ start_step_over ();
+
+ /* If we were stepping over a breakpoint before, and haven't started
+ a new in-line step-over sequence, then restart all other threads
+ (except the event thread). We can't do this in all-stop, as then
+ e.g., we wouldn't be able to issue any other remote packet until
+ these other threads stop. */
+ if (had_step_over_info && !step_over_info_valid_p ())
+ {
+ struct thread_info *pending;
+
+ /* If we only have threads with pending statuses, the restart
+ below won't restart any thread and so nothing re-inserts the
+ breakpoint we just stepped over. But we need it inserted
+ when we later process the pending events, otherwise if
+ another thread has a pending event for this breakpoint too,
+ we'd discard its event (because the breakpoint that
+ originally caused the event was no longer inserted). */
+ context_switch (ecs->ptid);
+ insert_breakpoints ();
+
+ restart_threads (ecs->event_thread);
+
+ /* If we have events pending, go through handle_inferior_event
+ again, picking up a pending event at random. This avoids
+ thread starvation. */
+
+ /* But not if we just stepped over a watchpoint in order to let
+ the instruction execute so we can evaluate its expression.
+ The set of watchpoints that triggered is recorded in the
+ breakpoint objects themselves (see bp->watchpoint_triggered).
+ If we processed another event first, that other event could
+ clobber this info. */
+ if (ecs->event_thread->stepping_over_watchpoint)
+ return 0;
+
+ pending = iterate_over_threads (resumed_thread_with_pending_status,
+ NULL);
+ if (pending != NULL)
+ {
+ struct thread_info *tp = ecs->event_thread;
+ struct regcache *regcache;
+
+ if (debug_infrun)
+ {
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: found resumed threads with "
+ "pending events, saving status\n");
+ }
+
+ gdb_assert (pending != tp);
+
+ /* Record the event thread's event for later. */
+ save_waitstatus (tp, &ecs->ws);
+ /* This was cleared early, by handle_inferior_event. Set it
+ so this pending event is considered by
+ do_target_wait. */
+ tp->resumed = 1;
+
+ gdb_assert (!tp->executing);
+
+ regcache = get_thread_regcache (tp->ptid);
+ tp->suspend.stop_pc = regcache_read_pc (regcache);
+
+ if (debug_infrun)
+ {
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: saved stop_pc=%s for %s "
+ "(currently_stepping=%d)\n",
+ paddress (target_gdbarch (),
+ tp->suspend.stop_pc),
+ target_pid_to_str (tp->ptid),
+ currently_stepping (tp));
+ }
+
+ /* This in-line step-over finished; clear this so we won't
+ start a new one. This is what handle_signal_stop would
+ do, if we returned false. */
+ tp->stepping_over_breakpoint = 0;
+
+ /* Wake up the event loop again. */
+ mark_async_event_handler (infrun_async_inferior_event_token);
+
+ prepare_to_wait (ecs);
+ return 1;
+ }
+ }
- /* Start a new step-over in another thread if there's one that
- needs it. */
- start_step_over ();
+ return 0;
}
/* Come here when the program has stopped with a signal. */
/* Do we need to clean up the state of a thread that has
completed a displaced single-step? (Doing so usually affects
the PC, so do it here, before we set stop_pc.) */
- finish_step_over (ecs);
+ if (finish_step_over (ecs))
+ return;
/* If we either finished a single-step or hit a breakpoint, but
the user wanted this thread to be stopped, pretend we got a
&& ecs->event_thread->control.trap_expected
&& ecs->event_thread->control.step_resume_breakpoint == NULL)
{
+ int was_in_line;
+
/* We were just starting a new sequence, attempting to
single-step off of a breakpoint and expecting a SIGTRAP.
Instead this signal arrives. This signal will take us out
"infrun: signal arrived while stepping over "
"breakpoint\n");
+ was_in_line = step_over_info_valid_p ();
+ clear_step_over_info ();
insert_hp_step_resume_breakpoint_at_frame (frame);
ecs->event_thread->step_after_step_resume_breakpoint = 1;
/* Reset trap_expected to ensure breakpoints are re-inserted. */
ecs->event_thread->control.trap_expected = 0;
+ if (target_is_non_stop_p ())
+ {
+ /* Either "set non-stop" is "on", or the target is
+ always in non-stop mode. In this case, we have a bit
+ more work to do. Resume the current thread, and if
+ we had paused all threads, restart them while the
+ signal handler runs. */
+ keep_going (ecs);
+
+ if (was_in_line)
+ {
+ restart_threads (ecs->event_thread);
+ }
+ else if (debug_infrun)
+ {
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: no need to restart threads\n");
+ }
+ return;
+ }
+
/* If we were nexting/stepping some other thread, switch to
it, so that we don't continue it, losing control. */
if (!switch_back_to_stepped_thread (ecs))
"infrun: signal may take us out of "
"single-step range\n");
+ clear_step_over_info ();
insert_hp_step_resume_breakpoint_at_frame (frame);
ecs->event_thread->step_after_step_resume_breakpoint = 1;
/* Reset trap_expected to ensure breakpoints are re-inserted. */
stop_stack_dummy = what.call_dummy;
}
+ /* A few breakpoint types have callbacks associated (e.g.,
+ bp_jit_event). Run them now. */
+ bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
+
/* If we hit an internal event that triggers symbol changes, the
current frame will be invalidated within bpstat_what (e.g., if we
hit an internal solib event). Re-fetch it. */
static int
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
- if (!non_stop)
+ if (!target_is_non_stop_p ())
{
struct thread_info *tp;
struct thread_info *stepping_thread;
ALL_NON_EXITED_THREADS (tp)
{
- /* Ignore threads of processes we're not resuming. */
+ /* Ignore threads of processes the caller is not
+ resuming. */
if (!sched_multi
- && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
+ && ptid_get_pid (tp->ptid) != ptid_get_pid (ecs->ptid))
continue;
/* When stepping over a breakpoint, we lock all threads
except the one that needs to move past the breakpoint.
If a non-event thread has this set, the "incomplete
step-over" check above should have caught it earlier. */
- gdb_assert (!tp->control.trap_expected);
+ if (tp->control.trap_expected)
+ {
+ internal_error (__FILE__, __LINE__,
+ "[%s] has inconsistent state: "
+ "trap_expected=%d\n",
+ target_pid_to_str (tp->ptid),
+ tp->control.trap_expected);
+ }
/* Did we find the stepping thread? */
if (tp->control.step_range_end)
if (stepping_thread != NULL)
{
- struct frame_info *frame;
- struct gdbarch *gdbarch;
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: switching back to stepped thread\n");
- tp = stepping_thread;
-
- /* If the stepping thread exited, then don't try to switch
- back and resume it, which could fail in several different
- ways depending on the target. Instead, just keep going.
-
- We can find a stepping dead thread in the thread list in
- two cases:
-
- - The target supports thread exit events, and when the
- target tries to delete the thread from the thread list,
- inferior_ptid pointed at the exiting thread. In such
- case, calling delete_thread does not really remove the
- thread from the list; instead, the thread is left listed,
- with 'exited' state.
-
- - The target's debug interface does not support thread
- exit events, and so we have no idea whatsoever if the
- previously stepping thread is still alive. For that
- reason, we need to synchronously query the target
- now. */
- if (is_exited (tp->ptid)
- || !target_thread_alive (tp->ptid))
+ if (keep_going_stepped_thread (stepping_thread))
{
- if (debug_infrun)
- fprintf_unfiltered (gdb_stdlog,
- "infrun: not switching back to "
- "stepped thread, it has vanished\n");
-
- delete_thread (tp->ptid);
- keep_going (ecs);
+ prepare_to_wait (ecs);
return 1;
}
+ }
+ }
- if (debug_infrun)
- fprintf_unfiltered (gdb_stdlog,
- "infrun: switching back to stepped thread\n");
+ return 0;
+}
- ecs->event_thread = tp;
- ecs->ptid = tp->ptid;
- context_switch (ecs->ptid);
+/* Set a previously stepped thread back to stepping. Returns true on
+ success, false if the resume is not possible (e.g., the thread
+ vanished). */
- stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
- frame = get_current_frame ();
- gdbarch = get_frame_arch (frame);
+static int
+keep_going_stepped_thread (struct thread_info *tp)
+{
+ struct frame_info *frame;
+ struct gdbarch *gdbarch;
+ struct execution_control_state ecss;
+ struct execution_control_state *ecs = &ecss;
- /* If the PC of the thread we were trying to single-step has
- changed, then that thread has trapped or been signaled,
- but the event has not been reported to GDB yet. Re-poll
- the target looking for this particular thread's event
- (i.e. temporarily enable schedlock) by:
+ /* If the stepping thread exited, then don't try to switch back and
+ resume it, which could fail in several different ways depending
+ on the target. Instead, just keep going.
- - setting a break at the current PC
- - resuming that particular thread, only (by setting
- trap expected)
+ We can find a stepping dead thread in the thread list in two
+ cases:
- This prevents us continuously moving the single-step
- breakpoint forward, one instruction at a time,
- overstepping. */
+ - The target supports thread exit events, and when the target
+ tries to delete the thread from the thread list, inferior_ptid
+ pointed at the exiting thread. In such case, calling
+ delete_thread does not really remove the thread from the list;
+ instead, the thread is left listed, with 'exited' state.
- if (stop_pc != tp->prev_pc)
- {
- ptid_t resume_ptid;
+ - The target's debug interface does not support thread exit
+ events, and so we have no idea whatsoever if the previously
+ stepping thread is still alive. For that reason, we need to
+ synchronously query the target now. */
- if (debug_infrun)
- fprintf_unfiltered (gdb_stdlog,
- "infrun: expected thread advanced also\n");
-
- /* Clear the info of the previous step-over, as it's no
- longer valid. It's what keep_going would do too, if
- we called it. Must do this before trying to insert
- the sss breakpoint, otherwise if we were previously
- trying to step over this exact address in another
- thread, the breakpoint ends up not installed. */
- clear_step_over_info ();
-
- insert_single_step_breakpoint (get_frame_arch (frame),
- get_frame_address_space (frame),
- stop_pc);
-
- resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
- do_target_resume (resume_ptid,
- currently_stepping (tp), GDB_SIGNAL_0);
- prepare_to_wait (ecs);
- }
- else
- {
- if (debug_infrun)
- fprintf_unfiltered (gdb_stdlog,
- "infrun: expected thread still "
- "hasn't advanced\n");
- keep_going_pass_signal (ecs);
- }
+ if (is_exited (tp->ptid)
+ || !target_thread_alive (tp->ptid))
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: not resuming previously "
+ "stepped thread, it has vanished\n");
- return 1;
- }
+ delete_thread (tp->ptid);
+ return 0;
}
- return 0;
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: resuming previously stepped thread\n");
+
+ reset_ecs (ecs, tp);
+ switch_to_thread (tp->ptid);
+
+ stop_pc = regcache_read_pc (get_thread_regcache (tp->ptid));
+ frame = get_current_frame ();
+ gdbarch = get_frame_arch (frame);
+
+ /* If the PC of the thread we were trying to single-step has
+ changed, then that thread has trapped or been signaled, but the
+ event has not been reported to GDB yet. Re-poll the target
+ looking for this particular thread's event (i.e. temporarily
+ enable schedlock) by:
+
+ - setting a break at the current PC
+ - resuming that particular thread, only (by setting trap
+ expected)
+
+ This prevents us continuously moving the single-step breakpoint
+ forward, one instruction at a time, overstepping. */
+
+ if (stop_pc != tp->prev_pc)
+ {
+ ptid_t resume_ptid;
+
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: expected thread advanced also (%s -> %s)\n",
+ paddress (target_gdbarch (), tp->prev_pc),
+ paddress (target_gdbarch (), stop_pc));
+
+ /* Clear the info of the previous step-over, as it's no longer
+ valid (if the thread was trying to step over a breakpoint, it
+ has already succeeded). It's what keep_going would do too,
+ if we called it. Do this before trying to insert the sss
+ breakpoint, otherwise if we were previously trying to step
+ over this exact address in another thread, the breakpoint is
+ skipped. */
+ clear_step_over_info ();
+ tp->control.trap_expected = 0;
+
+ insert_single_step_breakpoint (get_frame_arch (frame),
+ get_frame_address_space (frame),
+ stop_pc);
+
+ tp->resumed = 1;
+ resume_ptid = internal_resume_ptid (tp->control.stepping_command);
+ do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
+ }
+ else
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: expected thread still hasn't advanced\n");
+
+ keep_going_pass_signal (ecs);
+ }
+ return 1;
}
-/* Is thread TP in the middle of single-stepping? */
+/* Is thread TP in the middle of (software or hardware)
+ single-stepping? (Note the result of this function must never be
+ passed directly as target_resume's STEP parameter.) */
static int
currently_stepping (struct thread_info *tp)
{
TRY
{
- struct symbol *vsym;
+ struct block_symbol vsym;
struct value *value;
CORE_ADDR handler;
struct breakpoint *bp;
- vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN,
- NULL).symbol;
- value = read_var_value (vsym, frame);
+ vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
+ value = read_var_value (vsym.symbol, vsym.block, frame);
/* If the value was optimized out, revert to the old behavior. */
if (! value_optimized_out (value))
{
/* Let callers know we don't want to wait for the inferior anymore. */
ecs->wait_some_more = 0;
+
+ /* If all-stop, but the target is always in non-stop mode, stop all
+ threads now that we're presenting the stop to the user. */
+ if (!non_stop && target_is_non_stop_p ())
+ stop_all_threads ();
}
/* Like keep_going, but passes the signal to the inferior, even if the
struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
gdb_assert (ptid_equal (ecs->event_thread->ptid, inferior_ptid));
+ gdb_assert (!ecs->event_thread->resumed);
/* Save the pc before execution, to compare with pc after stop. */
ecs->event_thread->prev_pc
discard_cleanups (old_cleanups);
resume (ecs->event_thread->suspend.stop_signal);
}
+ else if (step_over_info_valid_p ())
+ {
+ /* Another thread is stepping over a breakpoint in-line. If
+ this thread needs a step-over too, queue the request. In
+ either case, this resume must be deferred for later. */
+ struct thread_info *tp = ecs->event_thread;
+
+ if (ecs->hit_singlestep_breakpoint
+ || thread_still_needs_step_over (tp))
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: step-over already in progress: "
+ "step-over for %s deferred\n",
+ target_pid_to_str (tp->ptid));
+ thread_step_over_chain_enqueue (tp);
+ }
+ else
+ {
+ if (debug_infrun)
+ fprintf_unfiltered (gdb_stdlog,
+ "infrun: step-over in progress: "
+ "resume of %s deferred\n",
+ target_pid_to_str (tp->ptid));
+ }
+
+ discard_cleanups (old_cleanups);
+ }
else
{
struct regcache *regcache = get_current_regcache ();
watchpoint. The instruction copied to the scratch pad would
still trigger the watchpoint. */
if (remove_bp
- && (remove_wps
- || !use_displaced_stepping (get_regcache_arch (regcache))))
+ && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
{
set_step_over_info (get_regcache_aspace (regcache),
regcache_read_pc (regcache), remove_wps);
}
else if (remove_wps)
set_step_over_info (NULL, 0, remove_wps);
- else
- clear_step_over_info ();
+
+ /* If we now need to do an in-line step-over, we need to stop
+ all other threads. Note this must be done before
+ insert_breakpoints below, because that removes the breakpoint
+ we're about to step over, otherwise other threads could miss
+ it. */
+ if (step_over_info_valid_p () && target_is_non_stop_p ())
+ stop_all_threads ();
/* Stop stepping if inserting breakpoints fails. */
TRY
if (debug_infrun)
fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
- /* This is the old end of the while loop. Let everybody know we
- want to wait for the inferior some more and get called again
- soon. */
ecs->wait_some_more = 1;
+
+ if (!target_is_async_p ())
+ mark_infrun_async_event_handler ();
}
/* We are done with the step range of a step/next/si/ni command.
bpstat_print contains the logic deciding in detail what to print,
based on the event(s) that just occurred. */
-void
-print_stop_event (struct target_waitstatus *ws)
+static void
+print_stop_location (struct target_waitstatus *ws)
{
int bpstat_ret;
enum print_what source_flag;
SRC_AND_LOC: Print location and source line. */
if (do_frame_printing)
print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
+}
+
+/* Cleanup that restores a previous current uiout. */
+
+static void
+restore_current_uiout_cleanup (void *arg)
+{
+ struct ui_out *saved_uiout = (struct ui_out *) arg;
+
+ current_uiout = saved_uiout;
+}
+
+/* See infrun.h. */
+
+void
+print_stop_event (struct ui_out *uiout)
+{
+ struct cleanup *old_chain;
+ struct target_waitstatus last;
+ ptid_t last_ptid;
+ struct thread_info *tp;
+
+ get_last_target_status (&last_ptid, &last);
+
+ old_chain = make_cleanup (restore_current_uiout_cleanup, current_uiout);
+ current_uiout = uiout;
+
+ print_stop_location (&last);
/* Display the auto-display expressions. */
do_displays ();
-}
-/* Here to return control to GDB when the inferior stops for real.
- Print appropriate messages, remove breakpoints, give terminal our modes.
+ do_cleanups (old_chain);
+
+ tp = inferior_thread ();
+ if (tp->thread_fsm != NULL
+ && thread_fsm_finished_p (tp->thread_fsm))
+ {
+ struct return_value_info *rv;
+
+ rv = thread_fsm_return_value (tp->thread_fsm);
+ if (rv != NULL)
+ print_return_value (uiout, rv);
+ }
+}
- STOP_PRINT_FRAME nonzero means print the executing frame
- (pc, function, args, file, line number and line text).
- BREAKPOINTS_FAILED nonzero means stop was due to error
- attempting to insert breakpoints. */
+/* See infrun.h. */
void
+maybe_remove_breakpoints (void)
+{
+ if (!breakpoints_should_be_inserted_now () && target_has_execution)
+ {
+ if (remove_breakpoints ())
+ {
+ target_terminal_ours_for_output ();
+ printf_filtered (_("Cannot remove breakpoints because "
+ "program is no longer writable.\nFurther "
+ "execution is probably impossible.\n"));
+ }
+ }
+}
+
+/* The execution context that just caused a normal stop. */
+
+struct stop_context
+{
+ /* The stop ID. */
+ ULONGEST stop_id;
+
+ /* The event PTID. */
+
+ ptid_t ptid;
+
+ /* If stopped for a thread event, this is the thread that caused the
+ stop. */
+ struct thread_info *thread;
+
+ /* The inferior that caused the stop. */
+ int inf_num;
+};
+
+/* Returns a new stop context. If stopped for a thread event, this
+ takes a strong reference to the thread. */
+
+static struct stop_context *
+save_stop_context (void)
+{
+ struct stop_context *sc = XNEW (struct stop_context);
+
+ sc->stop_id = get_stop_id ();
+ sc->ptid = inferior_ptid;
+ sc->inf_num = current_inferior ()->num;
+
+ if (!ptid_equal (inferior_ptid, null_ptid))
+ {
+ /* Take a strong reference so that the thread can't be deleted
+ yet. */
+ sc->thread = inferior_thread ();
+ sc->thread->refcount++;
+ }
+ else
+ sc->thread = NULL;
+
+ return sc;
+}
+
+/* Release a stop context previously created with save_stop_context.
+ Releases the strong reference to the thread as well. */
+
+static void
+release_stop_context_cleanup (void *arg)
+{
+ struct stop_context *sc = (struct stop_context *) arg;
+
+ if (sc->thread != NULL)
+ sc->thread->refcount--;
+ xfree (sc);
+}
+
+/* Return true if the current context no longer matches the saved stop
+ context. */
+
+static int
+stop_context_changed (struct stop_context *prev)
+{
+ if (!ptid_equal (prev->ptid, inferior_ptid))
+ return 1;
+ if (prev->inf_num != current_inferior ()->num)
+ return 1;
+ if (prev->thread != NULL && prev->thread->state != THREAD_STOPPED)
+ return 1;
+ if (get_stop_id () != prev->stop_id)
+ return 1;
+ return 0;
+}
+
+/* See infrun.h. */
+
+int
normal_stop (void)
{
struct target_waitstatus last;
get_last_target_status (&last_ptid, &last);
+ new_stop_id ();
+
/* If an exception is thrown from this point on, make sure to
propagate GDB's knowledge of the executing state to the
frontend/user running state. A QUIT is an easy exception to see
}
/* Note: this depends on the update_thread_list call above. */
- if (!breakpoints_should_be_inserted_now () && target_has_execution)
- {
- if (remove_breakpoints ())
- {
- target_terminal_ours_for_output ();
- printf_filtered (_("Cannot remove breakpoints because "
- "program is no longer writable.\nFurther "
- "execution is probably impossible.\n"));
- }
- }
+ maybe_remove_breakpoints ();
/* If an auto-display called a function and that got a signal,
delete that auto-display to avoid an infinite recursion. */
if (stopped_by_random_signal)
disable_current_display ();
- /* Notify observers if we finished a "step"-like command, etc. */
- if (target_has_execution
- && last.kind != TARGET_WAITKIND_SIGNALLED
- && last.kind != TARGET_WAITKIND_EXITED
- && inferior_thread ()->control.stop_step)
- {
- /* But not if in the middle of doing a "step n" operation for
- n > 1 */
- if (inferior_thread ()->step_multi)
- goto done;
-
- observer_notify_end_stepping_range ();
- }
-
target_terminal_ours ();
async_enable_stdin ();
- /* Set the current source location. This will also happen if we
- display the frame below, but the current SAL will be incorrect
- during a user hook-stop function. */
- if (has_stack_frames () && !stop_stack_dummy)
- set_current_sal_from_frame (get_current_frame ());
-
- /* Let the user/frontend see the threads as stopped, but defer to
- call_function_by_hand if the thread finished an infcall
- successfully. We may be e.g., evaluating a breakpoint condition.
- In that case, the thread had state THREAD_RUNNING before the
- infcall, and shall remain marked running, all without informing
- the user/frontend about state transition changes. */
- if (target_has_execution
- && inferior_thread ()->control.in_infcall
- && stop_stack_dummy == STOP_STACK_DUMMY)
- discard_cleanups (old_chain);
- else
- do_cleanups (old_chain);
-
- /* Look up the hook_stop and run it (CLI internally handles problem
- of stop_command's pre-hook not existing). */
- if (stop_command)
- catch_errors (hook_stop_stub, stop_command,
- "Error while running hook_stop:\n", RETURN_MASK_ALL);
-
- if (!has_stack_frames ())
- goto done;
-
- if (last.kind == TARGET_WAITKIND_SIGNALLED
- || last.kind == TARGET_WAITKIND_EXITED)
- goto done;
+ /* Let the user/frontend see the threads as stopped. */
+ do_cleanups (old_chain);
/* Select innermost stack frame - i.e., current frame is frame 0,
- and current location is based on that.
- Don't do this on return from a stack dummy routine,
- or if the program has exited. */
+ and current location is based on that. Handle the case where the
+ dummy call is returning after being stopped. E.g. the dummy call
+ previously hit a breakpoint. (If the dummy call returns
+ normally, we won't reach here.) Do this before the stop hook is
+ run, so that it doesn't get to see the temporary dummy frame,
+ which is not where we'll present the stop. */
+ if (has_stack_frames ())
+ {
+ if (stop_stack_dummy == STOP_STACK_DUMMY)
+ {
+ /* Pop the empty frame that contains the stack dummy. This
+ also restores inferior state prior to the call (struct
+ infcall_suspend_state). */
+ struct frame_info *frame = get_current_frame ();
+
+ gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
+ frame_pop (frame);
+ /* frame_pop calls reinit_frame_cache as the last thing it
+ does which means there's now no selected frame. */
+ }
- if (!stop_stack_dummy)
- {
select_frame (get_current_frame ());
- /* If --batch-silent is enabled then there's no need to print the current
- source location, and to try risks causing an error message about
- missing source files. */
- if (stop_print_frame && !batch_silent)
- print_stop_event (&last);
- }
-
- if (stop_stack_dummy == STOP_STACK_DUMMY)
- {
- /* Pop the empty frame that contains the stack dummy.
- This also restores inferior state prior to the call
- (struct infcall_suspend_state). */
- struct frame_info *frame = get_current_frame ();
-
- gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
- frame_pop (frame);
- /* frame_pop() calls reinit_frame_cache as the last thing it
- does which means there's currently no selected frame. We
- don't need to re-establish a selected frame if the dummy call
- returns normally, that will be done by
- restore_infcall_control_state. However, we do have to handle
- the case where the dummy call is returning after being
- stopped (e.g. the dummy call previously hit a breakpoint).
- We can't know which case we have so just always re-establish
- a selected frame here. */
- select_frame (get_current_frame ());
+ /* Set the current source location. */
+ set_current_sal_from_frame (get_current_frame ());
}
-done:
- annotate_stopped ();
-
- /* Suppress the stop observer if we're in the middle of:
+ /* Look up the hook_stop and run it (CLI internally handles problem
+ of stop_command's pre-hook not existing). */
+ if (stop_command != NULL)
+ {
+ struct stop_context *saved_context = save_stop_context ();
+ struct cleanup *old_chain
+ = make_cleanup (release_stop_context_cleanup, saved_context);
- - a step n (n > 1), as there still more steps to be done.
+ catch_errors (hook_stop_stub, stop_command,
+ "Error while running hook_stop:\n", RETURN_MASK_ALL);
- - a "finish" command, as the observer will be called in
- finish_command_continuation, so it can include the inferior
- function's return value.
+ /* If the stop hook resumes the target, then there's no point in
+ trying to notify about the previous stop; its context is
+ gone. Likewise if the command switches thread or inferior --
+ the observers would print a stop for the wrong
+ thread/inferior. */
+ if (stop_context_changed (saved_context))
+ {
+ do_cleanups (old_chain);
+ return 1;
+ }
+ do_cleanups (old_chain);
+ }
- - calling an inferior function, as we pretend we inferior didn't
- run at all. The return value of the call is handled by the
- expression evaluator, through call_function_by_hand. */
+ /* Notify observers about the stop. This is where the interpreters
+ print the stop event. */
+ if (!ptid_equal (inferior_ptid, null_ptid))
+ observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
+ stop_print_frame);
+ else
+ observer_notify_normal_stop (NULL, stop_print_frame);
- if (!target_has_execution
- || last.kind == TARGET_WAITKIND_SIGNALLED
- || last.kind == TARGET_WAITKIND_EXITED
- || last.kind == TARGET_WAITKIND_NO_RESUMED
- || (!(inferior_thread ()->step_multi
- && inferior_thread ()->control.stop_step)
- && !(inferior_thread ()->control.stop_bpstat
- && inferior_thread ()->control.proceed_to_finish)
- && !inferior_thread ()->control.in_infcall))
- {
- if (!ptid_equal (inferior_ptid, null_ptid))
- observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
- stop_print_frame);
- else
- observer_notify_normal_stop (NULL, stop_print_frame);
- }
+ annotate_stopped ();
if (target_has_execution)
{
longer needed. Keeping those around slows down things linearly.
Note that this never removes the current inferior. */
prune_inferiors ();
+
+ return 0;
}
static int
size_t len = TYPE_LENGTH (type);
struct cleanup *back_to;
- siginfo_data = xmalloc (len);
+ siginfo_data = (gdb_byte *) xmalloc (len);
back_to = make_cleanup (xfree, siginfo_data);
 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
static void
do_restore_infcall_suspend_state_cleanup (void *state)
{
- restore_infcall_suspend_state (state);
+ restore_infcall_suspend_state ((struct infcall_suspend_state *) state);
}
struct cleanup *
struct infcall_control_state *
save_infcall_control_state (void)
{
- struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
+ struct infcall_control_state *inf_status =
+ XNEW (struct infcall_control_state);
struct thread_info *tp = inferior_thread ();
struct inferior *inf = current_inferior ();
static void
do_restore_infcall_control_state_cleanup (void *sts)
{
- restore_infcall_control_state (sts);
+ restore_infcall_control_state ((struct infcall_control_state *) sts);
}
struct cleanup *
static void
restore_inferior_ptid (void *arg)
{
- ptid_t *saved_ptid_ptr = arg;
+ ptid_t *saved_ptid_ptr = (ptid_t *) arg;
inferior_ptid = *saved_ptid_ptr;
xfree (arg);
struct cleanup *
save_inferior_ptid (void)
{
- ptid_t *saved_ptid_ptr;
+ ptid_t *saved_ptid_ptr = XNEW (ptid_t);
- saved_ptid_ptr = xmalloc (sizeof (ptid_t));
*saved_ptid_ptr = inferior_ptid;
return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
}
NULL
};
+/* Callback for infrun's target events source. This is marked when a
+ thread has a pending status to process. */
+
+static void
+infrun_async_inferior_event_handler (gdb_client_data data)
+{
+ inferior_event_handler (INF_REG_EVENT, NULL);
+}
+
void
_initialize_infrun (void)
{
int numsigs;
struct cmd_list_element *c;
+ /* Register extra event sources in the event loop. */
+ infrun_async_inferior_event_token
+ = create_async_event_handler (infrun_async_inferior_event_handler, NULL);
+
add_info ("signals", signals_info, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
&showlist);
numsigs = (int) GDB_SIGNAL_LAST;
- signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
- signal_print = (unsigned char *)
- xmalloc (sizeof (signal_print[0]) * numsigs);
- signal_program = (unsigned char *)
- xmalloc (sizeof (signal_program[0]) * numsigs);
- signal_catch = (unsigned char *)
- xmalloc (sizeof (signal_catch[0]) * numsigs);
- signal_pass = (unsigned char *)
- xmalloc (sizeof (signal_pass[0]) * numsigs);
+ signal_stop = XNEWVEC (unsigned char, numsigs);
+ signal_print = XNEWVEC (unsigned char, numsigs);
+ signal_program = XNEWVEC (unsigned char, numsigs);
+ signal_catch = XNEWVEC (unsigned char, numsigs);
+ signal_pass = XNEWVEC (unsigned char, numsigs);
for (i = 0; i < numsigs; i++)
{
signal_stop[i] = 1;
scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
-off == no locking (threads may preempt at any time)\n\
-on == full locking (no thread except the current thread may run)\n\
-step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
- In this mode, other threads may run during other commands."),
+off == no locking (threads may preempt at any time)\n\
+on == full locking (no thread except the current thread may run)\n\
+ This applies to both normal execution and replay mode.\n\
+step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
+ In this mode, other threads may run during other commands.\n\
+ This applies to both normal execution and replay mode.\n\
+replay == scheduler locked in replay mode and unlocked during normal execution."),
set_schedlock_func, /* traps on target vector */
show_scheduler_mode,
&setlist, &showlist);