+#ifdef __NR_tkill
+ {
+ static int tkill_failed;
+
+ if (!tkill_failed)
+ {
+ int ret;
+
+ errno = 0;
+ ret = syscall (__NR_tkill, lwpid, signo);
+ if (errno != ENOSYS)
+ return ret;
+ tkill_failed = 1;
+ }
+ }
+#endif
+
+ return kill (lwpid, signo);
+}
+
+/* Request a stop of LWP. Just queues a SIGSTOP; the stop event is
+ collected later through the normal event loop. */
+void
+linux_stop_lwp (struct lwp_info *lwp)
+{
+ send_sigstop (lwp);
+}
+
+/* Queue a SIGSTOP for LWP, unless one is already expected, and record
+ that a stop is pending so we don't send duplicates. */
+static void
+send_sigstop (struct lwp_info *lwp)
+{
+ int pid;
+
+ pid = lwpid_of (get_lwp_thread (lwp));
+
+ /* If we already have a pending stop signal for this process, don't
+ send another. */
+ if (lwp->stop_expected)
+ {
+ if (debug_threads)
+ debug_printf ("Have pending sigstop for lwp %d\n", pid);
+
+ return;
+ }
+
+ if (debug_threads)
+ debug_printf ("Sending sigstop to lwp %d\n", pid);
+
+ /* Mark the stop as expected before signalling, so the wait side
+ observes consistent state when the SIGSTOP event arrives. */
+ lwp->stop_expected = 1;
+ kill_lwp (pid, SIGSTOP);
+}
+
+/* find_inferior callback: queue a SIGSTOP for the LWP behind ENTRY,
+   unless it is EXCEPT or is already stopped.  Always returns 0 so the
+   traversal visits every thread.  */
+
+static int
+send_sigstop_callback (struct inferior_list_entry *entry, void *except)
+{
+  struct lwp_info *lwp = get_thread_lwp ((struct thread_info *) entry);
+
+  /* Skip the excepted LWP and anything that is already stopped.  */
+  if (lwp == except || lwp->stopped)
+    return 0;
+
+  send_sigstop (lwp);
+  return 0;
+}
+
+/* find_inferior callback: bump the suspend count of the LWP behind
+   ENTRY (unless it is EXCEPT), then reuse the plain stop callback to
+   queue a SIGSTOP if it is still running.  */
+
+static int
+suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
+				   void *except)
+{
+  struct lwp_info *lwp = get_thread_lwp ((struct thread_info *) entry);
+
+  /* Never touch the LWP we were asked to leave alone.  */
+  if (lwp == except)
+    return 0;
+
+  lwp->suspended++;
+
+  return send_sigstop_callback (entry, except);
+}
+
+/* Record that LWP exited with wait status WSTAT. Marks it dead,
+ stashes the exit status for later reporting, and clears any
+ expectation of further stop events from it. */
+static void
+mark_lwp_dead (struct lwp_info *lwp, int wstat)
+{
+ /* It's dead, really. */
+ lwp->dead = 1;
+
+ /* Store the exit status for later. */
+ lwp->status_pending_p = 1;
+ lwp->status_pending = wstat;
+
+ /* Prevent trying to stop it. */
+ lwp->stopped = 1;
+
+ /* No further stops are expected from a dead lwp. */
+ lwp->stop_expected = 0;
+}
+
+/* Wait for all children to stop for the SIGSTOPs we just queued. */
+
+static void
+wait_for_sigstop (void)
+{
+ struct thread_info *saved_thread;
+ ptid_t saved_tid;
+ int wstat;
+ int ret;
+
+ /* Remember which thread was current so we can restore it after the
+ wait loop, by id in case the thread object itself goes away. */
+ saved_thread = current_thread;
+ if (saved_thread != NULL)
+ saved_tid = saved_thread->entry.id;
+ else
+ saved_tid = null_ptid; /* avoid bogus unused warning */
+
+ if (debug_threads)
+ debug_printf ("wait_for_sigstop: pulling events\n");
+
+ /* Passing NULL_PTID as filter indicates we want all events to be
+ left pending. Eventually this returns when there are no
+ unwaited-for children left. */
+ ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
+ &wstat, __WALL);
+ /* -1 here means no unwaited-for children remained. */
+ gdb_assert (ret == -1);
+
+ /* Restore the previously current thread, unless it died while we
+ were waiting. */
+ if (saved_thread == NULL || linux_thread_alive (saved_tid))
+ current_thread = saved_thread;
+ else
+ {
+ if (debug_threads)
+ debug_printf ("Previously current thread died.\n");
+
+ if (non_stop)
+ {
+ /* We can't change the current inferior behind GDB's back,
+ otherwise, a subsequent command may apply to the wrong
+ process. */
+ current_thread = NULL;
+ }
+ else
+ {
+ /* Set a valid thread as current. */
+ set_desired_thread (0);
+ }
+ }
+}
+
+/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
+ move it out, because we need to report the stop event to GDB. For
+ example, if the user puts a breakpoint in the jump pad, it's
+ because she wants to debug it. */
+
+static int
+stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
+{
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
+
+ /* Only meaningful for a fully stopped, unsuspended LWP. */
+ gdb_assert (lwp->suspended == 0);
+ gdb_assert (lwp->stopped);
+
+ /* Allow debugging the jump pad, gdb_collect, etc.. */
+ return (supports_fast_tracepoints ()
+ && agent_loaded_p ()
+ && (gdb_breakpoint_here (lwp->stop_pc)
+ || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
+ || thread->last_resume_kind == resume_step)
+ && linux_fast_tracepoint_collecting (lwp, NULL));
+}
+
+/* If the LWP behind ENTRY stopped inside a fast tracepoint jump pad
+ for a reason GDB doesn't need to see, defer any pending signal and
+ set the LWP running so it can exit the pad; otherwise suspend it in
+ place (it will be stabilized or reported later). */
+static void
+move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
+{
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
+ int *wstat;
+
+ gdb_assert (lwp->suspended == 0);
+ gdb_assert (lwp->stopped);
+
+ wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
+
+ /* Allow debugging the jump pad, gdb_collect, etc. */
+ if (!gdb_breakpoint_here (lwp->stop_pc)
+ && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
+ && thread->last_resume_kind != resume_step
+ && maybe_move_out_of_jump_pad (lwp, wstat))
+ {
+ if (debug_threads)
+ debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
+ lwpid_of (thread));
+
+ if (wstat)
+ {
+ /* Convert the pending status into a deferred signal, to be
+ redelivered once the LWP is out of the jump pad. */
+ lwp->status_pending_p = 0;
+ enqueue_one_deferred_signal (lwp, wstat);
+
+ if (debug_threads)
+ debug_printf ("Signal %d for LWP %ld deferred "
+ "(in jump pad)\n",
+ WSTOPSIG (*wstat), lwpid_of (thread));
+ }
+
+ linux_resume_one_lwp (lwp, 0, 0, NULL);
+ }
+ else
+ lwp->suspended++;
+}
+
+/* find_inferior predicate: return nonzero iff the LWP behind ENTRY is
+   alive and currently running (neither dead nor stopped).  */
+
+static int
+lwp_running (struct inferior_list_entry *entry, void *data)
+{
+  struct lwp_info *lwp = get_thread_lwp ((struct thread_info *) entry);
+
+  return !lwp->dead && !lwp->stopped;
+}
+
+/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
+ If SUSPEND, then also increase the suspend count of every LWP,
+ except EXCEPT. */
+
+static void
+stop_all_lwps (int suspend, struct lwp_info *except)
+{
+ /* Should not be called recursively. */
+ gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
+
+ if (debug_threads)
+ {
+ debug_enter ();
+ debug_printf ("stop_all_lwps (%s, except=%s)\n",
+ suspend ? "stop-and-suspend" : "stop",
+ except != NULL
+ ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
+ : "none");
+ }
+
+ /* Publish the stopping state before queuing SIGSTOPs, so event
+ handling code knows these stops are internal. */
+ stopping_threads = (suspend
+ ? STOPPING_AND_SUSPENDING_THREADS
+ : STOPPING_THREADS);
+
+ if (suspend)
+ find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
+ else
+ find_inferior (&all_threads, send_sigstop_callback, except);
+ /* Block until every LWP has actually reported a stop. */
+ wait_for_sigstop ();
+ stopping_threads = NOT_STOPPING_THREADS;
+
+ if (debug_threads)
+ {
+ debug_printf ("stop_all_lwps done, setting stopping_threads "
+ "back to !stopping\n");
+ debug_exit ();
+ }
+}
+
+/* Resume execution of LWP. If STEP is nonzero, single-step it. If
+ SIGNAL is nonzero, give it that signal. May throw (via
+ perror_with_name) if the ptrace resume fails; see
+ linux_resume_one_lwp for the non-throwing wrapper. */
+
+static void
+linux_resume_one_lwp_throw (struct lwp_info *lwp,
+ int step, int signal, siginfo_t *info)
+{
+ struct thread_info *thread = get_lwp_thread (lwp);
+ struct thread_info *saved_thread;
+ int fast_tp_collecting;
+
+ /* An LWP that isn't stopped can't be resumed; silently ignore. */
+ if (lwp->stopped == 0)
+ return;
+
+ fast_tp_collecting = lwp->collecting_fast_tracepoint;
+
+ gdb_assert (!stabilizing_threads || fast_tp_collecting);
+
+ /* Cancel actions that rely on GDB not changing the PC (e.g., the
+ user used the "jump" command, or "set $pc = foo"). */
+ if (lwp->stop_pc != get_pc (lwp))
+ {
+ /* Collecting 'while-stepping' actions doesn't make sense
+ anymore. */
+ release_while_stepping_state_list (thread);
+ }
+
+ /* If we have pending signals or status, and a new signal, enqueue the
+ signal. Also enqueue the signal if we are waiting to reinsert a
+ breakpoint; it will be picked up again below. */
+ if (signal != 0
+ && (lwp->status_pending_p
+ || lwp->pending_signals != NULL
+ || lwp->bp_reinsert != 0
+ || fast_tp_collecting))
+ {
+ struct pending_signals *p_sig;
+ p_sig = xmalloc (sizeof (*p_sig));
+ p_sig->prev = lwp->pending_signals;
+ p_sig->signal = signal;
+ if (info == NULL)
+ memset (&p_sig->info, 0, sizeof (siginfo_t));
+ else
+ memcpy (&p_sig->info, info, sizeof (siginfo_t));
+ lwp->pending_signals = p_sig;
+ }
+
+ if (lwp->status_pending_p)
+ {
+ if (debug_threads)
+ debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
+ " has pending status\n",
+ lwpid_of (thread), step ? "step" : "continue", signal,
+ lwp->stop_expected ? "expected" : "not expected");
+ return;
+ }
+
+ saved_thread = current_thread;
+ current_thread = thread;
+
+ if (debug_threads)
+ debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
+ lwpid_of (thread), step ? "step" : "continue", signal,
+ lwp->stop_expected ? "expected" : "not expected");
+
+ /* This bit needs some thinking about. If we get a signal that
+ we must report while a single-step reinsert is still pending,
+ we often end up resuming the thread. It might be better to
+ (ew) allow a stack of pending events; then we could be sure that
+ the reinsert happened right away and not lose any signals.
+
+ Making this stack would also shrink the window in which breakpoints are
+ uninserted (see comment in linux_wait_for_lwp) but not enough for
+ complete correctness, so it won't solve that problem. It may be
+ worthwhile just to solve this one, however. */
+ if (lwp->bp_reinsert != 0)
+ {
+ if (debug_threads)
+ debug_printf (" pending reinsert at 0x%s\n",
+ paddress (lwp->bp_reinsert));
+
+ if (can_hardware_single_step ())
+ {
+ if (fast_tp_collecting == 0)
+ {
+ if (step == 0)
+ fprintf (stderr, "BAD - reinserting but not stepping.\n");
+ if (lwp->suspended)
+ fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
+ lwp->suspended);
+ }
+
+ step = 1;
+ }
+
+ /* Postpone any pending signal. It was enqueued above. */
+ signal = 0;
+ }
+
+ /* fast_tp_collecting == 1 means the LWP is about to hit the
+ exit-jump-pad breakpoint; == 2 means it is single-stepping out of
+ the jump pad (per the debug messages below). */
+ if (fast_tp_collecting == 1)
+ {
+ if (debug_threads)
+ debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
+ " (exit-jump-pad-bkpt)\n",
+ lwpid_of (thread));
+
+ /* Postpone any pending signal. It was enqueued above. */
+ signal = 0;
+ }
+ else if (fast_tp_collecting == 2)
+ {
+ if (debug_threads)
+ debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
+ " single-stepping\n",
+ lwpid_of (thread));
+
+ if (can_hardware_single_step ())
+ step = 1;
+ else
+ {
+ internal_error (__FILE__, __LINE__,
+ "moving out of jump pad single-stepping"
+ " not implemented on this target");
+ }
+
+ /* Postpone any pending signal. It was enqueued above. */
+ signal = 0;
+ }
+
+ /* If we have while-stepping actions in this thread set it stepping.
+ If we have a signal to deliver, it may or may not be set to
+ SIG_IGN, we don't know. Assume so, and allow collecting
+ while-stepping into a signal handler. A possible smart thing to
+ do would be to set an internal breakpoint at the signal return
+ address, continue, and carry on catching this while-stepping
+ action only when that breakpoint is hit. A future
+ enhancement. */
+ if (thread->while_stepping != NULL
+ && can_hardware_single_step ())
+ {
+ if (debug_threads)
+ debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
+ lwpid_of (thread));
+ step = 1;
+ }
+
+ /* Record the PC we are resuming from, for later comparison. */
+ if (the_low_target.get_pc != NULL)
+ {
+ struct regcache *regcache = get_thread_regcache (current_thread, 1);
+
+ lwp->stop_pc = (*the_low_target.get_pc) (regcache);
+
+ if (debug_threads)
+ {
+ debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
+ (long) lwp->stop_pc);
+ }
+ }
+
+ /* If we have pending signals, consume one unless we are trying to
+ reinsert a breakpoint or we're trying to finish a fast tracepoint
+ collect. */
+ if (lwp->pending_signals != NULL
+ && lwp->bp_reinsert == 0
+ && fast_tp_collecting == 0)
+ {
+ struct pending_signals **p_sig;
+
+ /* Walk to the oldest queued signal (tail of the list) so
+ signals are delivered in FIFO order. */
+ p_sig = &lwp->pending_signals;
+ while ((*p_sig)->prev != NULL)
+ p_sig = &(*p_sig)->prev;
+
+ signal = (*p_sig)->signal;
+ if ((*p_sig)->info.si_signo != 0)
+ ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
+ &(*p_sig)->info);
+
+ free (*p_sig);
+ *p_sig = NULL;
+ }
+
+ if (the_low_target.prepare_to_resume != NULL)
+ the_low_target.prepare_to_resume (lwp);
+
+ regcache_invalidate_thread (thread);
+ errno = 0;
+ lwp->stepping = step;
+ ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
+ (PTRACE_TYPE_ARG3) 0,
+ /* Coerce to a uintptr_t first to avoid potential gcc warning
+ of coercing an 8 byte integer to a 4 byte pointer. */
+ (PTRACE_TYPE_ARG4) (uintptr_t) signal);
+
+ current_thread = saved_thread;
+ if (errno)
+ perror_with_name ("resuming thread");
+
+ /* Successfully resumed. Clear state that no longer makes sense,
+ and mark the LWP as running. Must not do this before resuming
+ otherwise if that fails other code will be confused. E.g., we'd
+ later try to stop the LWP and hang forever waiting for a stop
+ status. Note that we must not throw after this is cleared,
+ otherwise handle_zombie_lwp_error would get confused. */
+ lwp->stopped = 0;
+ lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
+}
+
+/* Called when we try to resume a stopped LWP and that errors out. If
+ the LWP is no longer in ptrace-stopped state (meaning it's zombie,
+ or about to become), discard the error, clear any pending status
+ the LWP may have, and return true (we'll collect the exit status
+ soon enough). Otherwise, return false. */
+
+static int
+check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
+{
+ struct thread_info *thread = get_lwp_thread (lp);
+
+ /* If we get an error after resuming the LWP successfully, we'd
+ confuse !T state for the LWP being gone. */
+ gdb_assert (lp->stopped);
+
+ /* We can't just check whether the LWP is in 'Z (Zombie)' state,
+ because even if ptrace failed with ESRCH, the tracee may be "not
+ yet fully dead", but already refusing ptrace requests. In that
+ case the tracee has 'R (Running)' state for a little bit
+ (observed in Linux 3.18). See also the note on ESRCH in the
+ ptrace(2) man page. Instead, check whether the LWP has any state
+ other than ptrace-stopped. */
+
+ /* Don't assume anything if /proc/PID/status can't be read. */
+ if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
+ {
+ /* The LWP is on its way out; forget its pending state so
+ nothing tries to report or act on it. */
+ lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
+ lp->status_pending_p = 0;
+ return 1;
+ }
+ return 0;
+}
+
+/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
+ disappears while we try to resume it. */
+
+static void
+linux_resume_one_lwp (struct lwp_info *lwp,
+ int step, int signal, siginfo_t *info)
+{
+ /* TRY/CATCH are gdbserver's exception-handling macros; any error
+ thrown by the resume is re-thrown unless the LWP is simply gone. */
+ TRY
+ {
+ linux_resume_one_lwp_throw (lwp, step, signal, info);
+ }
+ CATCH (ex, RETURN_MASK_ERROR)
+ {
+ if (!check_ptrace_stopped_lwp_gone (lwp))
+ throw_exception (ex);
+ }
+ END_CATCH
+}
+
+/* Parameter block passed to linux_set_resume_request through
+ find_inferior: the array of resume requests received from GDB. */
+struct thread_resume_array
+{
+ struct thread_resume *resume; /* Array of resume requests. */
+ size_t n; /* Number of elements in RESUME. */
+};
+
+/* This function is called once per thread via find_inferior.
+ ARG is a pointer to a thread_resume_array struct.
+ We look up the thread specified by ENTRY in ARG, and mark the thread
+ with a pointer to the appropriate resume request.
+
+ This algorithm is O(threads * resume elements), but resume elements
+ is small (and will remain small at least until GDB supports thread
+ suspension). */
+
+static int
+linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
+{
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
+ int ndx;
+ struct thread_resume_array *r;
+
+ r = arg;
+
+ for (ndx = 0; ndx < r->n; ndx++)
+ {
+ ptid_t ptid = r->resume[ndx].thread;
+ /* A request matches this thread if it is the wildcard (-1),
+ names this exact thread, or names this thread's process. */
+ if (ptid_equal (ptid, minus_one_ptid)
+ || ptid_equal (ptid, entry->id)
+ /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
+ of PID'. */
+ || (ptid_get_pid (ptid) == pid_of (thread)
+ && (ptid_is_pid (ptid)
+ || ptid_get_lwp (ptid) == -1)))
+ {
+ /* A stop request for a thread already stopping (or stopped)
+ at GDB's request is a no-op; keep scanning in case a later
+ element matches. */
+ if (r->resume[ndx].kind == resume_stop
+ && thread->last_resume_kind == resume_stop)
+ {
+ if (debug_threads)
+ debug_printf ("already %s LWP %ld at GDB's request\n",
+ (thread->last_status.kind
+ == TARGET_WAITKIND_STOPPED)
+ ? "stopped"
+ : "stopping",
+ lwpid_of (thread));
+
+ continue;
+ }
+
+ lwp->resume = &r->resume[ndx];
+ thread->last_resume_kind = lwp->resume->kind;
+
+ lwp->step_range_start = lwp->resume->step_range_start;
+ lwp->step_range_end = lwp->resume->step_range_end;
+
+ /* If we had a deferred signal to report, dequeue one now.
+ This can happen if LWP gets more than one signal while
+ trying to get out of a jump pad. */
+ if (lwp->stopped
+ && !lwp->status_pending_p
+ && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
+ {
+ lwp->status_pending_p = 1;
+
+ if (debug_threads)
+ debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
+ "leaving status pending.\n",
+ WSTOPSIG (lwp->status_pending),
+ lwpid_of (thread));
+ }
+
+ return 0;
+ }
+ }
+
+ /* No resume action for this thread. */
+ lwp->resume = NULL;
+
+ return 0;
+}
+
+/* find_inferior callback for linux_resume.  Sets the int pointed to
+   by FLAG_P when the LWP behind ENTRY is going to be resumed and
+   still has an interesting status pending.  Always returns 0 so the
+   whole thread list is visited.  */
+
+static int
+resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
+{
+  struct thread_info *thread = (struct thread_info *) entry;
+
+  /* LWPs which will not be resumed are not interesting, because
+     we might not wait for them next time through linux_wait.  */
+  if (get_thread_lwp (thread)->resume != NULL
+      && thread_still_has_status_pending_p (thread))
+    *(int *) flag_p = 1;
+
+  return 0;
+}
+
+/* Return 1 if this lwp that GDB wants running is stopped at an
+ internal breakpoint that we need to step over. It assumes that any
+ required STOP_PC adjustment has already been propagated to the
+ inferior's regcache. */
+
+static int
+need_step_over_p (struct inferior_list_entry *entry, void *dummy)
+{
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
+ struct thread_info *saved_thread;
+ CORE_ADDR pc;
+
+ /* LWPs which will not be resumed are not interesting, because we
+ might not wait for them next time through linux_wait. */
+
+ if (!lwp->stopped)
+ {
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
+ lwpid_of (thread));
+ return 0;
+ }
+
+ if (thread->last_resume_kind == resume_stop)
+ {
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
+ " stopped\n",
+ lwpid_of (thread));
+ return 0;
+ }
+
+ gdb_assert (lwp->suspended >= 0);
+
+ if (lwp->suspended)
+ {
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
+ lwpid_of (thread));
+ return 0;
+ }
+
+ /* NOTE(review): this branch only logs and deliberately does not
+ return; control falls through to the PC/breakpoint checks below.
+ Confirm that is intended before "fixing" it. */
+ if (!lwp->need_step_over)
+ {
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
+ }
+
+ if (lwp->status_pending_p)
+ {
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
+ " status.\n",
+ lwpid_of (thread));
+ return 0;
+ }
+
+ /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
+ or we have. */
+ pc = get_pc (lwp);
+
+ /* If the PC has changed since we stopped, then don't do anything,
+ and let the breakpoint/tracepoint be hit. This happens if, for
+ instance, GDB handled the decr_pc_after_break subtraction itself,
+ GDB is OOL stepping this thread, or the user has issued a "jump"
+ command, or poked thread's registers herself. */
+ if (pc != lwp->stop_pc)
+ {
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
+ "Old stop_pc was 0x%s, PC is now 0x%s\n",
+ lwpid_of (thread),
+ paddress (lwp->stop_pc), paddress (pc));
+
+ lwp->need_step_over = 0;
+ return 0;
+ }
+
+ saved_thread = current_thread;
+ current_thread = thread;
+
+ /* We can only step over breakpoints we know about. */
+ if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
+ {
+ /* Don't step over a breakpoint that GDB expects to hit
+ though. If the condition is being evaluated on the target's side
+ and it evaluate to false, step over this breakpoint as well. */
+ if (gdb_breakpoint_here (pc)
+ && gdb_condition_true_at_breakpoint (pc)
+ && gdb_no_commands_at_breakpoint (pc))
+ {
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? yes, but found"
+ " GDB breakpoint at 0x%s; skipping step over\n",
+ lwpid_of (thread), paddress (pc));
+
+ current_thread = saved_thread;
+ return 0;
+ }
+ else
+ {
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? yes, "
+ "found breakpoint at 0x%s\n",
+ lwpid_of (thread), paddress (pc));
+
+ /* We've found an lwp that needs stepping over --- return 1 so
+ that find_inferior stops looking. */
+ current_thread = saved_thread;
+
+ /* If the step over is cancelled, this is set again. */
+ lwp->need_step_over = 0;
+ return 1;
+ }
+ }
+
+ current_thread = saved_thread;
+
+ if (debug_threads)
+ debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
+ " at 0x%s\n",
+ lwpid_of (thread), paddress (pc));
+
+ return 0;
+}
+
+/* Start a step-over operation on LWP. When LWP stopped at a
+ breakpoint, to make progress, we need to remove the breakpoint out
+ of the way. If we let other threads run while we do that, they may
+ pass by the breakpoint location and miss hitting it. To avoid
+ that, a step-over momentarily stops all threads while LWP is
+ single-stepped while the breakpoint is temporarily uninserted from
+ the inferior. When the single-step finishes, we reinsert the
+ breakpoint, and let all threads that are supposed to be running,
+ run again.
+
+ On targets that don't support hardware single-step, we don't
+ currently support full software single-stepping. Instead, we only
+ support stepping over the thread event breakpoint, by asking the
+ low target where to place a reinsert breakpoint. Since this
+ routine assumes the breakpoint being stepped over is a thread event
+ breakpoint, it usually assumes the return address of the current
+ function is a good enough place to set the reinsert breakpoint. */
+
+static int
+start_step_over (struct lwp_info *lwp)
+{
+ struct thread_info *thread = get_lwp_thread (lwp);
+ struct thread_info *saved_thread;
+ CORE_ADDR pc;
+ int step;
+
+ if (debug_threads)
+ debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
+ lwpid_of (thread));
+
+ /* Suspend-stop everything but LWP; the matching unsuspend happens
+ when the step-over finishes. */
+ stop_all_lwps (1, lwp);
+ gdb_assert (lwp->suspended == 0);
+
+ if (debug_threads)
+ debug_printf ("Done stopping all threads for step-over.\n");
+
+ /* Note, we should always reach here with an already adjusted PC,
+ either by GDB (if we're resuming due to GDB's request), or by our
+ caller, if we just finished handling an internal breakpoint GDB
+ shouldn't care about. */
+ pc = get_pc (lwp);
+
+ saved_thread = current_thread;
+ current_thread = thread;
+
+ /* Record where to reinsert, and pull the breakpoint/jump out of
+ the way. */
+ lwp->bp_reinsert = pc;
+ uninsert_breakpoints_at (pc);
+ uninsert_fast_tracepoint_jumps_at (pc);
+
+ if (can_hardware_single_step ())
+ {
+ step = 1;
+ }
+ else
+ {
+ CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
+ set_reinsert_breakpoint (raddr);
+ step = 0;
+ }
+
+ current_thread = saved_thread;
+
+ linux_resume_one_lwp (lwp, step, 0, NULL);
+
+ /* Require next event from this LWP. */
+ step_over_bkpt = thread->entry.id;
+ return 1;
+}
+
+/* Finish a step-over: reinsert the breakpoint we had uninserted in
+   start_step_over, if still there, and delete any reinsert
+   breakpoints we've set, on non hardware single-step targets.
+   Returns 1 if a step-over was in fact in progress, else 0.  */
+
+static int
+finish_step_over (struct lwp_info *lwp)
+{
+  if (lwp->bp_reinsert == 0)
+    return 0;
+
+  if (debug_threads)
+    debug_printf ("Finished step over.\n");
+
+  /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
+     may be no breakpoint to reinsert there by now.  */
+  reinsert_breakpoints_at (lwp->bp_reinsert);
+  reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
+
+  lwp->bp_reinsert = 0;
+
+  /* Delete any software-single-step reinsert breakpoints.  No
+     longer needed.  We don't have to worry about other threads
+     hitting this trap, and later not being able to explain it,
+     because we were stepping over a breakpoint, and we hold all
+     threads but LWP stopped while doing that.  */
+  if (!can_hardware_single_step ())
+    delete_reinsert_breakpoints ();
+
+  step_over_bkpt = null_ptid;
+  return 1;
+}
+
+/* This function is called once per thread. We check the thread's resume
+ request, which will tell us whether to resume, step, or leave the thread
+ stopped; and what signal, if any, it should be sent.
+
+ For threads which we aren't explicitly told otherwise, we preserve
+ the stepping flag; this is used for stepping over gdbserver-placed
+ breakpoints.
+
+ If pending_flags was set in any thread, we queue any needed
+ signals, since we won't actually resume. We already have a pending
+ event to report, so we don't need to preserve any step requests;
+ they should be re-issued if necessary. */
+
+static int
+linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
+{
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
+ int step;
+ int leave_all_stopped = * (int *) arg;
+ int leave_pending;
+
+ /* No request recorded for this thread; nothing to do. */
+ if (lwp->resume == NULL)
+ return 0;
+
+ if (lwp->resume->kind == resume_stop)
+ {
+ if (debug_threads)
+ debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
+
+ if (!lwp->stopped)
+ {
+ if (debug_threads)
+ debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
+
+ /* Stop the thread, and wait for the event asynchronously,
+ through the event loop. */
+ send_sigstop (lwp);
+ }
+ else
+ {
+ if (debug_threads)
+ debug_printf ("already stopped LWP %ld\n",
+ lwpid_of (thread));
+
+ /* The LWP may have been stopped in an internal event that
+ was not meant to be notified back to GDB (e.g., gdbserver
+ breakpoint), so we should be reporting a stop event in
+ this case too. */
+
+ /* If the thread already has a pending SIGSTOP, this is a
+ no-op. Otherwise, something later will presumably resume
+ the thread and this will cause it to cancel any pending
+ operation, due to last_resume_kind == resume_stop. If
+ the thread already has a pending status to report, we
+ will still report it the next time we wait - see
+ status_pending_p_callback. */
+
+ /* If we already have a pending signal to report, then
+ there's no need to queue a SIGSTOP, as this means we're
+ midway through moving the LWP out of the jumppad, and we
+ will report the pending signal as soon as that is
+ finished. */
+ if (lwp->pending_signals_to_report == NULL)
+ send_sigstop (lwp);
+ }
+
+ /* For stop requests, we're done. */
+ lwp->resume = NULL;
+ thread->last_status.kind = TARGET_WAITKIND_IGNORE;
+ return 0;
+ }
+
+ /* If this thread which is about to be resumed has a pending status,
+ then don't resume any threads - we can just report the pending
+ status. Make sure to queue any signals that would otherwise be
+ sent. In all-stop mode, we do this decision based on if *any*
+ thread has a pending status. If there's a thread that needs the
+ step-over-breakpoint dance, then don't resume any other thread
+ but that particular one. */
+ leave_pending = (lwp->status_pending_p || leave_all_stopped);
+
+ if (!leave_pending)
+ {
+ if (debug_threads)
+ debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
+
+ step = (lwp->resume->kind == resume_step);
+ linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
+ }
+ else
+ {
+ if (debug_threads)
+ debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
+
+ /* If we have a new signal, enqueue the signal. */
+ if (lwp->resume->sig != 0)
+ {
+ struct pending_signals *p_sig;
+ p_sig = xmalloc (sizeof (*p_sig));
+ p_sig->prev = lwp->pending_signals;
+ p_sig->signal = lwp->resume->sig;
+ memset (&p_sig->info, 0, sizeof (siginfo_t));
+
+ /* If this is the same signal we were previously stopped by,
+ make sure to queue its siginfo. We can ignore the return
+ value of ptrace; if it fails, we'll skip
+ PTRACE_SETSIGINFO. */
+ if (WIFSTOPPED (lwp->last_status)
+ && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
+ ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
+ &p_sig->info);
+
+ lwp->pending_signals = p_sig;
+ }
+ }
+
+ /* Request consumed; clear it so it isn't applied twice. */
+ thread->last_status.kind = TARGET_WAITKIND_IGNORE;
+ lwp->resume = NULL;
+ return 0;
+}
+
+/* Top-level resume entry: record GDB's N resume requests from
+ RESUME_INFO on each thread, decide whether anything must stay
+ stopped (pending status, or a needed step-over), then act on each
+ thread and kick off a step-over if one is required. */
+static void
+linux_resume (struct thread_resume *resume_info, size_t n)
+{
+ struct thread_resume_array array = { resume_info, n };
+ struct thread_info *need_step_over = NULL;
+ int any_pending;
+ int leave_all_stopped;
+
+ if (debug_threads)
+ {
+ debug_enter ();
+ debug_printf ("linux_resume:\n");
+ }
+
+ find_inferior (&all_threads, linux_set_resume_request, &array);
+
+ /* If there is a thread which would otherwise be resumed, which has
+ a pending status, then don't resume any threads - we can just
+ report the pending status. Make sure to queue any signals that
+ would otherwise be sent. In non-stop mode, we'll apply this
+ logic to each thread individually. We consume all pending events
+ before considering to start a step-over (in all-stop). */
+ any_pending = 0;
+ if (!non_stop)
+ find_inferior (&all_threads, resume_status_pending_p, &any_pending);
+
+ /* If there is a thread which would otherwise be resumed, which is
+ stopped at a breakpoint that needs stepping over, then don't
+ resume any threads - have it step over the breakpoint with all
+ other threads stopped, then resume all threads again. Make sure
+ to queue any signals that would otherwise be delivered or
+ queued. */
+ if (!any_pending && supports_breakpoints ())
+ need_step_over
+ = (struct thread_info *) find_inferior (&all_threads,
+ need_step_over_p, NULL);
+
+ leave_all_stopped = (need_step_over != NULL || any_pending);
+
+ if (debug_threads)
+ {
+ if (need_step_over != NULL)
+ debug_printf ("Not resuming all, need step over\n");
+ else if (any_pending)
+ debug_printf ("Not resuming, all-stop and found "
+ "an LWP with pending status\n");
+ else
+ debug_printf ("Resuming, no pending status or step over needed\n");
+ }
+
+ /* Even if we're leaving threads stopped, queue all signals we'd
+ otherwise deliver. */
+ find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
+
+ if (need_step_over)
+ start_step_over (get_thread_lwp (need_step_over));
+
+ if (debug_threads)
+ {
+ debug_printf ("linux_resume done\n");
+ debug_exit ();
+ }
+}
+
+/* This function is called once per thread.  We check the thread's
+   last resume request, which will tell us whether to resume, step, or
+   leave the thread stopped.  Any signal the client requested to be
+   delivered has already been enqueued at this point.
+
+   If any thread that GDB wants running is stopped at an internal
+   breakpoint that needs stepping over, we start a step-over operation
+   on that particular thread, and leave all others stopped.  */
+
+static int
+proceed_one_lwp (struct inferior_list_entry *entry, void *except)
+{
+  struct thread_info *thread = (struct thread_info *) entry;
+  struct lwp_info *lwp = get_thread_lwp (thread);
+  int step;
+
+  /* Leave the excepted LWP alone.  */
+  if (lwp == except)
+    return 0;
+
+  if (debug_threads)
+    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
+
+  /* Nothing to do if it is already running.  */
+  if (!lwp->stopped)
+    {
+      if (debug_threads)
+	debug_printf (" LWP %ld already running\n", lwpid_of (thread));
+      return 0;
+    }
+
+  /* Honor an outstanding client stop request that has already been
+     reported back to GDB.  */
+  if (thread->last_resume_kind == resume_stop
+      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
+    {
+      if (debug_threads)
+	/* Fixed message: the LWP number belongs next to "LWP", not
+	   spliced into the middle of "remain ... stopped".  */
+	debug_printf (" client wants LWP %ld to remain stopped\n",
+		      lwpid_of (thread));
+      return 0;
+    }
+
+  /* Don't resume an LWP whose stop still needs reporting.  */
+  if (lwp->status_pending_p)
+    {
+      if (debug_threads)
+	debug_printf (" LWP %ld has pending status, leaving stopped\n",
+		      lwpid_of (thread));
+      return 0;
+    }
+
+  gdb_assert (lwp->suspended >= 0);
+
+  /* Suspended LWPs stay put until their suspend count drops.  */
+  if (lwp->suspended)
+    {
+      if (debug_threads)
+	debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
+      return 0;
+    }
+
+  if (thread->last_resume_kind == resume_stop
+      && lwp->pending_signals_to_report == NULL
+      && lwp->collecting_fast_tracepoint == 0)
+    {
+      /* We haven't reported this LWP as stopped yet (otherwise, the
+	 last_status.kind check above would catch it, and we wouldn't
+	 reach here).  This LWP may have been momentarily paused by a
+	 stop_all_lwps call while handling for example, another LWP's
+	 step-over.  In that case, the pending expected SIGSTOP signal
+	 that was queued at vCont;t handling time will have already
+	 been consumed by wait_for_sigstop, and so we need to requeue
+	 another one here.  Note that if the LWP already has a SIGSTOP
+	 pending, this is a no-op.  */
+
+      if (debug_threads)
+	debug_printf ("Client wants LWP %ld to stop. "
+		      "Making sure it has a SIGSTOP pending\n",
+		      lwpid_of (thread));
+
+      send_sigstop (lwp);
+    }
+
+  /* Preserve the stepping flag from the last resume request.  */
+  step = thread->last_resume_kind == resume_step;
+  linux_resume_one_lwp (lwp, step, 0, NULL);
+  return 0;
+}
+
+static int
+unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
+{
+ struct thread_info *thread = (struct thread_info *) entry;
+ struct lwp_info *lwp = get_thread_lwp (thread);
+
+ if (lwp == except)
+ return 0;
+
+ lwp->suspended--;
+ gdb_assert (lwp->suspended >= 0);
+
+ return proceed_one_lwp (entry, except);
+}
+
+/* When we finish a step-over, set threads running again. If there's
+ another thread that may need a step-over, now's the time to start
+ it. Eventually, we'll move all threads past their breakpoints. */
+
+static void
+proceed_all_lwps (void)
+{
+ struct thread_info *need_step_over;
+
+ /* If there is a thread which would otherwise be resumed, which is
+ stopped at a breakpoint that needs stepping over, then don't
+ resume any threads - have it step over the breakpoint with all
+ other threads stopped, then resume all threads again. */
+
+ if (supports_breakpoints ())
+ {
+ need_step_over
+ = (struct thread_info *) find_inferior (&all_threads,
+ need_step_over_p, NULL);
+
+ if (need_step_over != NULL)
+ {
+ if (debug_threads)
+ debug_printf ("proceed_all_lwps: found "
+ "thread %ld needing a step-over\n",
+ lwpid_of (need_step_over));
+
+ start_step_over (get_thread_lwp (need_step_over));
+ return;
+ }
+ }
+
+ if (debug_threads)
+ debug_printf ("Proceeding, no step-over needed\n");
+
+ find_inferior (&all_threads, proceed_one_lwp, NULL);
+}
+
+/* Stopped LWPs that the client wanted to be running, that don't have
+ pending statuses, are set to run again, except for EXCEPT, if not
+ NULL. This undoes a stop_all_lwps call. */
+
+static void
+unstop_all_lwps (int unsuspend, struct lwp_info *except)
+{
+ if (debug_threads)
+ {
+ debug_enter ();
+ if (except)
+ debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
+ lwpid_of (get_lwp_thread (except)));
+ else
+ debug_printf ("unstopping all lwps\n");
+ }
+
+ if (unsuspend)
+ find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
+ else
+ find_inferior (&all_threads, proceed_one_lwp, except);
+
+ if (debug_threads)
+ {
+ debug_printf ("unstop_all_lwps done\n");
+ debug_exit ();
+ }
+}
+
+
+#ifdef HAVE_LINUX_REGSETS
+
+#define use_linux_regsets 1
+
+/* Returns true if REGSET has been disabled. */
+
+static int
+regset_disabled (struct regsets_info *info, struct regset_info *regset)
+{
+ return (info->disabled_regsets != NULL
+ && info->disabled_regsets[regset - info->regsets]);
+}
+
+/* Disable REGSET. */
+
+static void
+disable_regset (struct regsets_info *info, struct regset_info *regset)
+{
+ int dr_offset;
+
+ dr_offset = regset - info->regsets;
+ if (info->disabled_regsets == NULL)
+ info->disabled_regsets = xcalloc (1, info->num_regsets);
+ info->disabled_regsets[dr_offset] = 1;
+}
+
/* Fetch the inferior's registers into REGCACHE using every enabled
   regset in REGSETS_INFO.  Returns 0 if a general-purpose regset was
   read successfully, 1 otherwise -- the caller uses this to decide
   whether the general registers must still be fetched another way
   (e.g. via PTRACE_PEEKUSER).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with negative size.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty regsets and those already found unsupported.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* A nonzero nt_type means a PTRACE_GETREGSET-style request,
	 which takes an iovec describing the buffer; legacy requests
	 take the buffer pointer directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace takes the data buffer as the address argument.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  /* Hand the raw bytes to the regset's decoder, which fills
	     the regcache.  */
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
+
/* Write REGCACHE's registers back to the inferior using every enabled
   regset in REGSETS_INFO that has a fill_function.  Returns 0 if a
   general-purpose regset was written successfully (or the inferior is
   already gone), 1 otherwise, so the caller knows whether the general
   registers still need storing another way.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  /* The regsets array is terminated by an entry with negative size.  */
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, and read-only (no fill_function) regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  /* PTRACE_GETREGSET/SETREGSET-style requests describe the
	     buffer with an iovec; legacy requests take it directly.  */
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace takes the data buffer as the address argument.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
+
+#else /* !HAVE_LINUX_REGSETS */
+
+#define use_linux_regsets 0
+#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
+#define regsets_store_inferior_registers(regsets_info, regcache) 1
+
+#endif
+
+/* Return 1 if register REGNO is supported by one of the regset ptrace
+ calls or 0 if it has to be transferred individually. */
+
+static int
+linux_register_in_regsets (const struct regs_info *regs_info, int regno)
+{
+ unsigned char mask = 1 << (regno % 8);
+ size_t index = regno / 8;
+
+ return (use_linux_regsets
+ && (regs_info->regset_bitmap == NULL
+ || (regs_info->regset_bitmap[index] & mask) != 0));
+}
+
+#ifdef HAVE_LINUX_USRREGS
+
+int
+register_addr (const struct usrregs_info *usrregs, int regnum)
+{
+ int addr;
+
+ if (regnum < 0 || regnum >= usrregs->num_regs)
+ error ("Invalid register number %d.", regnum);
+
+ addr = usrregs->regmap[regnum];
+
+ return addr;
+}
+
/* Fetch one register, REGNO, from the inferior via PTRACE_PEEKUSER
   and supply it to REGCACHE.  Silently does nothing for registers
   out of range, marked unfetchable by the low target, or with no
   USER-area mapping (regaddr == -1).  Errors out on ptrace failure.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words,
     since PEEKUSER transfers one PTRACE_XFER_TYPE at a time.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* PEEKUSER returns the data in the return value, so errno is
	 the only way to detect failure; clear it first.  */
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it needs to (e.g.
     registers stored in a different layout in the USER area).  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
+
/* Store one register, REGNO, from REGCACHE into the inferior via
   PTRACE_POKEUSER.  Silently does nothing for registers out of range,
   marked unstorable by the low target, or with no USER-area mapping
   (regaddr == -1).  Ignores ESRCH (process already gone); errors out
   on other ptrace failures unless the register is unstorable.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words;
     zero-fill so padding bytes written to the USER area are defined.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  /* Let the low target massage the bytes into USER-area layout if
     needed; otherwise copy them straight out of the regcache.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* POKEUSER reports failure only through errno; clear it first.  */
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
+
+/* Fetch all registers, or just one, from the child process.
+ If REGNO is -1, do this for all registers, skipping any that are
+ assumed to have been retrieved by regsets_fetch_inferior_registers,
+ unless ALL is non-zero.
+ Otherwise, REGNO specifies which register (so we can save time). */
+static void
+usr_fetch_inferior_registers (const struct regs_info *regs_info,
+ struct regcache *regcache, int regno, int all)
+{
+ struct usrregs_info *usr = regs_info->usrregs;
+
+ if (regno == -1)
+ {
+ for (regno = 0; regno < usr->num_regs; regno++)
+ if (all || !linux_register_in_regsets (regs_info, regno))
+ fetch_register (usr, regcache, regno);
+ }
+ else
+ fetch_register (usr, regcache, regno);
+}
+
+/* Store our register values back into the inferior.
+ If REGNO is -1, do this for all registers, skipping any that are
+ assumed to have been saved by regsets_store_inferior_registers,
+ unless ALL is non-zero.
+ Otherwise, REGNO specifies which register (so we can save time). */
+static void
+usr_store_inferior_registers (const struct regs_info *regs_info,
+ struct regcache *regcache, int regno, int all)
+{
+ struct usrregs_info *usr = regs_info->usrregs;
+
+ if (regno == -1)
+ {
+ for (regno = 0; regno < usr->num_regs; regno++)
+ if (all || !linux_register_in_regsets (regs_info, regno))
+ store_register (usr, regcache, regno);
+ }
+ else
+ store_register (usr, regcache, regno);
+}
+
+#else /* !HAVE_LINUX_USRREGS */
+
+#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
+#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
+
+#endif
+
+
+void
+linux_fetch_registers (struct regcache *regcache, int regno)
+{
+ int use_regsets;
+ int all = 0;
+ const struct regs_info *regs_info = (*the_low_target.regs_info) ();
+
+ if (regno == -1)
+ {
+ if (the_low_target.fetch_register != NULL
+ && regs_info->usrregs != NULL)
+ for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
+ (*the_low_target.fetch_register) (regcache, regno);
+
+ all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
+ if (regs_info->usrregs != NULL)
+ usr_fetch_inferior_registers (regs_info, regcache, -1, all);
+ }
+ else
+ {
+ if (the_low_target.fetch_register != NULL
+ && (*the_low_target.fetch_register) (regcache, regno))
+ return;
+
+ use_regsets = linux_register_in_regsets (regs_info, regno);
+ if (use_regsets)
+ all = regsets_fetch_inferior_registers (regs_info->regsets_info,
+ regcache);
+ if ((!use_regsets || all) && regs_info->usrregs != NULL)
+ usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
+ }
+}
+
+void
+linux_store_registers (struct regcache *regcache, int regno)
+{
+ int use_regsets;
+ int all = 0;
+ const struct regs_info *regs_info = (*the_low_target.regs_info) ();
+
+ if (regno == -1)
+ {
+ all = regsets_store_inferior_registers (regs_info->regsets_info,
+ regcache);
+ if (regs_info->usrregs != NULL)
+ usr_store_inferior_registers (regs_info, regcache, regno, all);
+ }
+ else
+ {
+ use_regsets = linux_register_in_regsets (regs_info, regno);
+ if (use_regsets)
+ all = regsets_store_inferior_registers (regs_info->regsets_info,
+ regcache);
+ if ((!use_regsets || all) && regs_info->usrregs != NULL)
+ usr_store_inferior_registers (regs_info, regcache, regno, 1);
+ }
+}
+
+
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno of the failing ptrace word read.  Tries /proc/PID/mem
   first for larger transfers, falling back to (and finishing any
   partial read with) word-at-a-time PTRACE_PEEKTEXT.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords.  PEEKTEXT returns the data in the return
     value, so errno is the only failure indicator; clear it first and
     stop at the first word that fails.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer: skip the leading bytes
     of the first word that precede MEMADDR, and don't copy past LEN.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
+
+/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
+ memory at MEMADDR. On failure (cannot write to the inferior)
+ returns the value of errno. Always succeeds if LEN is zero. */
+
+static int
+linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
+{
+ register int i;
+ /* Round starting address down to longword boundary. */
+ register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
+ /* Round ending address up; get number of longwords that makes. */
+ register int count
+ = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
+ / sizeof (PTRACE_XFER_TYPE);
+
+ /* Allocate buffer of that many longwords. */
+ register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
+ alloca (count * sizeof (PTRACE_XFER_TYPE));
+
+ int pid = lwpid_of (current_thread);
+
+ if (len == 0)
+ {
+ /* Zero length write always succeeds. */
+ return 0;
+ }
+
+ if (debug_threads)
+ {