linux-low: Remove unused variables

diff --git a/gdb/gdbserver/linux-low.c b/gdb/gdbserver/linux-low.c
index 0f4bb8709bb966302f89c25cf014917f8475fdad..1d96ec23282f2247a96db9a230afb79ffea8d510 100644
--- a/gdb/gdbserver/linux-low.c
+++ b/gdb/gdbserver/linux-low.c
@@ -1,5 +1,5 @@
 /* Low level interface to ptrace, for the remote server for GDB.
-   Copyright (C) 1995-2016 Free Software Foundation, Inc.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
 
    This file is part of GDB.
 
@@ -22,7 +22,7 @@
 #include "agent.h"
 #include "tdesc.h"
 #include "rsp-low.h"
-
+#include "signals-state-save-restore.h"
 #include "nat/linux-nat.h"
 #include "nat/linux-waitpid.h"
 #include "gdb_wait.h"
@@ -47,6 +47,9 @@
 #include "tracepoint.h"
 #include "hostio.h"
 #include <inttypes.h>
+#include "common-inferior.h"
+#include "nat/fork-inferior.h"
+#include "environ.h"
 #ifndef ELFMAG0
 /* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
    then ELFMAG0 will have been defined.  If it didn't get included by
@@ -176,6 +179,14 @@ lwp_stop_reason (struct lwp_info *lwp)
   return lwp->stop_reason;
 }
 
+/* See nat/linux-nat.h.  */
+
+int
+lwp_is_stepping (struct lwp_info *lwp)
+{
+  return lwp->stepping;
+}
+
 /* A list of all unknown processes which receive stop signals.  Some
    other process will presumably claim each of these as forked
    children momentarily.  */
@@ -267,6 +278,8 @@ static int kill_lwp (unsigned long lwpid, int signo);
 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
 static void complete_ongoing_step_over (void);
 static int linux_low_ptrace_options (int attached);
+static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
+static int proceed_one_lwp (struct inferior_list_entry *entry, void *except);
 
 /* When the event-loop is doing a step-over, this points at the thread
    being stepped.  */
@@ -548,22 +561,16 @@ handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
              && can_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
-             struct thread_info *saved_thread = current_thread;
-
-             current_thread = event_thr;
-             /* If we leave reinsert breakpoints there, child will
-                hit it, so uninsert reinsert breakpoints from parent
+             /* If we leave single-step breakpoints there, the child will
+                hit them, so uninsert single-step breakpoints from the parent
                 (and child).  Once vfork child is done, reinsert
                 them back to parent.  */
-             uninsert_reinsert_breakpoints ();
-             current_thread = saved_thread;
+             uninsert_single_step_breakpoints (event_thr);
            }
 
-         clone_all_breakpoints (&child_proc->breakpoints,
-                                &child_proc->raw_breakpoints,
-                                parent_proc->breakpoints);
+         clone_all_breakpoints (child_thr, event_thr);
 
-         tdesc = XNEW (struct target_desc);
+         tdesc = allocate_target_description ();
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;
 
@@ -585,25 +592,26 @@ handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;
 
-         /* If the parent thread is doing step-over with reinsert
-            breakpoints, the list of reinsert breakpoints are cloned
+         /* Link the threads until the parent event is passed on to
+            higher layers.  */
+         event_lwp->fork_relative = child_lwp;
+         child_lwp->fork_relative = event_lwp;
+
+         /* If the parent thread is doing step-over with single-step
+            breakpoints, the list of single-step breakpoints is cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ())
            {
-             struct thread_info *saved_thread = current_thread;
-
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
-             current_thread = child_thr;
-             delete_reinsert_breakpoints ();
-             current_thread = saved_thread;
+             delete_single_step_breakpoints (child_thr);
 
-             gdb_assert (has_reinsert_breakpoints (parent_proc));
-             gdb_assert (!has_reinsert_breakpoints (child_proc));
+             gdb_assert (has_single_step_breakpoints (event_thr));
+             gdb_assert (!has_single_step_breakpoints (child_thr));
            }
 
          /* Report the event.  */
@@ -657,14 +665,9 @@ handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
 
       if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
        {
-         struct thread_info *saved_thread = current_thread;
-         struct process_info *proc = get_thread_process (event_thr);
+         reinsert_single_step_breakpoints (event_thr);
 
-         current_thread = event_thr;
-         reinsert_reinsert_breakpoints ();
-         current_thread = saved_thread;
-
-         gdb_assert (has_reinsert_breakpoints (proc));
+         gdb_assert (has_single_step_breakpoints (event_thr));
        }
 
       /* Report the event.  */
@@ -946,57 +949,57 @@ add_lwp (ptid_t ptid)
   return lwp;
 }
 
+/* Callback to be used when calling fork_inferior, responsible for
+   actually initiating the tracing of the inferior.  */
+
+static void
+linux_ptrace_fun ()
+{
+  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
+             (PTRACE_TYPE_ARG4) 0) < 0)
+    trace_start_error_with_name ("ptrace");
+
+  if (setpgid (0, 0) < 0)
+    trace_start_error_with_name ("setpgid");
+
+  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
+     stdout to stderr so that inferior i/o doesn't corrupt the connection.
+     Also, redirect stdin to /dev/null.  */
+  if (remote_connection_is_stdio ())
+    {
+      if (close (0) < 0)
+       trace_start_error_with_name ("close");
+      if (open ("/dev/null", O_RDONLY) < 0)
+       trace_start_error_with_name ("open");
+      if (dup2 (2, 1) < 0)
+       trace_start_error_with_name ("dup2");
+      if (write (2, "stdin/stdout redirected\n",
+                sizeof ("stdin/stdout redirected\n") - 1) < 0)
+       {
+         /* Errors ignored.  */;
+       }
+    }
+}
+
 /* Start an inferior process and returns its pid.
-   ALLARGS is a vector of program-name and args. */
+   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
+   are its arguments.  */
 
 static int
-linux_create_inferior (char *program, char **allargs)
+linux_create_inferior (const char *program,
+                      const std::vector<char *> &program_args)
 {
   struct lwp_info *new_lwp;
   int pid;
   ptid_t ptid;
   struct cleanup *restore_personality
     = maybe_disable_address_space_randomization (disable_randomization);
+  std::string str_program_args = stringify_argv (program_args);
 
-#if defined(__UCLIBC__) && defined(HAS_NOMMU)
-  pid = vfork ();
-#else
-  pid = fork ();
-#endif
-  if (pid < 0)
-    perror_with_name ("fork");
-
-  if (pid == 0)
-    {
-      close_most_fds ();
-      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
-
-      setpgid (0, 0);
-
-      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
-        stdout to stderr so that inferior i/o doesn't corrupt the connection.
-        Also, redirect stdin to /dev/null.  */
-      if (remote_connection_is_stdio ())
-       {
-         close (0);
-         open ("/dev/null", O_RDONLY);
-         dup2 (2, 1);
-         if (write (2, "stdin/stdout redirected\n",
-                    sizeof ("stdin/stdout redirected\n") - 1) < 0)
-           {
-             /* Errors ignored.  */;
-           }
-       }
-
-      execv (program, allargs);
-      if (errno == ENOENT)
-       execvp (program, allargs);
-
-      fprintf (stderr, "Cannot exec %s: %s.\n", program,
-              strerror (errno));
-      fflush (stderr);
-      _exit (0177);
-    }
+  pid = fork_inferior (program,
+                      str_program_args.c_str (),
+                      get_environ ()->envp (), linux_ptrace_fun,
+                      NULL, NULL, NULL, NULL);
 
   do_cleanups (restore_personality);
 
@@ -1006,6 +1009,8 @@ linux_create_inferior (char *program, char **allargs)
   new_lwp = add_lwp (ptid);
   new_lwp->must_set_ptrace_flags = 1;
 
+  post_fork_inferior (pid, program);
+
   return pid;
 }
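
The child-side setup that linux_ptrace_fun performs (PTRACE_TRACEME, setpgid, stdio redirection, then exec via the shared fork_inferior) follows the classic ptrace tracer pattern.  Below is a minimal standalone sketch of that same pattern using a plain fork/execvp rather than gdbserver's fork_inferior machinery; names and error handling are illustrative only.

  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <sys/ptrace.h>
  #include <sys/wait.h>
  #include <unistd.h>

  int
  main (int argc, char **argv)
  {
    if (argc < 2)
      {
        fprintf (stderr, "usage: %s PROGRAM [ARGS...]\n", argv[0]);
        return 1;
      }

    pid_t pid = fork ();
    if (pid < 0)
      {
        perror ("fork");
        return 1;
      }

    if (pid == 0)
      {
        /* Child: request tracing and leave the tracer's process group,
           mirroring linux_ptrace_fun, then exec the program.  */
        if (ptrace (PTRACE_TRACEME, 0, (void *) 0, (void *) 0) < 0)
          {
            perror ("ptrace (PTRACE_TRACEME)");
            _exit (127);
          }
        if (setpgid (0, 0) < 0)
          {
            perror ("setpgid");
            _exit (127);
          }
        execvp (argv[1], &argv[1]);
        fprintf (stderr, "cannot exec %s: %s\n", argv[1], strerror (errno));
        _exit (0177);
      }

    /* Parent: because of PTRACE_TRACEME the child stops at exec; wait
       for that stop, then let it run to completion.  */
    int status;
    if (waitpid (pid, &status, 0) > 0 && WIFSTOPPED (status))
      printf ("child %d stopped at exec with signal %d\n",
              (int) pid, WSTOPSIG (status));
    ptrace (PTRACE_CONT, pid, (void *) 0, (void *) 0);
    waitpid (pid, &status, 0);
    return 0;
  }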
 
@@ -1492,16 +1497,14 @@ get_detach_signal (struct thread_info *thread)
     }
 }
 
-static int
-linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
+/* Detach from LWP.  */
+
+static void
+linux_detach_one_lwp (struct lwp_info *lwp)
 {
-  struct thread_info *thread = (struct thread_info *) entry;
-  struct lwp_info *lwp = get_thread_lwp (thread);
-  int pid = * (int *) args;
+  struct thread_info *thread = get_lwp_thread (lwp);
   int sig;
-
-  if (ptid_get_pid (entry->id) != pid)
-    return 0;
+  int lwpid;
 
   /* If there is a pending SIGSTOP, get rid of it.  */
   if (lwp->stop_expected)
@@ -1514,22 +1517,94 @@ linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
       lwp->stop_expected = 0;
     }
 
-  /* Flush any pending changes to the process's registers.  */
-  regcache_invalidate_thread (thread);
-
   /* Pass on any pending signal for this thread.  */
   sig = get_detach_signal (thread);
 
-  /* Finally, let it resume.  */
-  if (the_low_target.prepare_to_resume != NULL)
-    the_low_target.prepare_to_resume (lwp);
-  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
+  /* Preparing to resume may try to write registers, and fail if the
+     lwp is zombie.  If that happens, ignore the error.  We'll handle
+     it below, when detach fails with ESRCH.  */
+  TRY
+    {
+      /* Flush any pending changes to the process's registers.  */
+      regcache_invalidate_thread (thread);
+
+      /* Finally, let it resume.  */
+      if (the_low_target.prepare_to_resume != NULL)
+       the_low_target.prepare_to_resume (lwp);
+    }
+  CATCH (ex, RETURN_MASK_ERROR)
+    {
+      if (!check_ptrace_stopped_lwp_gone (lwp))
+       throw_exception (ex);
+    }
+  END_CATCH
+
+  lwpid = lwpid_of (thread);
+  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
-    error (_("Can't detach %s: %s"),
-          target_pid_to_str (ptid_of (thread)),
-          strerror (errno));
+    {
+      int save_errno = errno;
+
+      /* We know the thread exists, so ESRCH must mean the lwp is
+        zombie.  This can happen if one of the already-detached
+        threads exits the whole thread group.  In that case we're
+        still attached, and must reap the lwp.  */
+      if (save_errno == ESRCH)
+       {
+         int ret, status;
+
+         ret = my_waitpid (lwpid, &status, __WALL);
+         if (ret == -1)
+           {
+             warning (_("Couldn't reap LWP %d while detaching: %s"),
+                      lwpid, strerror (errno));
+           }
+         else if (!WIFEXITED (status) && !WIFSIGNALED (status))
+           {
+             warning (_("Reaping LWP %d while detaching "
+                        "returned unexpected status 0x%x"),
+                      lwpid, status);
+           }
+       }
+      else
+       {
+         error (_("Can't detach %s: %s"),
+                target_pid_to_str (ptid_of (thread)),
+                strerror (save_errno));
+       }
+    }
+  else if (debug_threads)
+    {
+      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
+                   target_pid_to_str (ptid_of (thread)),
+                   strsignal (sig));
+    }
 
   delete_lwp (lwp);
+}
+
+/* Callback for find_inferior.  Detaches from non-leader threads of a
+   given process.  */
+
+static int
+linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
+{
+  struct thread_info *thread = (struct thread_info *) entry;
+  struct lwp_info *lwp = get_thread_lwp (thread);
+  int pid = *(int *) args;
+  int lwpid = lwpid_of (thread);
+
+  /* Skip other processes.  */
+  if (ptid_get_pid (entry->id) != pid)
+    return 0;
+
+  /* We don't actually detach from the thread group leader just yet.
+     If the thread group exits, we must reap the zombie clone lwps
+     before we're able to reap the leader.  */
+  if (ptid_get_pid (entry->id) == lwpid)
+    return 0;
+
+  linux_detach_one_lwp (lwp);
   return 0;
 }
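
The ESRCH branch above covers the case where the lwp has already turned into a zombie (for instance because an already-detached thread exited the whole thread group): PTRACE_DETACH fails, but the tracer is still attached and must reap the lwp with waitpid.  A minimal sketch of that detach-then-reap pattern, with plain fprintf stand-ins for gdbserver's warning/error helpers:

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/wait.h>

  /* Detach from LWPID, delivering SIG, and reap the lwp if it has
     already become a zombie.  Illustrative only.  */

  static void
  detach_one_lwp_sketch (pid_t lwpid, int sig)
  {
    if (ptrace (PTRACE_DETACH, lwpid, (void *) 0, (void *) (long) sig) == 0)
      return;

    if (errno == ESRCH)
      {
        /* The lwp exists but ptrace can't reach it: it is a zombie.
           We are still its tracer, so reap it with waitpid.  */
        int status;

        if (waitpid (lwpid, &status, __WALL) == -1)
          fprintf (stderr, "couldn't reap LWP %d: %s\n",
                   (int) lwpid, strerror (errno));
        else if (!WIFEXITED (status) && !WIFSIGNALED (status))
          fprintf (stderr, "reaping LWP %d returned status 0x%x\n",
                   (int) lwpid, status);
      }
    else
      fprintf (stderr, "can't detach LWP %d: %s\n",
               (int) lwpid, strerror (errno));
  }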
 
@@ -1537,6 +1612,7 @@ static int
 linux_detach (int pid)
 {
   struct process_info *process;
+  struct lwp_info *main_lwp;
 
   process = find_process_pid (pid);
   if (process == NULL)
@@ -1560,7 +1636,13 @@ linux_detach (int pid)
   /* Stabilize threads (move out of jump pads).  */
   stabilize_threads ();
 
-  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
+  /* Detach from the clone lwps first.  If the thread group exits just
+     while we're detaching, we must reap the clone lwps before we're
+     able to reap the leader.  */
+  find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
+
+  main_lwp = find_lwp_pid (pid_to_ptid (pid));
+  linux_detach_one_lwp (main_lwp);
 
   the_target->mourn (process);
 
@@ -1902,10 +1984,9 @@ check_zombie_leaders (void)
             thread execs).  */
 
          if (debug_threads)
-           fprintf (stderr,
-                    "CZL: Thread group leader %d zombie "
-                    "(it exited, or another thread execd).\n",
-                    leader_pid);
+           debug_printf ("CZL: Thread group leader %d zombie "
+                         "(it exited, or another thread execd).\n",
+                         leader_pid);
 
          delete_lwp (leader_lp);
        }
@@ -2001,7 +2082,9 @@ handle_tracepoints (struct lwp_info *lwp)
   lwp_suspended_decr (lwp);
 
   gdb_assert (lwp->suspended == 0);
-  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
+  gdb_assert (!stabilizing_threads
+             || (lwp->collecting_fast_tracepoint
+                 != fast_tpoint_collect_result::not_collecting));
 
   if (tpoint_related_event)
     {
@@ -2013,10 +2096,10 @@ handle_tracepoints (struct lwp_info *lwp)
   return 0;
 }
 
-/* Convenience wrapper.  Returns true if LWP is presently collecting a
-   fast tracepoint.  */
+/* Convenience wrapper.  Returns information about LWP's fast tracepoint
+   collection status.  */
 
-static int
+static fast_tpoint_collect_result
 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
 {
@@ -2024,14 +2107,14 @@ linux_fast_tracepoint_collecting (struct lwp_info *lwp,
   struct thread_info *thread = get_lwp_thread (lwp);
 
   if (the_low_target.get_thread_area == NULL)
-    return 0;
+    return fast_tpoint_collect_result::not_collecting;
 
   /* Get the thread area address.  This is used to recognize which
      thread is which when tracing with the in-process agent library.
      We don't read anything from the address, and treat it as opaque;
      it's the address itself that we assume is unique per-thread.  */
   if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
-    return 0;
+    return fast_tpoint_collect_result::not_collecting;
 
   return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
 }
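
linux_fast_tracepoint_collecting now returns the scoped enum fast_tpoint_collect_result instead of a bare int.  Its definition lives in a shared header elsewhere in the tree; the sketch below reconstructs it from the uses in this file (not_collecting, before_insn, at_insn) to show how the call sites read, so treat the comments as inferred rather than authoritative.

  #include <cstdio>

  /* Reconstructed for illustration from the comparisons in this file.  */

  enum class fast_tpoint_collect_result
  {
    /* Not collecting a fast tracepoint.  */
    not_collecting,

    /* In the jump pad, before the copied instruction has executed.  */
    before_insn,

    /* In the jump pad, at or past the copied instruction.  */
    at_insn
  };

  int
  main ()
  {
    fast_tpoint_collect_result r = fast_tpoint_collect_result::before_insn;

    /* The old code compared an int against the magic values 0/1/2;
       with the scoped enum every comparison names the state.  */
    if (r != fast_tpoint_collect_result::not_collecting)
      printf ("still collecting\n");
    return 0;
  }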
@@ -2055,14 +2138,14 @@ maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
       && agent_loaded_p ())
     {
       struct fast_tpoint_collect_status status;
-      int r;
 
       if (debug_threads)
        debug_printf ("Checking whether LWP %ld needs to move out of the "
                      "jump pad.\n",
                      lwpid_of (current_thread));
 
-      r = linux_fast_tracepoint_collecting (lwp, &status);
+      fast_tpoint_collect_result r
+       = linux_fast_tracepoint_collecting (lwp, &status);
 
       if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
@@ -2072,9 +2155,10 @@ maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
        {
          lwp->collecting_fast_tracepoint = r;
 
-         if (r != 0)
+         if (r != fast_tpoint_collect_result::not_collecting)
            {
-             if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
+             if (r == fast_tpoint_collect_result::before_insn
+                 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
@@ -2100,9 +2184,10 @@ maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */
 
-         lwp->collecting_fast_tracepoint = 0;
+         lwp->collecting_fast_tracepoint
+           = fast_tpoint_collect_result::not_collecting;
 
-         if (r != 0
+         if (r != fast_tpoint_collect_result::not_collecting
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
@@ -2556,11 +2641,9 @@ maybe_hw_step (struct thread_info *thread)
     return 1;
   else
     {
-      struct process_info *proc = get_thread_process (thread);
-
-      /* GDBserver must insert reinsert breakpoint for software
+      /* GDBserver must insert single-step breakpoint for software
         single step.  */
-      gdb_assert (has_reinsert_breakpoints (proc));
+      gdb_assert (has_single_step_breakpoints (thread));
       return 0;
     }
 }
@@ -2579,7 +2662,10 @@ resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
       && !lp->status_pending_p
       && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
     {
-      int step = thread->last_resume_kind == resume_step;
+      int step = 0;
+
+      if (thread->last_resume_kind == resume_step)
+       step = maybe_hw_step (thread);
 
       if (debug_threads)
        debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
@@ -2620,7 +2706,8 @@ linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
   if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
     {
       event_thread = (struct thread_info *)
-       find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
+       find_inferior_in_random (&all_threads, status_pending_p_callback,
+                                &filter_ptid);
       if (event_thread != NULL)
        event_child = get_thread_lwp (event_thread);
       if (debug_threads && event_thread)
@@ -2632,7 +2719,8 @@ linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
 
       if (stopping_threads == NOT_STOPPING_THREADS
          && requested_child->status_pending_p
-         && requested_child->collecting_fast_tracepoint)
+         && (requested_child->collecting_fast_tracepoint
+             != fast_tpoint_collect_result::not_collecting))
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
@@ -2731,7 +2819,8 @@ linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
       /* ... and find an LWP with a status to report to the core, if
         any.  */
       event_thread = (struct thread_info *)
-       find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
+       find_inferior_in_random (&all_threads, status_pending_p_callback,
+                                &filter_ptid);
       if (event_thread != NULL)
        {
          event_child = get_thread_lwp (event_thread);
@@ -3263,13 +3352,13 @@ linux_wait_1 (ptid_t ptid,
      the breakpoint address.
      So in the case of the hardware single step advance the PC manually
      past the breakpoint and in the case of software single step advance only
-     if it's not the reinsert_breakpoint we are hitting.
+     if it's not the single_step_breakpoint we are hitting.
      This avoids that a program would keep trapping a permanent breakpoint
      forever.  */
   if (!ptid_equal (step_over_bkpt, null_ptid)
       && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
       && (event_child->stepping
-         || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
+         || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
     {
       int increment_pc = 0;
       int breakpoint_kind = 0;
@@ -3323,8 +3412,8 @@ linux_wait_1 (ptid_t ptid,
 
       /* We have a SIGTRAP, possibly a step-over dance has just
         finished.  If so, tweak the state machine accordingly,
-        reinsert breakpoints and delete any reinsert (software
-        single-step) breakpoints.  */
+        reinsert breakpoints and delete any single-step
+        breakpoints.  */
       step_over_finished = finish_step_over (event_child);
 
       /* Now invoke the callbacks of any internal breakpoints there.  */
@@ -3377,24 +3466,28 @@ linux_wait_1 (ptid_t ptid,
 
          linux_resume_one_lwp (event_child, 0, 0, NULL);
 
+         if (debug_threads)
+           debug_exit ();
          return ignore_event (ourstatus);
        }
     }
 
-  if (event_child->collecting_fast_tracepoint)
+  if (event_child->collecting_fast_tracepoint
+      != fast_tpoint_collect_result::not_collecting)
     {
       if (debug_threads)
        debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
                      "Check if we're already there.\n",
                      lwpid_of (current_thread),
-                     event_child->collecting_fast_tracepoint);
+                     (int) event_child->collecting_fast_tracepoint);
 
       trace_event = 1;
 
       event_child->collecting_fast_tracepoint
        = linux_fast_tracepoint_collecting (event_child, NULL);
 
-      if (event_child->collecting_fast_tracepoint != 1)
+      if (event_child->collecting_fast_tracepoint
+         != fast_tpoint_collect_result::before_insn)
        {
          /* No longer need this breakpoint.  */
          if (event_child->exit_jump_pad_bkpt != NULL)
@@ -3421,7 +3514,8 @@ linux_wait_1 (ptid_t ptid,
            }
        }
 
-      if (event_child->collecting_fast_tracepoint == 0)
+      if (event_child->collecting_fast_tracepoint
+         == fast_tpoint_collect_result::not_collecting)
        {
          if (debug_threads)
            debug_printf ("fast tracepoint finished "
@@ -3472,6 +3566,9 @@ linux_wait_1 (ptid_t ptid,
 
       linux_resume_one_lwp (event_child, event_child->stepping,
                            0, NULL);
+
+      if (debug_threads)
+       debug_exit ();
       return ignore_event (ourstatus);
     }
 
@@ -3527,6 +3624,10 @@ linux_wait_1 (ptid_t ptid,
          linux_resume_one_lwp (event_child, event_child->stepping,
                                WSTOPSIG (w), info_p);
        }
+
+      if (debug_threads)
+       debug_exit ();
+
       return ignore_event (ourstatus);
     }
 
@@ -3595,18 +3696,36 @@ linux_wait_1 (ptid_t ptid,
          (*the_low_target.set_pc) (regcache, event_child->stop_pc);
        }
 
-      /* We may have finished stepping over a breakpoint.  If so,
-        we've stopped and suspended all LWPs momentarily except the
-        stepping one.  This is where we resume them all again.  We're
-        going to keep waiting, so use proceed, which handles stepping
-        over the next breakpoint.  */
+      if (step_over_finished)
+       {
+         /* If we have finished stepping over a breakpoint, we've
+            stopped and suspended all LWPs momentarily except the
+            stepping one.  This is where we resume them all again.
+            We're going to keep waiting, so use proceed, which
+            handles stepping over the next breakpoint.  */
+         unsuspend_all_lwps (event_child);
+       }
+      else
+       {
+         /* Remove the single-step breakpoints, if any.  Note that
+            there are no single-step breakpoints if we finished stepping
+            over.  */
+         if (can_software_single_step ()
+             && has_single_step_breakpoints (current_thread))
+           {
+             stop_all_lwps (0, event_child);
+             delete_single_step_breakpoints (current_thread);
+             unstop_all_lwps (0, event_child);
+           }
+       }
+
       if (debug_threads)
        debug_printf ("proceeding all threads.\n");
+      proceed_all_lwps ();
 
-      if (step_over_finished)
-       unsuspend_all_lwps (event_child);
+      if (debug_threads)
+       debug_exit ();
 
-      proceed_all_lwps ();
       return ignore_event (ourstatus);
     }
 
@@ -3614,12 +3733,11 @@ linux_wait_1 (ptid_t ptid,
     {
       if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
        {
-         char *str;
+         std::string str
+           = target_waitstatus_to_string (&event_child->waitstatus);
 
-         str = target_waitstatus_to_string (&event_child->waitstatus);
          debug_printf ("LWP %ld: extended event with waitstatus %s\n",
-                       lwpid_of (get_lwp_thread (event_child)), str);
-         xfree (str);
+                       lwpid_of (get_lwp_thread (event_child)), str.c_str ());
        }
       if (current_thread->last_resume_kind == resume_step)
        {
@@ -3638,29 +3756,71 @@ linux_wait_1 (ptid_t ptid,
 
   /* Alright, we're going to report a stop.  */
 
-  if (!stabilizing_threads)
+  /* Remove single-step breakpoints.  */
+  if (can_software_single_step ())
     {
-      /* In all-stop, stop all threads.  */
-      if (!non_stop)
-       stop_all_lwps (0, NULL);
+      /* Whether to remove single-step breakpoints.  If true, stop all
+        lwps, so that other threads won't hit the breakpoint in
+        stale memory.  */
+      int remove_single_step_breakpoints_p = 0;
 
-      /* If we're not waiting for a specific LWP, choose an event LWP
-        from among those that have had events.  Giving equal priority
-        to all LWPs that have had events helps prevent
-        starvation.  */
-      if (ptid_equal (ptid, minus_one_ptid))
+      if (non_stop)
        {
-         event_child->status_pending_p = 1;
-         event_child->status_pending = w;
+         remove_single_step_breakpoints_p
+           = has_single_step_breakpoints (current_thread);
+       }
+      else
+       {
+         /* In all-stop, a stop reply cancels all previous resume
+            requests.  Delete all single-step breakpoints.  */
+         struct inferior_list_entry *inf, *tmp;
 
-         select_event_lwp (&event_child);
+         ALL_INFERIORS (&all_threads, inf, tmp)
+           {
+             struct thread_info *thread = (struct thread_info *) inf;
 
-         /* current_thread and event_child must stay in sync.  */
-         current_thread = get_lwp_thread (event_child);
+             if (has_single_step_breakpoints (thread))
+               {
+                 remove_single_step_breakpoints_p = 1;
+                 break;
+               }
+           }
+       }
 
-         event_child->status_pending_p = 0;
-         w = event_child->status_pending;
+      if (remove_single_step_breakpoints_p)
+       {
+         /* If we remove single-step breakpoints from memory, stop all lwps,
+            so that other threads won't hit the breakpoint in stale
+            memory.  */
+         stop_all_lwps (0, event_child);
+
+         if (non_stop)
+           {
+             gdb_assert (has_single_step_breakpoints (current_thread));
+             delete_single_step_breakpoints (current_thread);
+           }
+         else
+           {
+             struct inferior_list_entry *inf, *tmp;
+
+             ALL_INFERIORS (&all_threads, inf, tmp)
+               {
+                 struct thread_info *thread = (struct thread_info *) inf;
+
+                 if (has_single_step_breakpoints (thread))
+                   delete_single_step_breakpoints (thread);
+               }
+           }
+
+         unstop_all_lwps (0, event_child);
        }
+    }
+
+  if (!stabilizing_threads)
+    {
+      /* In all-stop, stop all threads.  */
+      if (!non_stop)
+       stop_all_lwps (0, NULL);
 
       if (step_over_finished)
        {
@@ -3686,6 +3846,25 @@ linux_wait_1 (ptid_t ptid,
            }
        }
 
+      /* If we're not waiting for a specific LWP, choose an event LWP
+        from among those that have had events.  Giving equal priority
+        to all LWPs that have had events helps prevent
+        starvation.  */
+      if (ptid_equal (ptid, minus_one_ptid))
+       {
+         event_child->status_pending_p = 1;
+         event_child->status_pending = w;
+
+         select_event_lwp (&event_child);
+
+         /* current_thread and event_child must stay in sync.  */
+         current_thread = get_lwp_thread (event_child);
+
+         event_child->status_pending_p = 0;
+         w = event_child->status_pending;
+       }
+
+
       /* Stabilize threads (move out of jump pads).  */
       if (!non_stop)
        stabilize_threads ();
@@ -3704,6 +3883,15 @@ linux_wait_1 (ptid_t ptid,
     {
       /* If the reported event is an exit, fork, vfork or exec, let
         GDB know.  */
+
+      /* Break the unreported fork relationship chain.  */
+      if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
+         || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
+       {
+         event_child->fork_relative->fork_relative = NULL;
+         event_child->fork_relative = NULL;
+       }
+
       *ourstatus = event_child->waitstatus;
       /* Clear the event lwp's waitstatus since we handled it already.  */
       event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
@@ -4011,7 +4199,8 @@ stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
          && (gdb_breakpoint_here (lwp->stop_pc)
              || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
              || thread->last_resume_kind == resume_step)
-         && linux_fast_tracepoint_collecting (lwp, NULL));
+         && (linux_fast_tracepoint_collecting (lwp, NULL)
+             != fast_tpoint_collect_result::not_collecting));
 }
 
 static void
@@ -4139,16 +4328,15 @@ enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
 static void
 install_software_single_step_breakpoints (struct lwp_info *lwp)
 {
-  int i;
-  CORE_ADDR pc;
-  struct regcache *regcache = get_thread_regcache (current_thread, 1);
-  VEC (CORE_ADDR) *next_pcs = NULL;
-  struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
+  struct thread_info *thread = get_lwp_thread (lwp);
+  struct regcache *regcache = get_thread_regcache (thread, 1);
+  struct cleanup *old_chain = make_cleanup_restore_current_thread ();
 
-  next_pcs = (*the_low_target.get_next_pcs) (regcache);
+  current_thread = thread;
+  std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
 
-  for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
-    set_reinsert_breakpoint (pc);
+  for (CORE_ADDR pc : next_pcs)
+    set_single_step_breakpoint (pc, current_ptid);
 
   do_cleanups (old_chain);
 }
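
install_software_single_step_breakpoints above swaps the VEC (CORE_ADDR) plus explicit cleanup for a std::vector, whose destructor releases the storage, so the loop becomes a plain range-for.  A self-contained sketch of that container change, with hypothetical stand-ins for CORE_ADDR, get_next_pcs and set_single_step_breakpoint:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  typedef uint64_t core_addr;	/* Stand-in for CORE_ADDR.  */

  /* Stand-in for the_low_target.get_next_pcs: the addresses execution
     can reach next (e.g. both arms of a conditional branch).  */

  static std::vector<core_addr>
  get_next_pcs_sketch (core_addr pc)
  {
    return { pc + 4, pc + 8 };
  }

  /* Stand-in for set_single_step_breakpoint.  */

  static void
  set_single_step_breakpoint_sketch (core_addr pc)
  {
    printf ("single-step breakpoint at 0x%llx\n", (unsigned long long) pc);
  }

  int
  main ()
  {
    /* The vector owns its storage, so no make_cleanup/VEC_cleanup pair
       is needed; it is freed when it goes out of scope.  */
    for (core_addr pc : get_next_pcs_sketch (0x1000))
      set_single_step_breakpoint_sketch (pc);
    return 0;
  }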
@@ -4189,7 +4377,8 @@ single_step (struct lwp_info* lwp)
 static int
 lwp_signal_can_be_delivered (struct lwp_info *lwp)
 {
-  return !lwp->collecting_fast_tracepoint;
+  return (lwp->collecting_fast_tracepoint
+         == fast_tpoint_collect_result::not_collecting);
 }
 
 /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
@@ -4201,7 +4390,6 @@ linux_resume_one_lwp_throw (struct lwp_info *lwp,
 {
   struct thread_info *thread = get_lwp_thread (lwp);
   struct thread_info *saved_thread;
-  int fast_tp_collecting;
   int ptrace_request;
   struct process_info *proc = get_thread_process (thread);
 
@@ -4217,9 +4405,12 @@ linux_resume_one_lwp_throw (struct lwp_info *lwp,
 
   gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
 
-  fast_tp_collecting = lwp->collecting_fast_tracepoint;
+  fast_tpoint_collect_result fast_tp_collecting
+    = lwp->collecting_fast_tracepoint;
 
-  gdb_assert (!stabilizing_threads || fast_tp_collecting);
+  gdb_assert (!stabilizing_threads
+             || (fast_tp_collecting
+                 != fast_tpoint_collect_result::not_collecting));
 
   /* Cancel actions that rely on GDB not changing the PC (e.g., the
      user used the "jump" command, or "set $pc = foo").  */
@@ -4275,33 +4466,27 @@ linux_resume_one_lwp_throw (struct lwp_info *lwp,
 
       if (can_hardware_single_step ())
        {
-         if (fast_tp_collecting == 0)
+         if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
            {
              if (step == 0)
-               fprintf (stderr, "BAD - reinserting but not stepping.\n");
+               warning ("BAD - reinserting but not stepping.");
              if (lwp->suspended)
-               fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
-                        lwp->suspended);
+               warning ("BAD - reinserting and suspended(%d).",
+                                lwp->suspended);
            }
        }
 
       step = maybe_hw_step (thread);
     }
-  else
-    {
-      /* If the thread isn't doing step-over, there shouldn't be any
-        reinsert breakpoints.  */
-      gdb_assert (!has_reinsert_breakpoints (proc));
-    }
 
-  if (fast_tp_collecting == 1)
+  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
     {
       if (debug_threads)
        debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
                      " (exit-jump-pad-bkpt)\n",
                      lwpid_of (thread));
     }
-  else if (fast_tp_collecting == 2)
+  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
     {
       if (debug_threads)
        debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
@@ -4507,6 +4692,50 @@ linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
              continue;
            }
 
+         /* Ignore (wildcard) resume requests for already-resumed
+            threads.  */
+         if (r->resume[ndx].kind != resume_stop
+             && thread->last_resume_kind != resume_stop)
+           {
+             if (debug_threads)
+               debug_printf ("already %s LWP %ld at GDB's request\n",
+                             (thread->last_resume_kind
+                              == resume_step)
+                             ? "stepping"
+                             : "continuing",
+                             lwpid_of (thread));
+             continue;
+           }
+
+         /* Don't let wildcard resumes resume fork children that GDB
+            does not yet know are new fork children.  */
+         if (lwp->fork_relative != NULL)
+           {
+             struct lwp_info *rel = lwp->fork_relative;
+
+             if (rel->status_pending_p
+                 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
+                     || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
+               {
+                 if (debug_threads)
+                   debug_printf ("not resuming LWP %ld: has queued stop reply\n",
+                                 lwpid_of (thread));
+                 continue;
+               }
+           }
+
+         /* If the thread has a pending event that has already been
+            reported to GDBserver core, but GDB has not pulled the
+            event out of the vStopped queue yet, likewise, ignore the
+            (wildcard) resume request.  */
+         if (in_queued_stop_replies (entry->id))
+           {
+             if (debug_threads)
+               debug_printf ("not resuming LWP %ld: has queued stop reply\n",
+                             lwpid_of (thread));
+             continue;
+           }
+
          lwp->resume = &r->resume[ndx];
          thread->last_resume_kind = lwp->resume->kind;
 
@@ -4755,7 +4984,7 @@ start_step_over (struct lwp_info *lwp)
 }
 
 /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
-   start_step_over, if still there, and delete any reinsert
+   start_step_over, if still there, and delete any single-step
    breakpoints we've set, on non hardware single-step targets.  */
 
 static int
@@ -4777,15 +5006,15 @@ finish_step_over (struct lwp_info *lwp)
 
       lwp->bp_reinsert = 0;
 
-      /* Delete any software-single-step reinsert breakpoints.  No
-        longer needed.  We don't have to worry about other threads
-        hitting this trap, and later not being able to explain it,
-        because we were stepping over a breakpoint, and we hold all
-        threads but LWP stopped while doing that.  */
+      /* Delete any single-step breakpoints.  No longer needed.  We
+        don't have to worry about other threads hitting this trap,
+        and later not being able to explain it, because we were
+        stepping over a breakpoint, and we hold all threads but
+        LWP stopped while doing that.  */
       if (!can_hardware_single_step ())
        {
-         gdb_assert (has_reinsert_breakpoints (current_process ()));
-         delete_reinsert_breakpoints ();
+         gdb_assert (has_single_step_breakpoints (current_thread));
+         delete_single_step_breakpoints (current_thread);
        }
 
       step_over_bkpt = null_ptid;
@@ -4847,7 +5076,6 @@ linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
 {
   struct thread_info *thread = (struct thread_info *) entry;
   struct lwp_info *lwp = get_thread_lwp (thread);
-  int step;
   int leave_all_stopped = * (int *) arg;
   int leave_pending;
 
@@ -4914,38 +5142,35 @@ linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
                   || lwp->status_pending_p
                   || leave_all_stopped);
 
+  /* If we have a new signal, enqueue the signal.  */
+  if (lwp->resume->sig != 0)
+    {
+      siginfo_t info, *info_p;
+
+      /* If this is the same signal we were previously stopped by,
+        make sure to queue its siginfo.  */
+      if (WIFSTOPPED (lwp->last_status)
+         && WSTOPSIG (lwp->last_status) == lwp->resume->sig
+         && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
+                    (PTRACE_TYPE_ARG3) 0, &info) == 0)
+       info_p = &info;
+      else
+       info_p = NULL;
+
+      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
+    }
+
   if (!leave_pending)
     {
       if (debug_threads)
        debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
 
-      step = (lwp->resume->kind == resume_step);
-      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
+      proceed_one_lwp (entry, NULL);
     }
   else
     {
       if (debug_threads)
        debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
-
-      /* If we have a new signal, enqueue the signal.  */
-      if (lwp->resume->sig != 0)
-       {
-         struct pending_signals *p_sig = XCNEW (struct pending_signals);
-
-         p_sig->prev = lwp->pending_signals;
-         p_sig->signal = lwp->resume->sig;
-
-         /* If this is the same signal we were previously stopped by,
-            make sure to queue its siginfo.  We can ignore the return
-            value of ptrace; if it fails, we'll skip
-            PTRACE_SETSIGINFO.  */
-         if (WIFSTOPPED (lwp->last_status)
-             && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
-           ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
-                   &p_sig->info);
-
-         lwp->pending_signals = p_sig;
-       }
     }
 
   thread->last_status.kind = TARGET_WAITKIND_IGNORE;
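
The signal-enqueue path above now runs for both the resumed and the left-pending cases: if the resume signal matches the one the lwp was last stopped by, its siginfo is fetched with PTRACE_GETSIGINFO and queued along with it so it can be re-delivered intact.  A small sketch of that pattern, with a hypothetical print-only stand-in for enqueue_pending_signal:

  #include <signal.h>
  #include <stdio.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/wait.h>

  /* Stand-in for enqueue_pending_signal.  */

  static void
  enqueue_pending_signal_sketch (pid_t lwpid, int sig, siginfo_t *info)
  {
    printf ("queueing signal %d for LWP %d%s\n", sig, (int) lwpid,
            info != NULL ? " (with siginfo)" : "");
  }

  /* Queue RESUME_SIG for LWPID; if it is the same signal the lwp was
     last stopped by (LAST_STATUS), fetch and queue its siginfo too.  */

  static void
  requeue_resume_signal_sketch (pid_t lwpid, int last_status, int resume_sig)
  {
    siginfo_t info, *info_p = NULL;

    if (WIFSTOPPED (last_status)
        && WSTOPSIG (last_status) == resume_sig
        && ptrace (PTRACE_GETSIGINFO, lwpid, (void *) 0, &info) == 0)
      info_p = &info;

    enqueue_pending_signal_sketch (lwpid, resume_sig, info_p);
  }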
@@ -5079,7 +5304,8 @@ proceed_one_lwp (struct inferior_list_entry *entry, void *except)
 
   if (thread->last_resume_kind == resume_stop
       && lwp->pending_signals_to_report == NULL
-      && lwp->collecting_fast_tracepoint == 0)
+      && (lwp->collecting_fast_tracepoint
+         == fast_tpoint_collect_result::not_collecting))
     {
       /* We haven't reported this LWP as stopped yet (otherwise, the
         last_status.kind check above would catch it, and we wouldn't
@@ -5104,7 +5330,15 @@ proceed_one_lwp (struct inferior_list_entry *entry, void *except)
       if (debug_threads)
        debug_printf ("   stepping LWP %ld, client wants it stepping\n",
                      lwpid_of (thread));
-      step = 1;
+
+      /* If resume_step is requested by GDB, install single-step
+        breakpoints when the thread is about to be actually resumed if
+        the single-step breakpoints weren't removed.  */
+      if (can_software_single_step ()
+         && !has_single_step_breakpoints (thread))
+       install_software_single_step_breakpoints (lwp);
+
+      step = maybe_hw_step (thread);
     }
   else if (lwp->bp_reinsert != 0)
     {
@@ -5279,6 +5513,12 @@ regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
                 not "active".  This can happen in normal operation,
                 so suppress the warning in this case.  */
            }
+         else if (errno == ESRCH)
+           {
+             /* At this point, ESRCH should mean the process is
+                already gone, in which case we simply ignore attempts
+                to read its registers.  */
+           }
          else
            {
              char s[256];
@@ -5641,11 +5881,11 @@ static int
 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
 {
   int pid = lwpid_of (current_thread);
-  register PTRACE_XFER_TYPE *buffer;
-  register CORE_ADDR addr;
-  register int count;
+  PTRACE_XFER_TYPE *buffer;
+  CORE_ADDR addr;
+  int count;
   char filename[64];
-  register int i;
+  int i;
   int ret;
   int fd;
 
@@ -5729,16 +5969,16 @@ linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
 static int
 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
 {
-  register int i;
+  int i;
   /* Round starting address down to longword boundary.  */
-  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
+  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
   /* Round ending address up; get number of longwords that makes.  */
-  register int count
+  int count
     = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
     / sizeof (PTRACE_XFER_TYPE);
 
   /* Allocate buffer of that many longwords.  */
-  register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
+  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
 
   int pid = lwpid_of (current_thread);
 
@@ -5829,8 +6069,6 @@ linux_look_up_symbols (void)
 static void
 linux_request_interrupt (void)
 {
-  extern unsigned long signal_pid;
-
   /* Send a SIGINT to the process group.  This acts just like the user
      typed a ^C on the controlling terminal.  */
   kill (-signal_pid, SIGINT);
@@ -6287,6 +6525,8 @@ linux_supports_agent (void)
 static int
 linux_supports_range_stepping (void)
 {
+  if (can_software_single_step ())
+    return 1;
   if (*the_low_target.supports_range_stepping == NULL)
     return 0;
 