/* GNU/Linux native-dependent code common to multiple platforms.
- Copyright (C) 2001-2014 Free Software Foundation, Inc.
+ Copyright (C) 2001-2015 Free Software Foundation, Inc.
This file is part of GDB.
#include "defs.h"
#include "inferior.h"
+#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
-#include <string.h>
#include "gdb_wait.h"
-#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
-#include "linux-ptrace.h"
-#include "linux-procfs.h"
+#include "nat/linux-ptrace.h"
+#include "nat/linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
-#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"
-#include "linux-osdata.h"
+#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
-#include "exceptions.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
+#include "objfiles.h"
#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
Called by our to_xfer_partial. */
static target_xfer_partial_ftype *super_xfer_partial;
+/* The saved to_close method, inherited from inf-ptrace.c.
+ Called by our to_close. */
+static void (*super_close) (struct target_ops *);
+
static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);
+static int lwp_status_pending_p (struct lwp_info *lp);
+
+static int check_stopped_by_breakpoint (struct lwp_info *lp);
+static int sigtrap_is_event (int status);
+static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
+
\f
/* Trivial list manipulation functions to keep track of a list of
new stopped processes. */
}
/* Initialize ptrace warnings and check for supported ptrace
- features given PID. */
+ features given PID.
+
+ ATTACHED should be nonzero iff we attached to the inferior. */
static void
-linux_init_ptrace (pid_t pid)
+linux_init_ptrace (pid_t pid, int attached)
{
- linux_enable_event_reporting (pid);
+ linux_enable_event_reporting (pid, attached);
linux_ptrace_init_warnings ();
}
static void
linux_child_post_attach (struct target_ops *self, int pid)
{
- linux_init_ptrace (pid);
+ linux_init_ptrace (pid, 1);
}
static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
- linux_init_ptrace (ptid_get_pid (ptid));
+ linux_init_ptrace (ptid_get_pid (ptid), 0);
}
/* Return the number of known LWPs in the tgid given by PID. */
delete_lwp (lp->ptid);
}
+/* Target hook for follow_fork. On entry inferior_ptid must be the
+ ptid of the followed inferior. At return, inferior_ptid will be
+ unchanged. */
+
static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
int detach_fork)
{
- int has_vforked;
- int parent_pid, child_pid;
-
- has_vforked = (inferior_thread ()->pending_follow.kind
- == TARGET_WAITKIND_VFORKED);
- parent_pid = ptid_get_lwp (inferior_ptid);
- if (parent_pid == 0)
- parent_pid = ptid_get_pid (inferior_ptid);
- child_pid
- = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
-
- if (has_vforked
- && !non_stop /* Non-stop always resumes both branches. */
- && (!target_is_async_p () || sync_execution)
- && !(follow_child || detach_fork || sched_multi))
- {
- /* The parent stays blocked inside the vfork syscall until the
- child execs or exits. If we don't let the child run, then
- the parent stays blocked. If we're telling the parent to run
- in the foreground, the user will not be able to ctrl-c to get
- back the terminal, effectively hanging the debug session. */
- fprintf_filtered (gdb_stderr, _("\
-Can not resume the parent process over vfork in the foreground while\n\
-holding the child stopped. Try \"set detach-on-fork\" or \
-\"set schedule-multiple\".\n"));
- /* FIXME output string > 80 columns. */
- return 1;
- }
-
- if (! follow_child)
+ if (!follow_child)
{
struct lwp_info *child_lp = NULL;
+ int status = W_STOPCODE (0);
+ struct cleanup *old_chain;
+ int has_vforked;
+ int parent_pid, child_pid;
+
+ has_vforked = (inferior_thread ()->pending_follow.kind
+ == TARGET_WAITKIND_VFORKED);
+ parent_pid = ptid_get_lwp (inferior_ptid);
+ if (parent_pid == 0)
+ parent_pid = ptid_get_pid (inferior_ptid);
+ child_pid
+ = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
+
/* We're already attached to the parent, by default. */
+ old_chain = save_inferior_ptid ();
+ inferior_ptid = ptid_build (child_pid, child_pid, 0);
+ child_lp = add_lwp (inferior_ptid);
+ child_lp->stopped = 1;
+ child_lp->last_resume_kind = resume_stop;
/* Detach new forked process? */
if (detach_fork)
{
- struct cleanup *old_chain;
-
- /* Before detaching from the child, remove all breakpoints
- from it. If we forked, then this has already been taken
- care of by infrun.c. If we vforked however, any
- breakpoint inserted in the parent is visible in the
- child, even those added while stopped in a vfork
- catchpoint. This will remove the breakpoints from the
- parent also, but they'll be reinserted below. */
- if (has_vforked)
- {
- /* keep breakpoints list in sync. */
- remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
- }
-
- if (info_verbose || debug_linux_nat)
- {
- target_terminal_ours ();
- fprintf_filtered (gdb_stdlog,
- "Detaching after fork from "
- "child process %d.\n",
- child_pid);
- }
-
- old_chain = save_inferior_ptid ();
- inferior_ptid = ptid_build (child_pid, child_pid, 0);
-
- child_lp = add_lwp (inferior_ptid);
- child_lp->stopped = 1;
- child_lp->last_resume_kind = resume_stop;
make_cleanup (delete_lwp_cleanup, child_lp);
if (linux_nat_prepare_to_resume != NULL)
linux_nat_prepare_to_resume (child_lp);
- ptrace (PTRACE_DETACH, child_pid, 0, 0);
- do_cleanups (old_chain);
- }
- else
- {
- struct inferior *parent_inf, *child_inf;
- struct cleanup *old_chain;
-
- /* Add process to GDB's tables. */
- child_inf = add_inferior (child_pid);
-
- parent_inf = current_inferior ();
- child_inf->attach_flag = parent_inf->attach_flag;
- copy_terminal_info (child_inf, parent_inf);
- child_inf->gdbarch = parent_inf->gdbarch;
- copy_inferior_target_desc_info (child_inf, parent_inf);
-
- old_chain = save_inferior_ptid ();
- save_current_program_space ();
-
- inferior_ptid = ptid_build (child_pid, child_pid, 0);
- add_thread (inferior_ptid);
- child_lp = add_lwp (inferior_ptid);
- child_lp->stopped = 1;
- child_lp->last_resume_kind = resume_stop;
- child_inf->symfile_flags = SYMFILE_NO_READ;
-
- /* If this is a vfork child, then the address-space is
- shared with the parent. */
- if (has_vforked)
+ /* When debugging an inferior in an architecture that supports
+ hardware single stepping on a kernel without commit
+ 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
+ process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
+ set if the parent process had them set.
+ To work around this, single step the child process
+ once before detaching to clear the flags. */
+
+ if (!gdbarch_software_single_step_p (target_thread_architecture
+ (child_lp->ptid)))
{
- child_inf->pspace = parent_inf->pspace;
- child_inf->aspace = parent_inf->aspace;
-
- /* The parent will be frozen until the child is done
- with the shared region. Keep track of the
- parent. */
- child_inf->vfork_parent = parent_inf;
- child_inf->pending_detach = 0;
- parent_inf->vfork_child = child_inf;
- parent_inf->pending_detach = 0;
+ linux_disable_event_reporting (child_pid);
+ if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
+ perror_with_name (_("Couldn't do single step"));
+ if (my_waitpid (child_pid, &status, 0) < 0)
+ perror_with_name (_("Couldn't wait vfork process"));
}
- else
+
+ if (WIFSTOPPED (status))
{
- child_inf->aspace = new_address_space ();
- child_inf->pspace = add_program_space (child_inf->aspace);
- child_inf->removable = 1;
- set_current_program_space (child_inf->pspace);
- clone_program_space (child_inf->pspace, parent_inf->pspace);
-
- /* Let the shared library layer (solib-svr4) learn about
- this new process, relocate the cloned exec, pull in
- shared libraries, and install the solib event
- breakpoint. If a "cloned-VM" event was propagated
- better throughout the core, this wouldn't be
- required. */
- solib_create_inferior_hook (0);
+ int signo;
+
+ signo = WSTOPSIG (status);
+ if (signo != 0
+ && !signal_pass_state (gdb_signal_from_host (signo)))
+ signo = 0;
+ ptrace (PTRACE_DETACH, child_pid, 0, signo);
}
+      /* Resets the value of inferior_ptid to the parent's ptid. */
+ do_cleanups (old_chain);
+ }
+ else
+ {
/* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
-
- do_cleanups (old_chain);
}
+ do_cleanups (old_chain);
+
if (has_vforked)
{
struct lwp_info *parent_lp;
- struct inferior *parent_inf;
-
- parent_inf = current_inferior ();
-
- /* If we detached from the child, then we have to be careful
- to not insert breakpoints in the parent until the child
- is done with the shared memory region. However, if we're
- staying attached to the child, then we can and should
- insert breakpoints, so that we can debug it. A
- subsequent child exec or exit is enough to know when does
- the child stops using the parent's address space. */
- parent_inf->waiting_for_vfork_done = detach_fork;
- parent_inf->pspace->breakpoints_not_allowed = detach_fork;
parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
gdb_assert (linux_supports_tracefork () >= 0);
}
else
{
- struct inferior *parent_inf, *child_inf;
struct lwp_info *child_lp;
- struct program_space *parent_pspace;
-
- if (info_verbose || debug_linux_nat)
- {
- target_terminal_ours ();
- if (has_vforked)
- fprintf_filtered (gdb_stdlog,
- _("Attaching after process %d "
- "vfork to child process %d.\n"),
- parent_pid, child_pid);
- else
- fprintf_filtered (gdb_stdlog,
- _("Attaching after process %d "
- "fork to child process %d.\n"),
- parent_pid, child_pid);
- }
-
- /* Add the new inferior first, so that the target_detach below
- doesn't unpush the target. */
-
- child_inf = add_inferior (child_pid);
- parent_inf = current_inferior ();
- child_inf->attach_flag = parent_inf->attach_flag;
- copy_terminal_info (child_inf, parent_inf);
- child_inf->gdbarch = parent_inf->gdbarch;
- copy_inferior_target_desc_info (child_inf, parent_inf);
-
- parent_pspace = parent_inf->pspace;
-
- /* If we're vforking, we want to hold on to the parent until the
- child exits or execs. At child exec or exit time we can
- remove the old breakpoints from the parent and detach or
- resume debugging it. Otherwise, detach the parent now; we'll
- want to reuse it's program/address spaces, but we can't set
- them to the child before removing breakpoints from the
- parent, otherwise, the breakpoints module could decide to
- remove breakpoints from the wrong process (since they'd be
- assigned to the same address space). */
-
- if (has_vforked)
- {
- gdb_assert (child_inf->vfork_parent == NULL);
- gdb_assert (parent_inf->vfork_child == NULL);
- child_inf->vfork_parent = parent_inf;
- child_inf->pending_detach = 0;
- parent_inf->vfork_child = child_inf;
- parent_inf->pending_detach = detach_fork;
- parent_inf->waiting_for_vfork_done = 0;
- }
- else if (detach_fork)
- target_detach (NULL, 0);
-
- /* Note that the detach above makes PARENT_INF dangling. */
-
- /* Add the child thread to the appropriate lists, and switch to
- this new thread, before cloning the program space, and
- informing the solib layer about this new process. */
-
- inferior_ptid = ptid_build (child_pid, child_pid, 0);
- add_thread (inferior_ptid);
child_lp = add_lwp (inferior_ptid);
child_lp->stopped = 1;
child_lp->last_resume_kind = resume_stop;
- /* If this is a vfork child, then the address-space is shared
- with the parent. If we detached from the parent, then we can
- reuse the parent's program/address spaces. */
- if (has_vforked || detach_fork)
- {
- child_inf->pspace = parent_pspace;
- child_inf->aspace = child_inf->pspace->aspace;
- }
- else
- {
- child_inf->aspace = new_address_space ();
- child_inf->pspace = add_program_space (child_inf->aspace);
- child_inf->removable = 1;
- child_inf->symfile_flags = SYMFILE_NO_READ;
- set_current_program_space (child_inf->pspace);
- clone_program_space (child_inf->pspace, parent_pspace);
-
- /* Let the shared library layer (solib-svr4) learn about
- this new process, relocate the cloned exec, pull in
- shared libraries, and install the solib event breakpoint.
- If a "cloned-VM" event was propagated better throughout
- the core, this wouldn't be required. */
- solib_create_inferior_hook (0);
- }
-
/* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
}
}
static int
-linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
+linux_child_set_syscall_catchpoint (struct target_ops *self,
+ int pid, int needed, int any_count,
int table_size, int *table)
{
if (!linux_supports_tracesysgood ())
/* Update signals to pass to the inferior. */
static void
-linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
+linux_nat_pass_signals (struct target_ops *self,
+ int numsigs, unsigned char *pass_signals)
{
int signo;
/* Prototypes for local functions. */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
-static char *linux_child_pid_to_exec_file (int pid);
+static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
\f
-/* Convert wait status STATUS to a string. Used for printing debug
- messages only. */
-
-static char *
-status_to_str (int status)
-{
- static char buf[64];
-
- if (WIFSTOPPED (status))
- {
- if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
- snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
- strsignal (SIGTRAP));
- else
- snprintf (buf, sizeof (buf), "%s (stopped)",
- strsignal (WSTOPSIG (status)));
- }
- else if (WIFSIGNALED (status))
- snprintf (buf, sizeof (buf), "%s (terminated)",
- strsignal (WTERMSIG (status)));
- else
- snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
-
- return buf;
-}
/* Destroy and free LP. */
/* See if we've got a stop for this new child
pending. If so, we're already attached. */
+ gdb_assert (lwpid > 0);
new_pid = my_waitpid (lwpid, &status, WNOHANG);
if (new_pid == -1 && errno == ECHILD)
new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
#endif /* HAVE_PERSONALITY */
/* Make sure we report all signals during startup. */
- linux_nat_pass_signals (0, NULL);
+ linux_nat_pass_signals (ops, 0, NULL);
linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
#endif /* HAVE_PERSONALITY */
}
+/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
+ already attached. Returns true if a new LWP is found, false
+ otherwise. */
+
+static int
+attach_proc_task_lwp_callback (ptid_t ptid)
+{
+ struct lwp_info *lp;
+
+ /* Ignore LWPs we're already attached to. */
+ lp = find_lwp_pid (ptid);
+ if (lp == NULL)
+ {
+ int lwpid = ptid_get_lwp (ptid);
+
+ if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
+ {
+ int err = errno;
+
+ /* Be quiet if we simply raced with the thread exiting.
+ EPERM is returned if the thread's task still exists, and
+ is marked as exited or zombie, as well as other
+ conditions, so in that case, confirm the status in
+ /proc/PID/status. */
+ if (err == ESRCH
+ || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
+ {
+ if (debug_linux_nat)
+ {
+ fprintf_unfiltered (gdb_stdlog,
+ "Cannot attach to lwp %d: "
+ "thread is gone (%d: %s)\n",
+ lwpid, err, safe_strerror (err));
+ }
+ }
+ else
+ {
+ warning (_("Cannot attach to lwp %d: %s"),
+ lwpid,
+ linux_ptrace_attach_fail_reason_string (ptid,
+ err));
+ }
+ }
+ else
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "PTRACE_ATTACH %s, 0, 0 (OK)\n",
+ target_pid_to_str (ptid));
+
+ lp = add_lwp (ptid);
+ lp->cloned = 1;
+
+ /* The next time we wait for this LWP we'll see a SIGSTOP as
+ PTRACE_ATTACH brings it to a halt. */
+ lp->signalled = 1;
+
+ /* We need to wait for a stop before being able to make the
+ next ptrace call on this LWP. */
+ lp->must_set_ptrace_flags = 1;
+ }
+
+ return 1;
+ }
+ return 0;
+}
+
static void
-linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
+linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
{
struct lwp_info *lp;
int status;
volatile struct gdb_exception ex;
/* Make sure we report all signals during attach. */
- linux_nat_pass_signals (0, NULL);
+ linux_nat_pass_signals (ops, 0, NULL);
TRY_CATCH (ex, RETURN_MASK_ERROR)
{
make_cleanup (xfree, message);
buffer_init (&buffer);
- linux_ptrace_attach_warnings (pid, &buffer);
+ linux_ptrace_attach_fail_reason (pid, &buffer);
buffer_grow_str0 (&buffer, "");
buffer_s = buffer_finish (&buffer);
make_cleanup (xfree, buffer_s);
- throw_error (ex.error, "%s%s", buffer_s, message);
+ if (*buffer_s != '\0')
+ throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
+ else
+ throw_error (ex.error, "%s", message);
}
/* The ptrace base target adds the main thread with (pid,0,0)
lp->status = status;
+ /* We must attach to every LWP. If /proc is mounted, use that to
+ find them now. The inferior may be using raw clone instead of
+ using pthreads. But even if it is using pthreads, thread_db
+ walks structures in the inferior's address space to find the list
+ of threads/LWPs, and those structures may well be corrupted.
+ Note that once thread_db is loaded, we'll still use it to list
+ threads and associate pthread info with each LWP. */
+ linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
+ attach_proc_task_lwp_callback);
+
if (target_can_async_p ())
target_async (inferior_event_handler, 0);
}
linux_ops->to_detach (ops, args, from_tty);
}
+/* Resume execution of the inferior process. If STEP is nonzero,
+ single-step it. If SIGNAL is nonzero, give it that signal. */
+
+static void
+linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
+{
+ ptid_t ptid;
+
+ lp->step = step;
+
+  /* stop_pc doubles as the PC the LWP had when it was last resumed.
+     We only presently need that if the LWP is stepped, though (to
+     handle the case of stepping a breakpoint instruction). */
+ if (step)
+ {
+ struct regcache *regcache = get_thread_regcache (lp->ptid);
+
+ lp->stop_pc = regcache_read_pc (regcache);
+ }
+ else
+ lp->stop_pc = 0;
+
+ if (linux_nat_prepare_to_resume != NULL)
+ linux_nat_prepare_to_resume (lp);
+ /* Convert to something the lower layer understands. */
+ ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
+ linux_ops->to_resume (linux_ops, ptid, step, signo);
+ lp->stop_reason = LWP_STOPPED_BY_NO_REASON;
+ lp->stopped = 0;
+ registers_changed_ptid (lp->ptid);
+}
+
/* Resume LP. */
static void
{
if (lp->stopped)
{
- struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
+ struct inferior *inf = find_inferior_ptid (lp->ptid);
if (inf->vfork_child != NULL)
{
"RC: Not resuming %s (vfork parent)\n",
target_pid_to_str (lp->ptid));
}
- else if (lp->status == 0
- && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
+ else if (!lwp_status_pending_p (lp))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
: "0"),
step ? "step" : "resume");
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (lp);
- linux_ops->to_resume (linux_ops,
- pid_to_ptid (ptid_get_lwp (lp->ptid)),
- step, signo);
- lp->stopped = 0;
- lp->step = step;
- lp->stopped_by_watchpoint = 0;
+ linux_resume_one_lwp (lp, step, signo);
}
else
{
}
}
-/* Resume LWP, with the last stop signal, if it is in pass state. */
+/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
+ Resume LWP with the last stop signal, if it is in pass state. */
static int
-linux_nat_resume_callback (struct lwp_info *lp, void *data)
+linux_nat_resume_callback (struct lwp_info *lp, void *except)
{
enum gdb_signal signo = GDB_SIGNAL_0;
+ if (lp == except)
+ return 0;
+
if (lp->stopped)
{
struct thread_info *thread;
thread = find_thread_ptid (lp->ptid);
if (thread != NULL)
{
- if (signal_pass_state (thread->suspend.stop_signal))
- signo = thread->suspend.stop_signal;
+ signo = thread->suspend.stop_signal;
thread->suspend.stop_signal = GDB_SIGNAL_0;
}
}
gdb_assert (lp != NULL);
/* Remember if we're stepping. */
- lp->step = step;
lp->last_resume_kind = step ? resume_step : resume_continue;
/* If we have a pending wait status for this thread, there is no
}
}
- if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ if (lwp_status_pending_p (lp))
{
/* FIXME: What should we do if we are supposed to continue
this thread with a signal? */
return;
}
- /* Mark LWP as not stopped to prevent it from being continued by
- linux_nat_resume_callback. */
- lp->stopped = 0;
-
if (resume_many)
- iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);
+ iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
- /* Convert to something the lower layer understands. */
- ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
-
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (lp);
- linux_ops->to_resume (linux_ops, ptid, step, signo);
- lp->stopped_by_watchpoint = 0;
+ linux_resume_one_lwp (lp, step, signo);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
lp->syscall_state = TARGET_WAITKIND_IGNORE;
ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
+ lp->stopped = 0;
return 1;
}
subsequent syscall. Simply resume using the inf-ptrace layer,
which knows when to use PT_SYSCALL or PT_CONTINUE. */
- /* Note that gdbarch_get_syscall_number may access registers, hence
- fill a regcache. */
- registers_changed ();
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (lp);
- linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
- lp->step, GDB_SIGNAL_0);
+ linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
return 1;
}
{
int pid = ptid_get_lwp (lp->ptid);
struct target_waitstatus *ourstatus = &lp->waitstatus;
- int event = status >> 16;
+ int event = linux_ptrace_get_extended_event (status);
if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
|| event == PTRACE_EVENT_CLONE)
fprintf_unfiltered (gdb_stdlog,
"LHEW: resuming new LWP %ld\n",
ptid_get_lwp (new_lp->ptid));
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (new_lp);
- linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
- 0, GDB_SIGNAL_0);
- new_lp->stopped = 0;
+ linux_resume_one_lwp (new_lp, 0, GDB_SIGNAL_0);
}
}
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"LHEW: resuming parent LWP %d\n", pid);
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (lp);
- linux_ops->to_resume (linux_ops,
- pid_to_ptid (ptid_get_lwp (lp->ptid)),
- 0, GDB_SIGNAL_0);
-
+ linux_resume_one_lwp (lp, 0, GDB_SIGNAL_0);
return 1;
}
ourstatus->kind = TARGET_WAITKIND_EXECD;
ourstatus->value.execd_pathname
- = xstrdup (linux_child_pid_to_exec_file (pid));
+ = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
+  /* The thread that execed must have been resumed, but, when a
+     thread execs, it changes its tid to the tgid, and the old
+     tgid thread might not have been resumed. */
+ lp->resumed = 1;
return 0;
}
again before it gets to sigsuspend so we can safely let the handlers
get executed here. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
sigsuspend (&suspend_mask);
}
}
gdb_assert (WIFSTOPPED (status));
+ lp->stopped = 1;
+
+ if (lp->must_set_ptrace_flags)
+ {
+ struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
+
+ linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
+ lp->must_set_ptrace_flags = 0;
+ }
/* Handle GNU/Linux's syscall SIGTRAPs. */
if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
}
/* Handle GNU/Linux's extended waitstatus for trace events. */
- if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
+ if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
+ && linux_is_extended_waitstatus (status))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
soon as we see LP stop with a SIGTRAP. If GDB changes the debug
registers meanwhile, we have the cached data we can rely on. */
-static void
-save_sigtrap (struct lwp_info *lp)
+static int
+check_stopped_by_watchpoint (struct lwp_info *lp)
{
struct cleanup *old_chain;
if (linux_ops->to_stopped_by_watchpoint == NULL)
- {
- lp->stopped_by_watchpoint = 0;
- return;
- }
+ return 0;
old_chain = save_inferior_ptid ();
inferior_ptid = lp->ptid;
- lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint (linux_ops);
-
- if (lp->stopped_by_watchpoint)
+ if (linux_ops->to_stopped_by_watchpoint (linux_ops))
{
+ lp->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
+
if (linux_ops->to_stopped_data_address != NULL)
lp->stopped_data_address_p =
linux_ops->to_stopped_data_address (¤t_target,
}
do_cleanups (old_chain);
+
+ return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
}
-/* See save_sigtrap. */
+/* Called when the LWP stopped for a trap that could be explained by a
+ watchpoint or a breakpoint. */
+
+static void
+save_sigtrap (struct lwp_info *lp)
+{
+ gdb_assert (lp->stop_reason == LWP_STOPPED_BY_NO_REASON);
+ gdb_assert (lp->status != 0);
+
+ if (check_stopped_by_watchpoint (lp))
+ return;
+
+ if (linux_nat_status_is_event (lp->status))
+ check_stopped_by_breakpoint (lp);
+}
+
+/* Returns true if the LWP had stopped for a watchpoint. */
static int
linux_nat_stopped_by_watchpoint (struct target_ops *ops)
gdb_assert (lp != NULL);
- return lp->stopped_by_watchpoint;
+ return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
}
static int
return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
}
-/* SIGTRAP-like events recognizer. */
-
-static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
-
-/* Check for SIGTRAP-like events in LP. */
-
-static int
-linux_nat_lp_status_is_event (struct lwp_info *lp)
-{
- /* We check for lp->waitstatus in addition to lp->status, because we can
- have pending process exits recorded in lp->status
- and W_EXITCODE(0,0) == 0. We should probably have an additional
- lp->status_p flag. */
-
- return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
- && linux_nat_status_is_event (lp->status));
-}
-
/* Set alternative SIGTRAP-like events recognizer. If
breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
applied. */
static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
- struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
+ struct inferior *inf = find_inferior_ptid (lp->ptid);
/* If this is a vfork parent, bail out, it is not going to report
any SIGSTOP until the vfork is done with. */
errno = 0;
ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
+ lp->stopped = 0;
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"PTRACE_CONT %s, 0, 0 (%s) "
{
/* The thread was stopped with a signal other than SIGSTOP. */
- save_sigtrap (lp);
-
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"SWC: Pending event %s in %s\n",
/* Save the sigtrap event. */
lp->status = status;
- gdb_assert (!lp->stopped);
gdb_assert (lp->signalled);
- lp->stopped = 1;
+ save_sigtrap (lp);
}
else
{
"SWC: Delayed SIGSTOP caught for %s.\n",
target_pid_to_str (lp->ptid));
- lp->stopped = 1;
-
/* Reset SIGNALLED only after the stop_wait_callback call
above as it does gdb_assert on SIGNALLED. */
lp->signalled = 0;
return 0;
}
-/* Return non-zero if LP has a wait status pending. */
+/* Return non-zero if LP has a wait status pending. Discard the
+ pending event and resume the LWP if the event that originally
+ caused the stop became uninteresting. */
static int
status_callback (struct lwp_info *lp, void *data)
if (!lp->resumed)
return 0;
- if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
+ || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT)
{
- /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
- or a pending process exit. Note that `W_EXITCODE(0,0) ==
- 0', so a clean process exit can not be stored pending in
- lp->status, it is indistinguishable from
- no-pending-status. */
+ struct regcache *regcache = get_thread_regcache (lp->ptid);
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ CORE_ADDR pc;
+ int discard = 0;
+
+ gdb_assert (lp->status != 0);
+
+ pc = regcache_read_pc (regcache);
+
+ if (pc != lp->stop_pc)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "SC: PC of %s changed. was=%s, now=%s\n",
+ target_pid_to_str (lp->ptid),
+ paddress (target_gdbarch (), lp->stop_pc),
+ paddress (target_gdbarch (), pc));
+ discard = 1;
+ }
+ else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "SC: previous breakpoint of %s, at %s gone\n",
+ target_pid_to_str (lp->ptid),
+ paddress (target_gdbarch (), lp->stop_pc));
+
+ discard = 1;
+ }
+
+ if (discard)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "SC: pending event of %s cancelled.\n",
+ target_pid_to_str (lp->ptid));
+
+ lp->status = 0;
+ linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
+ return 0;
+ }
return 1;
}
- if (lp->status != 0)
- return 1;
-
- return 0;
+ return lwp_status_pending_p (lp);
}
/* Return non-zero if LP isn't stopped. */
running_callback (struct lwp_info *lp, void *data)
{
return (!lp->stopped
- || ((lp->status != 0
- || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
- && lp->resumed));
+ || (lwp_status_pending_p (lp) && lp->resumed));
}
/* Count the LWP's that have had events. */
gdb_assert (count != NULL);
- /* Count only resumed LWPs that have a SIGTRAP event pending. */
- if (lp->resumed && linux_nat_lp_status_is_event (lp))
+ /* Select only resumed LWPs that have an event pending. */
+ if (lp->resumed && lwp_status_pending_p (lp))
(*count)++;
return 0;
return 0;
}
+/* Returns true if LP has a status pending. */
+
+static int
+lwp_status_pending_p (struct lwp_info *lp)
+{
+ /* We check for lp->waitstatus in addition to lp->status, because we
+ can have pending process exits recorded in lp->status and
+ W_EXITCODE(0,0) happens to be 0. */
+ return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
+}
+
/* Select the Nth LWP that has had a SIGTRAP event. */
static int
gdb_assert (selector != NULL);
- /* Select only resumed LWPs that have a SIGTRAP event pending. */
- if (lp->resumed && linux_nat_lp_status_is_event (lp))
+ /* Select only resumed LWPs that have an event pending. */
+ if (lp->resumed && lwp_status_pending_p (lp))
if ((*selector)-- == 0)
return 1;
return 0;
}
+/* Called when the LWP got a signal/trap that could be explained by a
+ software or hardware breakpoint. */
+
static int
-cancel_breakpoint (struct lwp_info *lp)
+check_stopped_by_breakpoint (struct lwp_info *lp)
{
/* Arrange for a breakpoint to be hit again later. We don't keep
the SIGTRAP status and don't forward the SIGTRAP signal to the
struct regcache *regcache = get_thread_regcache (lp->ptid);
struct gdbarch *gdbarch = get_regcache_arch (regcache);
CORE_ADDR pc;
+ CORE_ADDR sw_bp_pc;
+
+ pc = regcache_read_pc (regcache);
+ sw_bp_pc = pc - target_decr_pc_after_break (gdbarch);
- pc = regcache_read_pc (regcache) - target_decr_pc_after_break (gdbarch);
- if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ if ((!lp->step || lp->stop_pc == sw_bp_pc)
+ && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
+ sw_bp_pc))
{
+ /* The LWP was either continued, or stepped a software
+ breakpoint instruction. */
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "CB: Push back breakpoint for %s\n",
+ "CB: Push back software breakpoint for %s\n",
target_pid_to_str (lp->ptid));
/* Back up the PC if necessary. */
- if (target_decr_pc_after_break (gdbarch))
- regcache_write_pc (regcache, pc);
+ if (pc != sw_bp_pc)
+ regcache_write_pc (regcache, sw_bp_pc);
+ lp->stop_pc = sw_bp_pc;
+ lp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
return 1;
}
- return 0;
-}
-
-static int
-cancel_breakpoints_callback (struct lwp_info *lp, void *data)
-{
- struct lwp_info *event_lp = data;
-
- /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
- if (lp == event_lp)
- return 0;
- /* If a LWP other than the LWP that we're reporting an event for has
- hit a GDB breakpoint (as opposed to some random trap signal),
- then just arrange for it to hit it again later. We don't keep
- the SIGTRAP status and don't forward the SIGTRAP signal to the
- LWP. We will handle the current event, eventually we will resume
- all LWPs, and this one will get its breakpoint trap again.
-
- If we do not do this, then we run the risk that the user will
- delete or disable the breakpoint, but the LWP will have already
- tripped on it. */
+ if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "CB: Push back hardware breakpoint for %s\n",
+ target_pid_to_str (lp->ptid));
- if (linux_nat_lp_status_is_event (lp)
- && cancel_breakpoint (lp))
- /* Throw away the SIGTRAP. */
- lp->status = 0;
+ lp->stop_pc = pc;
+ lp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
+ return 1;
+ }
return 0;
}
{
int num_events = 0;
int random_selector;
- struct lwp_info *event_lp;
+ struct lwp_info *event_lp = NULL;
/* Record the wait status for the original LWP. */
(*orig_lp)->status = *status;
- /* Give preference to any LWP that is being single-stepped. */
- event_lp = iterate_over_lwps (filter,
- select_singlestep_lwp_callback, NULL);
- if (event_lp != NULL)
+ /* In all-stop, give preference to the LWP that is being
+ single-stepped. There will be at most one, and it will be the
+ LWP that the core is most interested in. If we didn't do this,
+ then we'd have to handle pending step SIGTRAPs somehow in case
+ the core later continues the previously-stepped thread, as
+ otherwise we'd report the pending SIGTRAP then, and the core, not
+ having stepped the thread, wouldn't understand what the trap was
+ for, and therefore would report it to the user as a random
+ signal. */
+ if (!non_stop)
{
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "SEL: Select single-step %s\n",
- target_pid_to_str (event_lp->ptid));
+ event_lp = iterate_over_lwps (filter,
+ select_singlestep_lwp_callback, NULL);
+ if (event_lp != NULL)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "SEL: Select single-step %s\n",
+ target_pid_to_str (event_lp->ptid));
+ }
}
- else
+
+ if (event_lp == NULL)
{
- /* No single-stepping LWP. Select one at random, out of those
- which have had SIGTRAP events. */
+ /* Pick one at random, out of those which have had events. */
- /* First see how many SIGTRAP events we have. */
+ /* First see how many events we have. */
iterate_over_lwps (filter, count_events_callback, &num_events);
- /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
+ /* Now randomly pick a LWP out of those that have had
+ events. */
random_selector = (int)
((num_events * (double) rand ()) / (RAND_MAX + 1.0));
if (debug_linux_nat && num_events > 1)
fprintf_unfiltered (gdb_stdlog,
- "SEL: Found %d SIGTRAP events, selecting #%d\n",
+ "SEL: Found %d events, selecting #%d\n",
num_events, random_selector);
event_lp = iterate_over_lwps (filter,
static int
stop_and_resume_callback (struct lwp_info *lp, void *data)
{
- int *new_pending_p = data;
-
if (!lp->stopped)
{
ptid_t ptid = lp->ptid;
if (lp != NULL)
{
if (lp->last_resume_kind == resume_stop
- && lp->status == 0)
+ && !lwp_status_pending_p (lp))
{
/* The core wanted the LWP to stop. Even if it stopped
cleanly (with SIGSTOP), leave the event pending. */
lp->status = W_STOPCODE (SIGSTOP);
}
- if (lp->status == 0)
+ if (!lwp_status_pending_p (lp))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"SARC: not re-resuming LWP %ld "
"(has pending)\n",
ptid_get_lwp (lp->ptid));
- if (new_pending_p)
- *new_pending_p = 1;
}
}
}
}
/* Check if we should go on and pass this event to common code.
- Return the affected lwp if we are, or NULL otherwise. If we stop
- all lwps temporarily, we may end up with new pending events in some
- other lwp. In that case set *NEW_PENDING_P to true. */
+ Return the affected lwp if we are, or NULL otherwise. */
static struct lwp_info *
-linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
+linux_nat_filter_event (int lwpid, int status)
{
struct lwp_info *lp;
-
- *new_pending_p = 0;
+ int event = linux_ptrace_get_extended_event (status);
lp = find_lwp_pid (pid_to_ptid (lwpid));
thread changes its tid to the tgid. */
if (WIFSTOPPED (status) && lp == NULL
- && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
+ && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
{
/* A multi-thread exec after we had seen the leader exiting. */
if (debug_linux_nat)
if (!WIFSTOPPED (status) && !lp)
return NULL;
+ /* This LWP is stopped now. (And if dead, this prevents it from
+ ever being continued.) */
+ lp->stopped = 1;
+
+ if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
+ {
+ struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
+
+ linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
+ lp->must_set_ptrace_flags = 0;
+ }
+
/* Handle GNU/Linux's syscall SIGTRAPs. */
if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
{
}
/* Handle GNU/Linux's extended waitstatus for trace events. */
- if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
+ if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
+ && linux_is_extended_waitstatus (status))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
return NULL;
}
- if (linux_nat_status_is_event (status))
- save_sigtrap (lp);
-
/* Check if the thread has exited. */
- if ((WIFEXITED (status) || WIFSIGNALED (status))
- && num_lwps (ptid_get_pid (lp->ptid)) > 1)
+ if (WIFEXITED (status) || WIFSIGNALED (status))
{
- /* If this is the main thread, we must stop all threads and verify
- if they are still alive. This is because in the nptl thread model
- on Linux 2.4, there is no signal issued for exiting LWPs
- other than the main thread. We only get the main thread exit
- signal once all child threads have already exited. If we
- stop all the threads and use the stop_wait_callback to check
- if they have exited we can determine whether this signal
- should be ignored or whether it means the end of the debugged
- application, regardless of which threading model is being
- used. */
- if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
+ if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
{
- lp->stopped = 1;
- iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
- stop_and_resume_callback, new_pending_p);
+ /* If this is the main thread, we must stop all threads and
+ verify if they are still alive. This is because in the
+ nptl thread model on Linux 2.4, there is no signal issued
+ for exiting LWPs other than the main thread. We only get
+ the main thread exit signal once all child threads have
+ already exited. If we stop all the threads and use the
+ stop_wait_callback to check if they have exited we can
+ determine whether this signal should be ignored or
+ whether it means the end of the debugged application,
+ regardless of which threading model is being used. */
+ if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
+ {
+ iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
+ stop_and_resume_callback, NULL);
+ }
+
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLW: %s exited.\n",
+ target_pid_to_str (lp->ptid));
+
+ if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
+ {
+ /* If there is at least one more LWP, then the exit signal
+ was not the end of the debugged application and should be
+ ignored. */
+ exit_lwp (lp);
+ return NULL;
+ }
}
+ gdb_assert (lp->resumed);
+
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "LLW: %s exited.\n",
- target_pid_to_str (lp->ptid));
+ "Process %ld exited\n",
+ ptid_get_lwp (lp->ptid));
- if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
- {
- /* If there is at least one more LWP, then the exit signal
- was not the end of the debugged application and should be
- ignored. */
- exit_lwp (lp);
- return NULL;
- }
+ /* This was the last lwp in the process. Since events are
+ serialized to GDB core, we may not be able to report this one
+ right now, but GDB core and the other target layers will want
+ to be notified about the exit code/signal, leave the status
+ pending for the next time we're able to report it. */
+
+ /* Dead LWP's aren't expected to report a pending sigstop. */
+ lp->signalled = 0;
+
+ /* Store the pending event in the waitstatus, because
+ W_EXITCODE(0,0) == 0. */
+ store_waitstatus (&lp->waitstatus, status);
+ return lp;
}
/* Check if the current LWP has previously exited. In the nptl
{
/* This is a delayed SIGSTOP. */
- registers_changed ();
-
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (lp);
- linux_ops->to_resume (linux_ops,
- pid_to_ptid (ptid_get_lwp (lp->ptid)),
- lp->step, GDB_SIGNAL_0);
+ linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
"PTRACE_SINGLESTEP" : "PTRACE_CONT",
target_pid_to_str (lp->ptid));
- lp->stopped = 0;
gdb_assert (lp->resumed);
/* Discard the event. */
/* This is a delayed SIGINT. */
lp->ignore_sigint = 0;
- registers_changed ();
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (lp);
- linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
- lp->step, GDB_SIGNAL_0);
+ linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"LLW: %s %s, 0, 0 (discard SIGINT)\n",
lp->step ?
"PTRACE_SINGLESTEP" : "PTRACE_CONT",
target_pid_to_str (lp->ptid));
-
- lp->stopped = 0;
gdb_assert (lp->resumed);
/* Discard the event. */
return NULL;
}
+ /* Don't report signals that GDB isn't interested in, such as
+ signals that are neither printed nor stopped upon. Stopping all
+ threads can be a bit time-consuming so if we want decent
+ performance with heavily multi-threaded programs, especially when
+ they're using a high frequency timer, we'd better avoid it if we
+ can. */
+ if (WIFSTOPPED (status))
+ {
+ enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
+
+ if (!non_stop)
+ {
+ /* Only do the below in all-stop, as we currently use SIGSTOP
+ to implement target_stop (see linux_nat_stop) in
+ non-stop. */
+ if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
+ {
+ /* If ^C/BREAK is typed at the tty/console, SIGINT gets
+ forwarded to the entire process group, that is, all LWPs
+ will receive it - unless they're using CLONE_THREAD to
+ share signals. Since we only want to report it once, we
+ mark it as ignored for all LWPs except this one. */
+ iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
+ set_ignore_sigint, NULL);
+ lp->ignore_sigint = 0;
+ }
+ else
+ maybe_clear_ignore_sigint (lp);
+ }
+
+ /* When using hardware single-step, we need to report every signal.
+ Otherwise, signals in pass_mask may be short-circuited. */
+ if (!lp->step
+ && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
+ {
+ linux_resume_one_lwp (lp, lp->step, signo);
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLW: %s %s, %s (preempt 'handle')\n",
+ lp->step ?
+ "PTRACE_SINGLESTEP" : "PTRACE_CONT",
+ target_pid_to_str (lp->ptid),
+ (signo != GDB_SIGNAL_0
+ ? strsignal (gdb_signal_to_host (signo))
+ : "0"));
+ return NULL;
+ }
+ }
+
/* An interesting event. */
gdb_assert (lp);
lp->status = status;
+ save_sigtrap (lp);
return lp;
}
ptid_t ptid, struct target_waitstatus *ourstatus,
int target_options)
{
- static sigset_t prev_mask;
+ sigset_t prev_mask;
enum resume_kind last_resume_kind;
struct lwp_info *lp;
int status;
/* Make sure SIGCHLD is blocked until the sigsuspend below. */
block_child_signals (&prev_mask);
-retry:
- lp = NULL;
- status = 0;
-
/* First check if there is a LWP with a wait status pending. */
- if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
- {
- /* Any LWP in the PTID group that's been resumed will do. */
- lp = iterate_over_lwps (ptid, status_callback, NULL);
- if (lp)
- {
- if (debug_linux_nat && lp->status)
- fprintf_unfiltered (gdb_stdlog,
- "LLW: Using pending wait status %s for %s.\n",
- status_to_str (lp->status),
- target_pid_to_str (lp->ptid));
- }
- }
- else if (ptid_lwp_p (ptid))
+ lp = iterate_over_lwps (ptid, status_callback, NULL);
+ if (lp != NULL)
{
if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "LLW: Waiting for specific LWP %s.\n",
- target_pid_to_str (ptid));
-
- /* We have a specific LWP to check. */
- lp = find_lwp_pid (ptid);
- gdb_assert (lp);
-
- if (debug_linux_nat && lp->status)
fprintf_unfiltered (gdb_stdlog,
"LLW: Using pending wait status %s for %s.\n",
status_to_str (lp->status),
target_pid_to_str (lp->ptid));
-
- /* We check for lp->waitstatus in addition to lp->status,
- because we can have pending process exits recorded in
- lp->status and W_EXITCODE(0,0) == 0. We should probably have
- an additional lp->status_p flag. */
- if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
- lp = NULL;
}
if (!target_can_async_p ())
set_sigint_trap ();
}
- /* But if we don't find a pending event, we'll have to wait. */
+ /* But if we don't find a pending event, we'll have to wait. Always
+ pull all events out of the kernel. We'll randomly select an
+ event LWP out of all that have events, to prevent starvation. */
while (lp == NULL)
{
if (lwpid > 0)
{
- /* If this is true, then we paused LWPs momentarily, and may
- now have pending events to handle. */
- int new_pending;
-
if (debug_linux_nat)
{
fprintf_unfiltered (gdb_stdlog,
(long) lwpid, status_to_str (status));
}
- lp = linux_nat_filter_event (lwpid, status, &new_pending);
-
- /* STATUS is now no longer valid, use LP->STATUS instead. */
- status = 0;
-
- if (lp && !ptid_match (lp->ptid, ptid))
- {
- gdb_assert (lp->resumed);
-
- if (debug_linux_nat)
- fprintf (stderr,
- "LWP %ld got an event %06x, leaving pending.\n",
- ptid_get_lwp (lp->ptid), lp->status);
-
- if (WIFSTOPPED (lp->status))
- {
- if (WSTOPSIG (lp->status) != SIGSTOP)
- {
- /* Cancel breakpoint hits. The breakpoint may
- be removed before we fetch events from this
- process to report to the core. It is best
- not to assume the moribund breakpoints
- heuristic always handles these cases --- it
- could be too many events go through to the
- core before this one is handled. All-stop
- always cancels breakpoint hits in all
- threads. */
- if (non_stop
- && linux_nat_lp_status_is_event (lp)
- && cancel_breakpoint (lp))
- {
- /* Throw away the SIGTRAP. */
- lp->status = 0;
-
- if (debug_linux_nat)
- fprintf (stderr,
- "LLW: LWP %ld hit a breakpoint while"
- " waiting for another process;"
- " cancelled it\n",
- ptid_get_lwp (lp->ptid));
- }
- lp->stopped = 1;
- }
- else
- {
- lp->stopped = 1;
- lp->signalled = 0;
- }
- }
- else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
- {
- if (debug_linux_nat)
- fprintf (stderr,
- "Process %ld exited while stopping LWPs\n",
- ptid_get_lwp (lp->ptid));
-
- /* This was the last lwp in the process. Since
- events are serialized to GDB core, and we can't
- report this one right now, but GDB core and the
- other target layers will want to be notified
- about the exit code/signal, leave the status
- pending for the next time we're able to report
- it. */
-
- /* Prevent trying to stop this thread again. We'll
- never try to resume it because it has a pending
- status. */
- lp->stopped = 1;
-
- /* Dead LWP's aren't expected to reported a pending
- sigstop. */
- lp->signalled = 0;
-
- /* Store the pending event in the waitstatus as
- well, because W_EXITCODE(0,0) == 0. */
- store_waitstatus (&lp->waitstatus, lp->status);
- }
-
- /* Keep looking. */
- lp = NULL;
- }
-
- if (new_pending)
- {
- /* Some LWP now has a pending event. Go all the way
- back to check it. */
- goto retry;
- }
-
- if (lp)
- {
- /* We got an event to report to the core. */
- break;
- }
-
+ linux_nat_filter_event (lwpid, status);
/* Retry until nothing comes out of waitpid. A single
SIGCHLD can indicate more than one child stopped. */
continue;
}
+ /* Now that we've pulled all events out of the kernel, check if
+ there's any LWP with a status to report to the core. */
+ lp = iterate_over_lwps (ptid, status_callback, NULL);
+ if (lp != NULL)
+ break;
+
/* Check for zombie thread group leaders. Those can't be reaped
until all other threads in the thread group are. */
check_zombie_leaders ();
gdb_assert (lp == NULL);
/* Block until we get an event reported with SIGCHLD. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
sigsuspend (&suspend_mask);
}
status = lp->status;
lp->status = 0;
- /* Don't report signals that GDB isn't interested in, such as
- signals that are neither printed nor stopped upon. Stopping all
- threads can be a bit time-consuming so if we want decent
- performance with heavily multi-threaded programs, especially when
- they're using a high frequency timer, we'd better avoid it if we
- can. */
-
- if (WIFSTOPPED (status))
- {
- enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
-
- /* When using hardware single-step, we need to report every signal.
- Otherwise, signals in pass_mask may be short-circuited. */
- if (!lp->step
- && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
- {
- /* FIMXE: kettenis/2001-06-06: Should we resume all threads
- here? It is not clear we should. GDB may not expect
- other threads to run. On the other hand, not resuming
- newly attached threads may cause an unwanted delay in
- getting them running. */
- registers_changed ();
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (lp);
- linux_ops->to_resume (linux_ops,
- pid_to_ptid (ptid_get_lwp (lp->ptid)),
- lp->step, signo);
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "LLW: %s %s, %s (preempt 'handle')\n",
- lp->step ?
- "PTRACE_SINGLESTEP" : "PTRACE_CONT",
- target_pid_to_str (lp->ptid),
- (signo != GDB_SIGNAL_0
- ? strsignal (gdb_signal_to_host (signo))
- : "0"));
- lp->stopped = 0;
- goto retry;
- }
-
- if (!non_stop)
- {
- /* Only do the below in all-stop, as we currently use SIGINT
- to implement target_stop (see linux_nat_stop) in
- non-stop. */
- if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
- {
- /* If ^C/BREAK is typed at the tty/console, SIGINT gets
- forwarded to the entire process group, that is, all LWPs
- will receive it - unless they're using CLONE_THREAD to
- share signals. Since we only want to report it once, we
- mark it as ignored for all LWPs except this one. */
- iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
- set_ignore_sigint, NULL);
- lp->ignore_sigint = 0;
- }
- else
- maybe_clear_ignore_sigint (lp);
- }
- }
-
- /* This LWP is stopped now. */
- lp->stopped = 1;
-
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
- status_to_str (status), target_pid_to_str (lp->ptid));
-
if (!non_stop)
{
/* Now stop all other LWP's ... */
/* ... and wait until all of them have reported back that
they're no longer running. */
iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
+ }
- /* If we're not waiting for a specific LWP, choose an event LWP
- from among those that have had events. Giving equal priority
- to all LWPs that have had events helps prevent
- starvation. */
- if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
- select_event_lwp (ptid, &lp, &status);
+ /* If we're not waiting for a specific LWP, choose an event LWP from
+ among those that have had events. Giving equal priority to all
+ LWPs that have had events helps prevent starvation. */
+ if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
+ select_event_lwp (ptid, &lp, &status);
- /* Now that we've selected our final event LWP, cancel any
- breakpoints in other LWPs that have hit a GDB breakpoint.
- See the comment in cancel_breakpoints_callback to find out
- why. */
- iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
+ gdb_assert (lp != NULL);
- /* We'll need this to determine whether to report a SIGSTOP as
- TARGET_WAITKIND_0. Need to take a copy because
- resume_clear_callback clears it. */
- last_resume_kind = lp->last_resume_kind;
+ /* Now that we've selected our final event LWP, un-adjust its PC if
+ it was a software breakpoint. */
+ if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
+ {
+ struct regcache *regcache = get_thread_regcache (lp->ptid);
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ int decr_pc = target_decr_pc_after_break (gdbarch);
+ if (decr_pc != 0)
+ {
+ CORE_ADDR pc;
+
+ pc = regcache_read_pc (regcache);
+ regcache_write_pc (regcache, pc + decr_pc);
+ }
+ }
+
+ /* We'll need this to determine whether to report a SIGSTOP as
+ GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
+ clears it. */
+ last_resume_kind = lp->last_resume_kind;
+
+ if (!non_stop)
+ {
/* In all-stop, from the core's perspective, all LWPs are now
stopped until a new resume action is sent over. */
iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
}
else
{
- /* See above. */
- last_resume_kind = lp->last_resume_kind;
resume_clear_callback (lp, NULL);
}
if (lp->stopped
&& lp->resumed
- && lp->status == 0
- && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
+ && !lwp_status_pending_p (lp))
{
struct regcache *regcache = get_thread_regcache (lp->ptid);
struct gdbarch *gdbarch = get_regcache_arch (regcache);
paddress (gdbarch, pc),
lp->step);
- registers_changed ();
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (lp);
- linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
- lp->step, GDB_SIGNAL_0);
- lp->stopped = 0;
- lp->stopped_by_watchpoint = 0;
+ linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
}
return 0;
/* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
errno = 0;
- kill (ptid_get_lwp (lp->ptid), SIGKILL);
+ kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
- target_pid_to_str (lp->ptid),
- errno ? safe_strerror (errno) : "OK");
+ {
+ int save_errno = errno;
+
+ fprintf_unfiltered (gdb_stdlog,
+ "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
+ target_pid_to_str (lp->ptid),
+ save_errno ? safe_strerror (save_errno) : "OK");
+ }
/* Some kernels ignore even SIGKILL for processes under ptrace. */
errno = 0;
ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
- target_pid_to_str (lp->ptid),
- errno ? safe_strerror (errno) : "OK");
+ {
+ int save_errno = errno;
+
+ fprintf_unfiltered (gdb_stdlog,
+ "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
+ target_pid_to_str (lp->ptid),
+ save_errno ? safe_strerror (save_errno) : "OK");
+ }
return 0;
}
}
static char *
-linux_nat_thread_name (struct thread_info *thr)
+linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
{
int pid = ptid_get_pid (thr->ptid);
long lwp = ptid_get_lwp (thr->ptid);
can be opened to get the symbols for the child process. */
static char *
-linux_child_pid_to_exec_file (int pid)
+linux_child_pid_to_exec_file (struct target_ops *self, int pid)
{
- char *name1, *name2;
-
- name1 = xmalloc (PATH_MAX);
- name2 = xmalloc (PATH_MAX);
- make_cleanup (xfree, name1);
- make_cleanup (xfree, name2);
- memset (name2, 0, PATH_MAX);
-
- xsnprintf (name1, PATH_MAX, "/proc/%d/exe", pid);
- if (readlink (name1, name2, PATH_MAX - 1) > 0)
- return name2;
- else
- return name1;
-}
-
-/* Records the thread's register state for the corefile note
- section. */
-
-static char *
-linux_nat_collect_thread_registers (const struct regcache *regcache,
- ptid_t ptid, bfd *obfd,
- char *note_data, int *note_size,
- enum gdb_signal stop_signal)
-{
- struct gdbarch *gdbarch = get_regcache_arch (regcache);
- const struct regset *regset;
- int core_regset_p;
- gdb_gregset_t gregs;
- gdb_fpregset_t fpregs;
-
- core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
-
- if (core_regset_p
- && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
- sizeof (gregs)))
- != NULL && regset->collect_regset != NULL)
- regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
- else
- fill_gregset (regcache, &gregs, -1);
+ static char buf[PATH_MAX];
+ char name[PATH_MAX];
- note_data = (char *) elfcore_write_prstatus
- (obfd, note_data, note_size, ptid_get_lwp (ptid),
- gdb_signal_to_host (stop_signal), &gregs);
+ xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
+ memset (buf, 0, PATH_MAX);
+ if (readlink (name, buf, PATH_MAX - 1) <= 0)
+ strcpy (buf, name);
- if (core_regset_p
- && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
- sizeof (fpregs)))
- != NULL && regset->collect_regset != NULL)
- regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
- else
- fill_fpregset (regcache, &fpregs, -1);
-
- note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
- &fpregs, sizeof (fpregs));
-
- return note_data;
-}
-
-/* Fills the "to_make_corefile_note" target vector. Builds the note
- section for a corefile, and returns it in a malloc buffer. */
-
-static char *
-linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
-{
- /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
- converted to gdbarch_core_regset_sections, this function can go away. */
- return linux_make_corefile_notes (target_gdbarch (), obfd, note_size,
- linux_nat_collect_thread_registers);
+ return buf;
}
/* Implement the to_xfer_partial interface for memory reads using the /proc
}
static VEC(static_tracepoint_marker_p) *
-linux_child_static_tracepoint_markers_by_strid (const char *strid)
+linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
+ const char *strid)
{
char s[IPA_CMD_BUF_SIZE];
struct cleanup *old_chain;
t->to_post_startup_inferior = linux_child_post_startup_inferior;
t->to_post_attach = linux_child_post_attach;
t->to_follow_fork = linux_child_follow_fork;
- t->to_make_corefile_notes = linux_nat_make_corefile_notes;
super_xfer_partial = t->to_xfer_partial;
t->to_xfer_partial = linux_xfer_partial;
}
static int
-linux_nat_supports_non_stop (void)
+linux_nat_supports_non_stop (struct target_ops *self)
{
return 1;
}
int linux_multi_process = 1;
static int
-linux_nat_supports_multi_process (void)
+linux_nat_supports_multi_process (struct target_ops *self)
{
return linux_multi_process;
}
static int
-linux_nat_supports_disable_randomization (void)
+linux_nat_supports_disable_randomization (struct target_ops *self)
{
#ifdef HAVE_PERSONALITY
return 1;
static int async_terminal_is_ours = 1;
-/* target_terminal_inferior implementation. */
+/* target_terminal_inferior implementation.
+
+ This is a wrapper around child_terminal_inferior to add async support. */
static void
linux_nat_terminal_inferior (struct target_ops *self)
if (!target_is_async_p ())
{
/* Async mode is disabled. */
- terminal_inferior (self);
+ child_terminal_inferior (self);
return;
}
- terminal_inferior (self);
+ child_terminal_inferior (self);
/* Calls to target_terminal_*() are meant to be idempotent. */
if (!async_terminal_is_ours)
set_sigint_trap ();
}
-/* target_terminal_ours implementation. */
+/* target_terminal_ours implementation.
+
+ This is a wrapper around child_terminal_ours to add async support (and
+ implement the target_terminal_ours vs target_terminal_ours_for_output
+ distinction). child_terminal_ours is currently no different than
+ child_terminal_ours_for_output.
+ We leave target_terminal_ours_for_output alone, leaving it to
+ child_terminal_ours_for_output. */
static void
linux_nat_terminal_ours (struct target_ops *self)
if (!target_is_async_p ())
{
/* Async mode is disabled. */
- terminal_ours (self);
+ child_terminal_ours (self);
return;
}
/* GDB should never give the terminal to the inferior if the
inferior is running in the background (run&, continue&, etc.),
but claiming it sure should. */
- terminal_ours (self);
+ child_terminal_ours (self);
if (async_terminal_is_ours)
return;
}
static void
-linux_nat_stop (ptid_t ptid)
+linux_nat_stop (struct target_ops *self, ptid_t ptid)
{
if (non_stop)
iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
else
- linux_ops->to_stop (ptid);
+ linux_ops->to_stop (linux_ops, ptid);
}
static void
linux_nat_close (struct target_ops *self)
{
/* Unregister from the event loop. */
- if (linux_nat_is_async_p (NULL))
- linux_nat_async (NULL, NULL, 0);
+ if (linux_nat_is_async_p (self))
+ linux_nat_async (self, NULL, NULL);
if (linux_ops->to_close)
linux_ops->to_close (linux_ops);
+
+ super_close (self);
}
/* When requests are passed down from the linux-nat layer to the
struct inferior *inf;
int pid;
- pid = ptid_get_lwp (ptid);
if (ptid_get_lwp (ptid) == 0)
{
/* An (lwpid,0,0) ptid. Look up the lwp object to get at the
t->to_async = linux_nat_async;
t->to_terminal_inferior = linux_nat_terminal_inferior;
t->to_terminal_ours = linux_nat_terminal_ours;
+
+ super_close = t->to_close;
t->to_close = linux_nat_close;
/* Methods for non-stop support. */
sigdelset (&suspend_mask, SIGCHLD);
sigemptyset (&blocked_mask);
+
+ /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
+ support read-only process state. */
+ linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
+ | PTRACE_O_TRACEVFORKDONE
+ | PTRACE_O_TRACEVFORK
+ | PTRACE_O_TRACEFORK
+ | PTRACE_O_TRACEEXEC);
}
\f
static int
get_signo (const char *name)
{
- struct minimal_symbol *ms;
+ struct bound_minimal_symbol ms;
int signo;
ms = lookup_minimal_symbol (name, NULL, NULL);
- if (ms == NULL)
+ if (ms.minsym == NULL)
return 0;
- if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
+ if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
sizeof (signo)) != 0)
return 0;