/* GNU/Linux native-dependent code common to multiple platforms.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011 Free Software Foundation, Inc.
This file is part of GDB.
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h> /* for MAXPATHLEN */
-#include <sys/procfs.h> /* for elf_gregset etc. */
+#include <sys/procfs.h> /* for elf_gregset etc. */
#include "elf-bfd.h" /* for elfcore_write_* */
#include "gregset.h" /* for gregset */
#include "gdbcore.h" /* for get_exec_file */
#include <ctype.h> /* for isdigit */
-#include "gdbthread.h" /* for struct thread_info etc. */
+#include "gdbthread.h" /* for struct thread_info etc. */
#include "gdb_stat.h" /* for struct stat */
#include <fcntl.h> /* for O_RDONLY */
#include "inf-loop.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
+#include "solib.h"
#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
# endif
#endif /* HAVE_PERSONALITY */
-/* This comment documents high-level logic of this file.
+/* This comment documents high-level logic of this file.
Waiting for events in sync mode
===============================
When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.
-When waiting for an event in all threads, waitpid is not quite good. Prior to
+When waiting for an event in all threads, waitpid is not quite good. Prior to
version 2.4, Linux can either wait for event in main thread, or in secondary
-threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
+threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
miss an event. The solution is to use non-blocking waitpid, together with
sigsuspend. First, we use non-blocking waitpid to get an event in the main
-process, if any. Second, we use non-blocking waitpid with the __WCLONED
+process, if any. Second, we use non-blocking waitpid with the __WCLONED
flag to check for events in cloned processes. If nothing is found, we use
sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
happened to a child process -- and SIGCHLD will be delivered both for events
in main debugged process and in cloned processes. As soon as we know there's
-an event, we get back to calling nonblocking waitpid with and without __WCLONED.
+an event, we get back to calling nonblocking waitpid with and without
+__WCLONED.
Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
-so that we don't miss a signal. If SIGCHLD arrives in between, when it's
+so that we don't miss a signal. If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.
#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201
-/* options set using PTRACE_SETOPTIONS */
+/* Options set using PTRACE_SETOPTIONS. */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#endif /* PTRACE_EVENT_FORK */
+/* Unlike other extended result codes, WSTOPSIG (status) on
+ PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
+ instead SIGTRAP with bit 7 set. */
+#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
+
/* We can't always assume that this flag is available, but all systems
with the ptrace event handlers also have __WALL, so it's safe to use
here. */
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
- fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
+ fprintf_filtered (file,
+ _("Debugging of GNU/Linux async lwp module is %s.\n"),
value);
}
struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
- fprintf_filtered (file, _("\
-Disabling randomization of debuggee's virtual address space is %s.\n"),
+ fprintf_filtered (file,
+ _("Disabling randomization of debuggee's "
+ "virtual address space is %s.\n"),
value);
#else /* !HAVE_PERSONALITY */
- fputs_filtered (_("\
-Disabling randomization of debuggee's virtual address space is unsupported on\n\
-this platform.\n"), file);
+ fputs_filtered (_("Disabling randomization of debuggee's "
+ "virtual address space is unsupported on\n"
+ "this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}
static void
-set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
+set_disable_randomization (char *args, int from_tty,
+ struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
- error (_("\
-Disabling randomization of debuggee's virtual address space is unsupported on\n\
-this platform."));
+ error (_("Disabling randomization of debuggee's "
+ "virtual address space is unsupported on\n"
+ "this platform."));
#endif /* !HAVE_PERSONALITY */
}
-static int linux_parent_pid;
-
struct simple_pid_list
{
int pid;
static int linux_supports_tracefork_flag = -1;
+/* This variable is a tri-state flag: -1 for unknown, 0 if
+ PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
+
+static int linux_supports_tracesysgood_flag = -1;
+
/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
PTRACE_O_TRACEVFORKDONE. */
static int linux_supports_tracevforkdone_flag = -1;
-/* Async mode support */
+/* Async mode support. */
/* Zero if the async mode, although enabled, is masked, which means
linux_nat_wait should behave as if async mode was off. */
static int linux_nat_async_mask_value = 1;
+/* Stores the current used ptrace() options. */
+static int current_ptrace_options = 0;
+
/* The read/write ends of the pipe registered as waitable file in the
event loop. */
static int linux_nat_event_pipe[2] = { -1, -1 };
}
static void linux_nat_async (void (*callback)
- (enum inferior_event_type event_type, void *context),
+ (enum inferior_event_type event_type,
+ void *context),
void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
+
new_pid->pid = pid;
new_pid->status = status;
new_pid->next = *listp;
}
static int
-pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
+pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
struct simple_pid_list **p;
if ((*p)->pid == pid)
{
struct simple_pid_list *next = (*p)->next;
- *status = (*p)->status;
+
+ *statusp = (*p)->status;
xfree (*p);
*p = next;
return 1;
static void
linux_tracefork_child (void)
{
- int ret;
-
ptrace (PTRACE_TRACEME, 0, 0, 0);
kill (getpid (), SIGSTOP);
fork ();
/* Wrapper function for waitpid which handles EINTR. */
static int
-my_waitpid (int pid, int *status, int flags)
+my_waitpid (int pid, int *statusp, int flags)
{
int ret;
do
{
- ret = waitpid (pid, status, flags);
+ ret = waitpid (pid, statusp, flags);
}
while (ret == -1 && errno == EINTR);
else if (ret != child_pid)
error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
if (! WIFSTOPPED (status))
- error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
+ error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
+ status);
ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
if (ret != 0)
ret = my_waitpid (child_pid, &status, 0);
if (ret != child_pid)
- warning (_("linux_test_for_tracefork: failed to wait for killed child"));
+ warning (_("linux_test_for_tracefork: failed "
+ "to wait for killed child"));
else if (!WIFSIGNALED (status))
- warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
- "killed child"), status);
+ warning (_("linux_test_for_tracefork: unexpected "
+ "wait status 0x%x from killed child"), status);
restore_child_signals_mask (&prev_mask);
return;
my_waitpid (second_pid, &second_status, 0);
ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
if (ret != 0)
- warning (_("linux_test_for_tracefork: failed to kill second child"));
+ warning (_("linux_test_for_tracefork: "
+ "failed to kill second child"));
my_waitpid (second_pid, &status, 0);
}
}
restore_child_signals_mask (&prev_mask);
}
+/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
+
+ We try to enable syscall tracing on ORIGINAL_PID. If this fails,
+ we know that the feature is not available. This may change the tracing
+ options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
+
+static void
+linux_test_for_tracesysgood (int original_pid)
+{
+ int ret;
+ sigset_t prev_mask;
+
+ /* We don't want those ptrace calls to be interrupted. */
+ block_child_signals (&prev_mask);
+
+ linux_supports_tracesysgood_flag = 0;
+
+ ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
+ if (ret != 0)
+ goto out;
+
+ linux_supports_tracesysgood_flag = 1;
+out:
+ restore_child_signals_mask (&prev_mask);
+}
+
+/* Determine whether the PTRACE_O_TRACESYSGOOD ptrace option is available.
+ This function also sets linux_supports_tracesysgood_flag. */
+
+static int
+linux_supports_tracesysgood (int pid)
+{
+ if (linux_supports_tracesysgood_flag == -1)
+ linux_test_for_tracesysgood (pid);
+ return linux_supports_tracesysgood_flag;
+}
+
/* Return non-zero iff we have tracefork functionality available.
This function also sets linux_supports_tracefork_flag. */
return linux_supports_tracevforkdone_flag;
}
+static void
+linux_enable_tracesysgood (ptid_t ptid)
+{
+ int pid = ptid_get_lwp (ptid);
+
+ if (pid == 0)
+ pid = ptid_get_pid (ptid);
+
+ if (linux_supports_tracesysgood (pid) == 0)
+ return;
+
+ current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
+
+ ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
+}
+
\f
void
linux_enable_event_reporting (ptid_t ptid)
{
int pid = ptid_get_lwp (ptid);
- int options;
if (pid == 0)
pid = ptid_get_pid (ptid);
if (! linux_supports_tracefork (pid))
return;
- options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
- | PTRACE_O_TRACECLONE;
+ current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
+ | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
+
if (linux_supports_tracevforkdone (pid))
- options |= PTRACE_O_TRACEVFORKDONE;
+ current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
/* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
read-only process state. */
- ptrace (PTRACE_SETOPTIONS, pid, 0, options);
+ ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}
static void
{
linux_enable_event_reporting (pid_to_ptid (pid));
check_for_thread_db ();
+ linux_enable_tracesysgood (pid_to_ptid (pid));
}
static void
{
linux_enable_event_reporting (ptid);
check_for_thread_db ();
+ linux_enable_tracesysgood (ptid);
}
static int
if (!detach_fork)
linux_enable_event_reporting (pid_to_ptid (child_pid));
+ if (has_vforked
+ && !non_stop /* Non-stop always resumes both branches. */
+ && (!target_is_async_p () || sync_execution)
+ && !(follow_child || detach_fork || sched_multi))
+ {
+ /* The parent stays blocked inside the vfork syscall until the
+ child execs or exits. If we don't let the child run, then
+ the parent stays blocked. If we're telling the parent to run
+ in the foreground, the user will not be able to ctrl-c to get
+ back the terminal, effectively hanging the debug session. */
+ fprintf_filtered (gdb_stderr, _("\
+Can not resume the parent process over vfork in the foreground while\n\
+holding the child stopped. Try \"set detach-on-fork\" or \
+\"set schedule-multiple\".\n"));
+ /* FIXME output string > 80 columns. */
+ return 1;
+ }
+
if (! follow_child)
{
- /* We're already attached to the parent, by default. */
-
- /* Before detaching from the child, remove all breakpoints from
- it. If we forked, then this has already been taken care of
- by infrun.c. If we vforked however, any breakpoint inserted
- in the parent is visible in the child, even those added while
- stopped in a vfork catchpoint. This won't actually modify
- the breakpoint list, but will physically remove the
- breakpoints from the child. This will remove the breakpoints
- from the parent also, but they'll be reinserted below. */
- if (has_vforked)
- detach_breakpoints (child_pid);
+ struct lwp_info *child_lp = NULL;
+
+ /* We're already attached to the parent, by default. */
/* Detach new forked process? */
if (detach_fork)
{
+ /* Before detaching from the child, remove all breakpoints
+ from it. If we forked, then this has already been taken
+ care of by infrun.c. If we vforked however, any
+ breakpoint inserted in the parent is visible in the
+ child, even those added while stopped in a vfork
+ catchpoint. This will remove the breakpoints from the
+ parent also, but they'll be reinserted below. */
+ if (has_vforked)
+ {
+		/* Keep the breakpoints list in sync.  */
+ remove_breakpoints_pid (GET_PID (inferior_ptid));
+ }
+
if (info_verbose || debug_linux_nat)
{
target_terminal_ours ();
fprintf_filtered (gdb_stdlog,
- "Detaching after fork from child process %d.\n",
+ "Detaching after fork from "
+ "child process %d.\n",
child_pid);
}
else
{
struct inferior *parent_inf, *child_inf;
- struct lwp_info *lp;
struct cleanup *old_chain;
/* Add process to GDB's tables. */
copy_terminal_info (child_inf, parent_inf);
old_chain = save_inferior_ptid ();
+ save_current_program_space ();
inferior_ptid = ptid_build (child_pid, child_pid, 0);
add_thread (inferior_ptid);
- lp = add_lwp (inferior_ptid);
- lp->stopped = 1;
+ child_lp = add_lwp (inferior_ptid);
+ child_lp->stopped = 1;
+ child_lp->resumed = 1;
+ /* If this is a vfork child, then the address-space is
+ shared with the parent. */
+ if (has_vforked)
+ {
+ child_inf->pspace = parent_inf->pspace;
+ child_inf->aspace = parent_inf->aspace;
+
+ /* The parent will be frozen until the child is done
+ with the shared region. Keep track of the
+ parent. */
+ child_inf->vfork_parent = parent_inf;
+ child_inf->pending_detach = 0;
+ parent_inf->vfork_child = child_inf;
+ parent_inf->pending_detach = 0;
+ }
+ else
+ {
+ child_inf->aspace = new_address_space ();
+ child_inf->pspace = add_program_space (child_inf->aspace);
+ child_inf->removable = 1;
+ set_current_program_space (child_inf->pspace);
+ clone_program_space (child_inf->pspace, parent_inf->pspace);
+
+ /* Let the shared library layer (solib-svr4) learn about
+ this new process, relocate the cloned exec, pull in
+ shared libraries, and install the solib event
+ breakpoint. If a "cloned-VM" event was propagated
+ better throughout the core, this wouldn't be
+ required. */
+ solib_create_inferior_hook (0);
+ }
+
+ /* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
do_cleanups (old_chain);
if (has_vforked)
{
+ struct lwp_info *lp;
+ struct inferior *parent_inf;
+
+ parent_inf = current_inferior ();
+
+ /* If we detached from the child, then we have to be careful
+ to not insert breakpoints in the parent until the child
+ is done with the shared memory region. However, if we're
+ staying attached to the child, then we can and should
+ insert breakpoints, so that we can debug it. A
+	     subsequent child exec or exit is enough to know when the
+	     child stops using the parent's address space.  */
+ parent_inf->waiting_for_vfork_done = detach_fork;
+ parent_inf->pspace->breakpoints_not_allowed = detach_fork;
+
+ lp = find_lwp_pid (pid_to_ptid (parent_pid));
gdb_assert (linux_supports_tracefork_flag >= 0);
if (linux_supports_tracevforkdone (0))
{
- int status;
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LCFF: waiting for VFORK_DONE on %d\n",
+ parent_pid);
+
+ lp->stopped = 1;
+ lp->resumed = 1;
- ptrace (PTRACE_CONT, parent_pid, 0, 0);
- my_waitpid (parent_pid, &status, __WALL);
- if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
- warning (_("Unexpected waitpid result %06x when waiting for "
- "vfork-done"), status);
+ /* We'll handle the VFORK_DONE event like any other
+ event, in target_wait. */
}
else
{
is only the single-step breakpoint at vfork's return
point. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LCFF: no VFORK_DONE "
+ "support, sleeping a bit\n");
+
usleep (10000);
- }
- /* Since we vforked, breakpoints were removed in the parent
- too. Put them back. */
- reattach_breakpoints (parent_pid);
+ /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
+ and leave it pending. The next linux_nat_resume call
+ will notice a pending event, and bypasses actually
+ resuming the inferior. */
+ lp->status = 0;
+ lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
+ lp->stopped = 0;
+ lp->resumed = 1;
+
+ /* If we're in async mode, need to tell the event loop
+ there's something here to process. */
+ if (target_can_async_p ())
+ async_file_mark ();
+ }
}
}
else
{
- struct thread_info *tp;
struct inferior *parent_inf, *child_inf;
struct lwp_info *lp;
-
- /* Before detaching from the parent, remove all breakpoints from it. */
- remove_breakpoints ();
+ struct program_space *parent_pspace;
if (info_verbose || debug_linux_nat)
{
target_terminal_ours ();
- fprintf_filtered (gdb_stdlog,
- "Attaching after fork to child process %d.\n",
- child_pid);
+ if (has_vforked)
+ fprintf_filtered (gdb_stdlog,
+ _("Attaching after process %d "
+ "vfork to child process %d.\n"),
+ parent_pid, child_pid);
+ else
+ fprintf_filtered (gdb_stdlog,
+ _("Attaching after process %d "
+ "fork to child process %d.\n"),
+ parent_pid, child_pid);
}
/* Add the new inferior first, so that the target_detach below
child_inf->attach_flag = parent_inf->attach_flag;
copy_terminal_info (child_inf, parent_inf);
- /* If we're vforking, we may want to hold on to the parent until
- the child exits or execs. At exec time we can remove the old
- breakpoints from the parent and detach it; at exit time we
- could do the same (or even, sneakily, resume debugging it - the
- child's exec has failed, or something similar).
-
- This doesn't clean up "properly", because we can't call
- target_detach, but that's OK; if the current target is "child",
- then it doesn't need any further cleanups, and lin_lwp will
- generally not encounter vfork (vfork is defined to fork
- in libpthread.so).
+ parent_pspace = parent_inf->pspace;
- The holding part is very easy if we have VFORKDONE events;
- but keeping track of both processes is beyond GDB at the
- moment. So we don't expose the parent to the rest of GDB.
- Instead we quietly hold onto it until such time as we can
- safely resume it. */
+ /* If we're vforking, we want to hold on to the parent until the
+ child exits or execs. At child exec or exit time we can
+ remove the old breakpoints from the parent and detach or
+ resume debugging it. Otherwise, detach the parent now; we'll
+	 want to reuse its program/address spaces, but we can't set
+ them to the child before removing breakpoints from the
+ parent, otherwise, the breakpoints module could decide to
+ remove breakpoints from the wrong process (since they'd be
+ assigned to the same address space). */
if (has_vforked)
{
- struct lwp_info *parent_lwp;
-
- linux_parent_pid = parent_pid;
-
- /* Get rid of the inferior on the core side as well. */
- inferior_ptid = null_ptid;
- detach_inferior (parent_pid);
-
- /* Also get rid of all its lwps. We will detach from this
- inferior soon-ish, but, we will still get an exit event
- reported through waitpid when it exits. If we didn't get
- rid of the lwps from our list, we would end up reporting
- the inferior exit to the core, which would then try to
- mourn a non-existing (from the core's perspective)
- inferior. */
- parent_lwp = find_lwp_pid (pid_to_ptid (parent_pid));
- purge_lwp_list (GET_PID (parent_lwp->ptid));
- linux_parent_pid = parent_pid;
+ gdb_assert (child_inf->vfork_parent == NULL);
+ gdb_assert (parent_inf->vfork_child == NULL);
+ child_inf->vfork_parent = parent_inf;
+ child_inf->pending_detach = 0;
+ parent_inf->vfork_child = child_inf;
+ parent_inf->pending_detach = detach_fork;
+ parent_inf->waiting_for_vfork_done = 0;
}
else if (detach_fork)
target_detach (NULL, 0);
+ /* Note that the detach above makes PARENT_INF dangling. */
+
+ /* Add the child thread to the appropriate lists, and switch to
+ this new thread, before cloning the program space, and
+ informing the solib layer about this new process. */
+
inferior_ptid = ptid_build (child_pid, child_pid, 0);
add_thread (inferior_ptid);
lp = add_lwp (inferior_ptid);
lp->stopped = 1;
+ lp->resumed = 1;
+
+ /* If this is a vfork child, then the address-space is shared
+ with the parent. If we detached from the parent, then we can
+ reuse the parent's program/address spaces. */
+ if (has_vforked || detach_fork)
+ {
+ child_inf->pspace = parent_pspace;
+ child_inf->aspace = child_inf->pspace->aspace;
+ }
+ else
+ {
+ child_inf->aspace = new_address_space ();
+ child_inf->pspace = add_program_space (child_inf->aspace);
+ child_inf->removable = 1;
+ set_current_program_space (child_inf->pspace);
+ clone_program_space (child_inf->pspace, parent_pspace);
+
+ /* Let the shared library layer (solib-svr4) learn about
+ this new process, relocate the cloned exec, pull in
+ shared libraries, and install the solib event breakpoint.
+ If a "cloned-VM" event was propagated better throughout
+ the core, this wouldn't be required. */
+ solib_create_inferior_hook (0);
+ }
+ /* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
}
}
\f
-static void
+static int
linux_child_insert_fork_catchpoint (int pid)
{
- if (! linux_supports_tracefork (pid))
- error (_("Your system does not support fork catchpoints."));
+ return !linux_supports_tracefork (pid);
}
-static void
+static int
linux_child_insert_vfork_catchpoint (int pid)
{
- if (!linux_supports_tracefork (pid))
- error (_("Your system does not support vfork catchpoints."));
+ return !linux_supports_tracefork (pid);
}
-static void
+static int
linux_child_insert_exec_catchpoint (int pid)
{
- if (!linux_supports_tracefork (pid))
- error (_("Your system does not support exec catchpoints."));
+ return !linux_supports_tracefork (pid);
+}
+
+static int
+linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
+ int table_size, int *table)
+{
+ if (!linux_supports_tracesysgood (pid))
+ return 1;
+
+ /* On GNU/Linux, we ignore the arguments. It means that we only
+ enable the syscall catchpoints, but do not disable them.
+
+ Also, we do not use the `table' information because we do not
+ filter system calls here. We let GDB do the logic for us. */
+ return 0;
}
/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);
-static int cancel_breakpoint (struct lwp_info *lp);
\f
/* Convert wait status STATUS to a string. Used for printing debug
static char buf[64];
if (WIFSTOPPED (status))
- snprintf (buf, sizeof (buf), "%s (stopped)",
- strsignal (WSTOPSIG (status)));
+ {
+ if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
+ snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
+ strsignal (SIGTRAP));
+ else
+ snprintf (buf, sizeof (buf), "%s (stopped)",
+ strsignal (WSTOPSIG (status)));
+ }
else if (WIFSIGNALED (status))
snprintf (buf, sizeof (buf), "%s (terminated)",
- strsignal (WSTOPSIG (status)));
+ strsignal (WTERMSIG (status)));
else
snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
return buf;
}
-/* Initialize the list of LWPs. Note that this module, contrary to
- what GDB's generic threads layer does for its thread list,
- re-initializes the LWP lists whenever we mourn or detach (which
- doesn't involve mourning) the inferior. */
-
-static void
-init_lwp_list (void)
-{
- struct lwp_info *lp, *lpnext;
-
- for (lp = lwp_list; lp; lp = lpnext)
- {
- lpnext = lp->next;
- xfree (lp);
- }
-
- lwp_list = NULL;
-}
-
/* Remove all LWPs belong to PID from the lwp list. */
static void
lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
lp->ptid = ptid;
+ lp->core = -1;
lp->next = lwp_list;
lwp_list = lp;
return NULL;
}
-/* Returns true if PTID matches filter FILTER. FILTER can be the wild
- card MINUS_ONE_PTID (all ptid match it); can be a ptid representing
- a process (ptid_is_pid returns true), in which case, all lwps of
- that give process match, lwps of other process do not; or, it can
- represent a specific thread, in which case, only that thread will
- match true. PTID must represent an LWP, it can never be a wild
- card. */
-
-static int
-ptid_match (ptid_t ptid, ptid_t filter)
-{
- /* Since both parameters have the same type, prevent easy mistakes
- from happening. */
- gdb_assert (!ptid_equal (ptid, minus_one_ptid)
- && !ptid_equal (ptid, null_ptid));
-
- if (ptid_equal (filter, minus_one_ptid))
- return 1;
- if (ptid_is_pid (filter)
- && ptid_get_pid (ptid) == ptid_get_pid (filter))
- return 1;
- else if (ptid_equal (ptid, filter))
- return 1;
-
- return 0;
-}
-
/* Call CALLBACK with its second argument set to DATA for every LWP in
the list. If CALLBACK returns 1 for a particular LWP, return a
pointer to the structure describing that LWP immediately.
*cloned = 1;
}
- gdb_assert (pid == new_pid && WIFSTOPPED (status));
+ gdb_assert (pid == new_pid);
+
+ if (!WIFSTOPPED (status))
+ {
+ /* The pid we tried to attach has apparently just exited. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
+ pid, status_to_str (status));
+ return status;
+ }
if (WSTOPSIG (status) != SIGSTOP)
{
target_pid_to_str (ptid));
status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
+ if (!WIFSTOPPED (status))
+ return -1;
+
lp = add_lwp (ptid);
lp->stopped = 1;
lp->cloned = cloned;
status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
&lp->signalled);
+ if (!WIFSTOPPED (status))
+ {
+ if (WIFEXITED (status))
+ {
+ int exit_code = WEXITSTATUS (status);
+
+ target_terminal_ours ();
+ target_mourn_inferior ();
+ if (exit_code == 0)
+ error (_("Unable to attach: program exited normally."));
+ else
+ error (_("Unable to attach: program exited with code %d."),
+ exit_code);
+ }
+ else if (WIFSIGNALED (status))
+ {
+ enum target_signal signo;
+
+ target_terminal_ours ();
+ target_mourn_inferior ();
+
+ signo = target_signal_from_host (WTERMSIG (status));
+ error (_("Unable to attach: program terminated with signal "
+ "%s, %s."),
+ target_signal_to_name (signo),
+ target_signal_to_string (signo));
+ }
+
+ internal_error (__FILE__, __LINE__,
+ _("unexpected status %d for PID %ld"),
+ status, (long) GET_LWP (ptid));
+ }
+
lp->stopped = 1;
/* Save the wait status to report later. */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
- struct target_waitstatus last;
- ptid_t last_ptid;
+ enum target_signal signo = TARGET_SIGNAL_0;
+
+ /* If we paused threads momentarily, we may have stored pending
+ events in lp->status or lp->waitstatus (see stop_wait_callback),
+ and GDB core hasn't seen any signal for those threads.
+ Otherwise, the last signal reported to the core is found in the
+ thread object's stop_signal.
+
+ There's a corner case that isn't handled here at present. Only
+ if the thread stopped with a TARGET_WAITKIND_STOPPED does
+ stop_signal make sense as a real signal to pass to the inferior.
+ Some catchpoint related events, like
+ TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
+ to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
+ those traps are debug API (ptrace in our case) related and
+ induced; the inferior wouldn't see them if it wasn't being
+ traced. Hence, we should never pass them to the inferior, even
+ when set to pass state. Since this corner case isn't handled by
+ infrun.c when proceeding with a signal, for consistency, neither
+ do we handle it here (or elsewhere in the file we check for
+ signal pass state). Normally SIGTRAP isn't set to pass state, so
+ this is really a corner case. */
- get_last_target_status (&last_ptid, &last);
-
- /* If this lwp is the ptid that GDB is processing an event from, the
- signal will be in stop_signal. Otherwise, we may cache pending
- events in lp->status while trying to stop all threads (see
- stop_wait_callback). */
-
- *status = 0;
+ if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
+ else if (lp->status)
+ signo = target_signal_from_host (WSTOPSIG (lp->status));
+ else if (non_stop && !is_executing (lp->ptid))
+ {
+ struct thread_info *tp = find_thread_ptid (lp->ptid);
- if (non_stop)
+ signo = tp->suspend.stop_signal;
+ }
+ else if (!non_stop)
{
- enum target_signal signo = TARGET_SIGNAL_0;
+ struct target_waitstatus last;
+ ptid_t last_ptid;
- if (is_executing (lp->ptid))
- {
- /* If the core thought this lwp was executing --- e.g., the
- executing property hasn't been updated yet, but the
- thread has been stopped with a stop_callback /
- stop_wait_callback sequence (see linux_nat_detach for
- example) --- we can only have pending events in the local
- queue. */
- signo = target_signal_from_host (WSTOPSIG (lp->status));
- }
- else
- {
- /* If the core knows the thread is not executing, then we
- have the last signal recorded in
- thread_info->stop_signal. */
+ get_last_target_status (&last_ptid, &last);
+ if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
+ {
struct thread_info *tp = find_thread_ptid (lp->ptid);
- signo = tp->stop_signal;
- }
- if (signo != TARGET_SIGNAL_0
- && !signal_pass_state (signo))
- {
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog, "\
-GPT: lwp %s had signal %s, but it is in no pass state\n",
- target_pid_to_str (lp->ptid),
- target_signal_to_string (signo));
+ signo = tp->suspend.stop_signal;
}
- else
- {
- if (signo != TARGET_SIGNAL_0)
- *status = W_STOPCODE (target_signal_to_host (signo));
+ }
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "GPT: lwp %s as pending signal %s\n",
- target_pid_to_str (lp->ptid),
- target_signal_to_string (signo));
- }
+ *status = 0;
+
+ if (signo == TARGET_SIGNAL_0)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "GPT: lwp %s has no pending signal\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else if (!signal_pass_state (signo))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "GPT: lwp %s had signal %s, "
+ "but it is in no pass state\n",
+ target_pid_to_str (lp->ptid),
+ target_signal_to_string (signo));
}
else
{
- if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
- {
- struct thread_info *tp = find_thread_ptid (lp->ptid);
- if (tp->stop_signal != TARGET_SIGNAL_0
- && signal_pass_state (tp->stop_signal))
- *status = W_STOPCODE (target_signal_to_host (tp->stop_signal));
- }
- else
- *status = lp->status;
+ *status = W_STOPCODE (target_signal_to_host (signo));
+
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "GPT: lwp %s has pending signal %s\n",
+ target_pid_to_str (lp->ptid),
+ target_signal_to_string (signo));
}
return 0;
{
int pid;
int status;
- enum target_signal sig;
struct lwp_info *main_lwp;
pid = GET_PID (inferior_ptid);
pass it along with PTRACE_DETACH. */
args = alloca (8);
sprintf (args, "%d", (int) WSTOPSIG (status));
- fprintf_unfiltered (gdb_stdlog,
- "LND: Sending signal %s to %s\n",
- args,
- target_pid_to_str (main_lwp->ptid));
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LND: Sending signal %s to %s\n",
+ args,
+ target_pid_to_str (main_lwp->ptid));
}
delete_lwp (main_lwp->ptid);
static int
resume_callback (struct lwp_info *lp, void *data)
{
- if (lp->stopped && lp->status == 0)
+ struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
+
+ if (lp->stopped && inf->vfork_child != NULL)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "RC: Not resuming %s (vfork parent)\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else if (lp->stopped && lp->status == 0)
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
lp->stopped = 0;
lp->step = 0;
memset (&lp->siginfo, 0, sizeof (lp->siginfo));
+ lp->stopped_by_watchpoint = 0;
}
else if (lp->stopped && debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
+ fprintf_unfiltered (gdb_stdlog,
+ "RC: Not resuming sibling %s (has pending)\n",
target_pid_to_str (lp->ptid));
else if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
+ fprintf_unfiltered (gdb_stdlog,
+ "RC: Not resuming sibling %s (not stopped)\n",
target_pid_to_str (lp->ptid));
return 0;
"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
step ? "step" : "resume",
target_pid_to_str (ptid),
- signo ? strsignal (signo) : "0",
+ (signo != TARGET_SIGNAL_0
+ ? strsignal (target_signal_to_host (signo)) : "0"),
target_pid_to_str (inferior_ptid));
block_child_signals (&prev_mask);
resume_many = (ptid_equal (minus_one_ptid, ptid)
|| ptid_is_pid (ptid));
- if (!non_stop)
- {
- /* Mark the lwps we're resuming as resumed. */
- iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
- iterate_over_lwps (ptid, resume_set_callback, NULL);
- }
- else
- iterate_over_lwps (minus_one_ptid, resume_set_callback, NULL);
+ /* Mark the lwps we're resuming as resumed. */
+ iterate_over_lwps (ptid, resume_set_callback, NULL);
/* See if it's the current inferior that should be handled
specially. */
if (lp->status && WIFSTOPPED (lp->status))
{
- int saved_signo;
+ enum target_signal saved_signo;
struct inferior *inf;
inf = find_inferior_pid (ptid_get_pid (lp->ptid));
/* Defer to common code if we're gaining control of the
inferior. */
- if (inf->stop_soon == NO_STOP_QUIETLY
+ if (inf->control.stop_soon == NO_STOP_QUIETLY
&& signal_stop_state (saved_signo) == 0
&& signal_print_state (saved_signo) == 0
&& signal_pass_state (saved_signo) == 1)
}
}
- if (lp->status)
+ if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
{
/* FIXME: What should we do if we are supposed to continue
this thread with a signal? */
linux_ops->to_resume (linux_ops, ptid, step, signo);
memset (&lp->siginfo, 0, sizeof (lp->siginfo));
+ lp->stopped_by_watchpoint = 0;
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"LLR: %s %s, %s (resume event thread)\n",
step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
target_pid_to_str (ptid),
- signo ? strsignal (signo) : "0");
+ (signo != TARGET_SIGNAL_0
+ ? strsignal (target_signal_to_host (signo)) : "0"));
restore_child_signals_mask (&prev_mask);
if (target_can_async_p ())
target_async (inferior_event_handler, 0);
}
-/* Issue kill to specified lwp. */
-
-static int tkill_failed;
+/* Send a signal to an LWP.  Prefers the thread-directed tkill
+ syscall when available, falling back to process-directed kill
+ otherwise.  Returns the result of the underlying syscall. */
static int
kill_lwp (int lwpid, int signo)
{
- errno = 0;
-
-/* Use tkill, if possible, in case we are using nptl threads. If tkill
- fails, then we are not using nptl threads and we should be using kill. */
+ /* Use tkill, if possible, in case we are using nptl threads. If tkill
+ fails, then we are not using nptl threads and we should be using kill. */
#ifdef HAVE_TKILL_SYSCALL
- if (!tkill_failed)
- {
- int ret = syscall (__NR_tkill, lwpid, signo);
- if (errno != ENOSYS)
- return ret;
- errno = 0;
- tkill_failed = 1;
- }
+ {
+ /* Latched the first time the kernel reports ENOSYS for tkill;
+ from then on we go straight to kill. */
+ static int tkill_failed;
+
+ if (!tkill_failed)
+ {
+ int ret;
+
+ errno = 0;
+ ret = syscall (__NR_tkill, lwpid, signo);
+ if (errno != ENOSYS)
+ return ret;
+ tkill_failed = 1;
+ }
+ }
#endif
 return kill (lwpid, signo);
}
+/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
+ event, check if the core is interested in it: if not, ignore the
+ event, and keep waiting; otherwise, we need to toggle the LWP's
+ syscall entry/exit status, since the ptrace event itself doesn't
+ indicate it, and report the trap to higher layers. Returns
+ nonzero if the event was consumed here (the LWP was re-resumed, or
+ was left to report its pending SIGSTOP) and the caller should keep
+ waiting; zero if the trap should be reported to the core. */
+
+static int
+linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
+{
+ struct target_waitstatus *ourstatus = &lp->waitstatus;
+ struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
+ int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
+
+ if (stopping)
+ {
+ /* If we're stopping threads, there's a SIGSTOP pending, which
+ makes it so that the LWP reports an immediate syscall return,
+ followed by the SIGSTOP. Skip seeing that "return" using
+ PTRACE_CONT directly, and let stop_wait_callback collect the
+ SIGSTOP. Later when the thread is resumed, a new syscall
+ entry event is reported. If we didn't do this (and returned 0), we'd
+ leave a syscall entry pending, and our caller, by using
+ PTRACE_CONT to collect the SIGSTOP, skips the syscall return
+ itself. Later, when the user re-resumes this LWP, we'd see
+ another syscall entry event and we'd mistake it for a return.
+
+ If stop_wait_callback didn't force the SIGSTOP out of the LWP
+ (leaving immediately with LWP->signalled set, without issuing
+ a PTRACE_CONT), it would still be problematic to leave this
+ syscall enter pending, as later when the thread is resumed,
+ it would then see the same syscall exit mentioned above,
+ followed by the delayed SIGSTOP, while the syscall didn't
+ actually get to execute. It seems it would be even more
+ confusing to the user. */
+
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LHST: ignoring syscall %d "
+ "for LWP %ld (stopping threads), "
+ "resuming with PTRACE_CONT for SIGSTOP\n",
+ syscall_number,
+ GET_LWP (lp->ptid));
+
+ lp->syscall_state = TARGET_WAITKIND_IGNORE;
+ ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
+ return 1;
+ }
+
+ if (catch_syscall_enabled ())
+ {
+ /* Always update the entry/return state, even if this particular
+ syscall isn't interesting to the core now. In async mode,
+ the user could install a new catchpoint for this syscall
+ between syscall enter/return, and we'll need to know to
+ report a syscall return if that happens. */
+ lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
+ ? TARGET_WAITKIND_SYSCALL_RETURN
+ : TARGET_WAITKIND_SYSCALL_ENTRY);
+
+ if (catching_syscall_number (syscall_number))
+ {
+ /* Alright, an event to report. */
+ ourstatus->kind = lp->syscall_state;
+ ourstatus->value.syscall_number = syscall_number;
+
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LHST: stopping for %s of syscall %d"
+ " for LWP %ld\n",
+ lp->syscall_state
+ == TARGET_WAITKIND_SYSCALL_ENTRY
+ ? "entry" : "return",
+ syscall_number,
+ GET_LWP (lp->ptid));
+ return 0;
+ }
+
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LHST: ignoring %s of syscall %d "
+ "for LWP %ld\n",
+ lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
+ ? "entry" : "return",
+ syscall_number,
+ GET_LWP (lp->ptid));
+ }
+ else
+ {
+ /* If we had been syscall tracing, and hence used PT_SYSCALL
+ before on this LWP, it could happen that the user removes all
+ syscall catchpoints before we get to process this event.
+ There are two noteworthy issues here:
+
+ - When stopped at a syscall entry event, resuming with
+ PT_STEP still resumes executing the syscall and reports a
+ syscall return.
+
+ - Only PT_SYSCALL catches syscall enters. If we last
+ single-stepped this thread, then this event can't be a
+ syscall enter; it has to be a syscall exit.
+
+ The points above mean that the next resume, be it PT_STEP or
+ PT_CONTINUE, can not trigger a syscall trace event. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LHST: caught syscall event "
+ "with no syscall catchpoints."
+ " %d for LWP %ld, ignoring\n",
+ syscall_number,
+ GET_LWP (lp->ptid));
+ lp->syscall_state = TARGET_WAITKIND_IGNORE;
+ }
+
+ /* The core isn't interested in this event. For efficiency, avoid
+ stopping all threads only to have the core resume them all again.
+ Since we're not stopping threads, if we're still syscall tracing
+ and not stepping, we can't use PTRACE_CONT here, as we'd miss any
+ subsequent syscall. Simply resume using the inf-ptrace layer,
+ which knows when to use PT_SYSCALL or PT_CONTINUE. */
+
+ /* Note that gdbarch_get_syscall_number may access registers, hence
+ fill a regcache. */
+ registers_changed ();
+ linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
+ lp->step, TARGET_SIGNAL_0);
+ return 1;
+}
+
/* Handle a GNU/Linux extended wait response. If we see a clone
event, we need to add the new LWP to our list (and not report the
trap to higher layers). This function returns non-zero if the
{
int pid = GET_LWP (lp->ptid);
struct target_waitstatus *ourstatus = &lp->waitstatus;
- struct lwp_info *new_lp = NULL;
int event = status >> 16;
if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
ourstatus->kind = TARGET_WAITKIND_VFORKED;
else
{
- struct cleanup *old_chain;
+ struct lwp_info *new_lp;
ourstatus->kind = TARGET_WAITKIND_IGNORE;
+
new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
new_lp->cloned = 1;
new_lp->stopped = 1;
}
}
+ /* Note the need to use the low target ops to resume, to
+ handle resuming with PT_SYSCALL if we have syscall
+ catchpoints. */
if (!stopping)
{
+ enum target_signal signo;
+
new_lp->stopped = 0;
new_lp->resumed = 1;
- ptrace (PTRACE_CONT, new_pid, 0,
- status ? WSTOPSIG (status) : 0);
+
+ signo = (status
+ ? target_signal_from_host (WSTOPSIG (status))
+ : TARGET_SIGNAL_0);
+
+ linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
+ 0, signo);
+ }
+ else
+ {
+ if (status != 0)
+ {
+ /* We created NEW_LP so it cannot yet contain STATUS. */
+ gdb_assert (new_lp->status == 0);
+
+ /* Save the wait status to report later. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LHEW: waitpid of new LWP %ld, "
+ "saving status %s\n",
+ (long) GET_LWP (new_lp->ptid),
+ status_to_str (status));
+ new_lp->status = status;
+ }
}
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "LHEW: Got clone event from LWP %ld, resuming\n",
+ "LHEW: Got clone event "
+ "from LWP %ld, resuming\n",
GET_LWP (lp->ptid));
- ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
+ linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
+ 0, TARGET_SIGNAL_0);
return 1;
}
ourstatus->value.execd_pathname
= xstrdup (linux_child_pid_to_exec_file (pid));
- if (linux_parent_pid)
+ return 0;
+ }
+
+ if (event == PTRACE_EVENT_VFORK_DONE)
+ {
+ if (current_inferior ()->waiting_for_vfork_done)
{
- detach_breakpoints (linux_parent_pid);
- ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LHEW: Got expected PTRACE_EVENT_"
+ "VFORK_DONE from LWP %ld: stopping\n",
+ GET_LWP (lp->ptid));
- linux_parent_pid = 0;
+ ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
+ return 0;
}
- /* At this point, all inserted breakpoints are gone. Doing this
- as soon as we detect an exec prevents the badness of deleting
- a breakpoint writing the current "shadow contents" to lift
- the bp. That shadow is NOT valid after an exec.
-
- Note that we have to do this after the detach_breakpoints
- call above, otherwise breakpoints wouldn't be lifted from the
- parent on a vfork, because detach_breakpoints would think
- that breakpoints are not inserted. */
- mark_breakpoints_out ();
- return 0;
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LHEW: Got PTRACE_EVENT_VFORK_DONE "
+ "from LWP %ld: resuming\n",
+ GET_LWP (lp->ptid));
+ ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
+ return 1;
}
internal_error (__FILE__, __LINE__,
return 0;
}
- gdb_assert (WIFSTOPPED (status));
-
+ gdb_assert (WIFSTOPPED (status));
+
+ /* Handle GNU/Linux's syscall SIGTRAPs. */
+ if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
+ {
+ /* No longer need the sysgood bit. The ptrace event ends up
+ recorded in lp->waitstatus if we care for it. We can carry
+ on handling the event like a regular SIGTRAP from here
+ on. */
+ status = W_STOPCODE (SIGTRAP);
+ if (linux_handle_syscall_trap (lp, 1))
+ return wait_lwp (lp);
+ }
+
/* Handle GNU/Linux's extended waitstatus for trace events. */
if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
{
linux_nat_has_pending_sigint (int pid)
{
sigset_t pending, blocked, ignored;
- int i;
linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
}
}
+/* Fetch the possible triggered data watchpoint info and store it in
+ LP.
+
+ On some archs, like x86, that use debug registers to set
+ watchpoints, it's possible that the way to know which watched
+ address trapped, is to check the register that is used to select
+ which address to watch. Problem is, between setting the watchpoint
+ and reading back which data address trapped, the user may change
+ the set of watchpoints, and, as a consequence, GDB changes the
+ debug registers in the inferior. To avoid reading back a stale
+ stopped-data-address when that happens, we cache in LP the fact
+ that a watchpoint trapped, and the corresponding data address, as
+ soon as we see LP stop with a SIGTRAP. If GDB changes the debug
+ registers meanwhile, we have the cached data we can rely on. */
+
+static void
+save_sigtrap (struct lwp_info *lp)
+{
+ struct cleanup *old_chain;
+
+ if (linux_ops->to_stopped_by_watchpoint == NULL)
+ {
+ lp->stopped_by_watchpoint = 0;
+ return;
+ }
+
+ /* The low target's watchpoint queries operate on the current
+ thread, so temporarily switch inferior_ptid to LP. */
+ old_chain = save_inferior_ptid ();
+ inferior_ptid = lp->ptid;
+
+ lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
+
+ if (lp->stopped_by_watchpoint)
+ {
+ if (linux_ops->to_stopped_data_address != NULL)
+ lp->stopped_data_address_p =
+ linux_ops->to_stopped_data_address (&current_target,
+ &lp->stopped_data_address);
+ else
+ lp->stopped_data_address_p = 0;
+ }
+
+ do_cleanups (old_chain);
+}
+
+/* Return the stopped-by-watchpoint flag cached for the current thread
+ (inferior_ptid), rather than querying the inferior's debug state,
+ which may have changed since the stop. See save_sigtrap. */
+
+static int
+linux_nat_stopped_by_watchpoint (void)
+{
+ struct lwp_info *lp = find_lwp_pid (inferior_ptid);
+
+ gdb_assert (lp != NULL);
+
+ return lp->stopped_by_watchpoint;
+}
+
+/* Store in *ADDR_P the data address cached by save_sigtrap for the
+ current thread (inferior_ptid). Returns nonzero iff a valid data
+ address was cached; *ADDR_P is written unconditionally but is only
+ meaningful when the return value is nonzero. */
+
+static int
+linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
+{
+ struct lwp_info *lp = find_lwp_pid (inferior_ptid);
+
+ gdb_assert (lp != NULL);
+
+ *addr_p = lp->stopped_data_address;
+
+ return lp->stopped_data_address_p;
+}
+
+/* Commonly any breakpoint / watchpoint generates only SIGTRAP. This
+ is the default SIGTRAP-like event recognizer: return nonzero iff
+ STATUS says the LWP stopped with SIGTRAP. */
+
+static int
+sigtrap_is_event (int status)
+{
+ return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
+}
+
+/* SIGTRAP-like events recognizer. Defaults to sigtrap_is_event; a
+ backend may install an alternative recognizer via
+ linux_nat_set_status_is_event. */
+
+static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
+
+/* Check for SIGTRAP-like events in LP: return nonzero iff LP has a
+ pending wait status in lp->status that the recognizer above
+ accepts. */
+
+static int
+linux_nat_lp_status_is_event (struct lwp_info *lp)
+{
+ /* We check for lp->waitstatus in addition to lp->status, because we can
+ have pending process exits recorded in lp->status
+ and W_EXITCODE(0,0) == 0. We should probably have an additional
+ lp->status_p flag. */
+
+ return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
+ && linux_nat_status_is_event (lp->status));
+}
+
+/* Install an alternative SIGTRAP-like events recognizer. If a
+ breakpoint is inserted at the stop PC (breakpoint_inserted_here_p),
+ the gdbarch_decr_pc_after_break adjustment will be applied. */
+
+void
+linux_nat_set_status_is_event (struct target_ops *t,
+ int (*status_is_event) (int status))
+{
+ linux_nat_status_is_event = status_is_event;
+}
+
/* Wait until LP is stopped. */
static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
+ struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
+
+ /* If this is a vfork parent, bail out, it is not going to report
+ any SIGSTOP until the vfork is done with. */
+ if (inf->vfork_child != NULL)
+ return 0;
+
if (!lp->stopped)
{
int status;
ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
+ "PTRACE_CONT %s, 0, 0 (%s) "
+ "(discarding SIGINT)\n",
target_pid_to_str (lp->ptid),
errno ? safe_strerror (errno) : "OK");
if (WSTOPSIG (status) != SIGSTOP)
{
- if (WSTOPSIG (status) == SIGTRAP)
+ if (linux_nat_status_is_event (status))
{
/* If a LWP other than the LWP that we're reporting an
event for has hit a GDB breakpoint (as opposed to
/* Save the trap's siginfo in case we need it later. */
save_siginfo (lp);
- /* Now resume this LWP and get the SIGSTOP event. */
+ save_sigtrap (lp);
+
+ /* Now resume this LWP and get the SIGSTOP event. */
errno = 0;
ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
if (debug_linux_nat)
target_pid_to_str (lp->ptid));
}
/* Hold this event/waitstatus while we check to see if
- there are any more (we still want to get that SIGSTOP). */
+ there are any more (we still want to get that SIGSTOP). */
stop_wait_callback (lp, NULL);
/* Hold the SIGTRAP for handling by linux_nat_wait. If
there's another event, throw it back into the
- queue. */
+ queue. */
if (lp->status)
{
if (debug_linux_nat)
kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
}
- /* Save the sigtrap event. */
+ /* Save the sigtrap event. */
lp->status = status;
return 0;
}
else
{
/* The thread was stopped with a signal other than
- SIGSTOP, and didn't accidentally trip a breakpoint. */
+ SIGSTOP, and didn't accidentally trip a breakpoint. */
if (debug_linux_nat)
{
status_to_str ((int) status),
target_pid_to_str (lp->ptid));
}
- /* Now resume this LWP and get the SIGSTOP event. */
+ /* Now resume this LWP and get the SIGSTOP event. */
errno = 0;
ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
if (debug_linux_nat)
errno ? safe_strerror (errno) : "OK");
/* Hold this event/waitstatus while we check to see if
- there are any more (we still want to get that SIGSTOP). */
+ there are any more (we still want to get that SIGSTOP). */
stop_wait_callback (lp, NULL);
/* If the lp->status field is still empty, use it to
{
/* Only report a pending wait status if we pretend that this has
indeed been resumed. */
- /* We check for lp->waitstatus in addition to lp->status, because we
- can have pending process exits recorded in lp->waitstatus, and
- W_EXITCODE(0,0) == 0. */
- return ((lp->status != 0
- || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
- && lp->resumed);
+ if (!lp->resumed)
+ return 0;
+
+ if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ {
+ /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
+ or a pending process exit. Note that `W_EXITCODE(0,0) ==
+ 0', so a clean process exit cannot be stored pending in
+ lp->status, as it would be indistinguishable from
+ no-pending-status. */
+ return 1;
+ }
+
+ if (lp->status != 0)
+ return 1;
+
+ return 0;
}
/* Return non-zero if LP isn't stopped. */
gdb_assert (count != NULL);
/* Count only resumed LWPs that have a SIGTRAP event pending. */
- if (lp->status != 0 && lp->resumed
- && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
+ if (lp->resumed && linux_nat_lp_status_is_event (lp))
(*count)++;
return 0;
gdb_assert (selector != NULL);
- /* Select only resumed LWPs that have a SIGTRAP event pending. */
- if (lp->status != 0 && lp->resumed
- && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
+ /* Select only resumed LWPs that have a SIGTRAP event pending. */
+ if (lp->resumed && linux_nat_lp_status_is_event (lp))
if ((*selector)-- == 0)
return 1;
CORE_ADDR pc;
pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
- if (breakpoint_inserted_here_p (pc))
+ if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
delete or disable the breakpoint, but the LWP will have already
tripped on it. */
- if (lp->status != 0
- && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
+ if (linux_nat_lp_status_is_event (lp)
&& cancel_breakpoint (lp))
/* Throw away the SIGTRAP. */
lp->status = 0;
}
/* Make sure we don't report an event for the exit of an LWP not in
- our list, i.e. not part of the current process. This can happen
+ our list, i.e. not part of the current process. This can happen
if we detach from a program we original forked and then it
exits. */
if (!WIFSTOPPED (status) && !lp)
add_thread (lp->ptid);
}
- /* Save the trap's siginfo in case we need it later. */
- if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
- save_siginfo (lp);
+ /* Handle GNU/Linux's syscall SIGTRAPs. */
+ if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
+ {
+ /* No longer need the sysgood bit. The ptrace event ends up
+ recorded in lp->waitstatus if we care for it. We can carry
+ on handling the event like a regular SIGTRAP from here
+ on. */
+ status = W_STOPCODE (SIGTRAP);
+ if (linux_handle_syscall_trap (lp, 0))
+ return NULL;
+ }
/* Handle GNU/Linux's extended waitstatus for trace events. */
if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
return NULL;
}
+ if (linux_nat_status_is_event (status))
+ {
+ /* Save the trap's siginfo in case we need it later. */
+ save_siginfo (lp);
+
+ save_sigtrap (lp);
+ }
+
/* Check if the thread has exited. */
if ((WIFEXITED (status) || WIFSIGNALED (status))
&& num_lwps (GET_PID (lp->ptid)) > 1)
/* An interesting event. */
gdb_assert (lp);
+ lp->status = status;
return lp;
}
lp = NULL;
status = 0;
- /* Make sure there is at least one LWP that has been resumed. */
- gdb_assert (iterate_over_lwps (ptid, resumed_callback, NULL));
+ /* Make sure that of those LWPs we want to get an event from, there
+ is at least one LWP that has been resumed. If there's none, just
+ bail out. The core may just be flushing asynchronously all
+ events. */
+ if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
+ {
+ ourstatus->kind = TARGET_WAITKIND_IGNORE;
+
+ if (debug_linux_nat_async)
+ fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
+
+ restore_child_signals_mask (&prev_mask);
+ return minus_one_ptid;
+ }
/* First check if there is a LWP with a wait status pending. */
if (pid == -1)
lp = iterate_over_lwps (ptid, status_callback, NULL);
if (lp)
{
- status = lp->status;
- lp->status = 0;
-
- if (debug_linux_nat && status)
+ if (debug_linux_nat && lp->status)
fprintf_unfiltered (gdb_stdlog,
"LLW: Using pending wait status %s for %s.\n",
- status_to_str (status),
+ status_to_str (lp->status),
target_pid_to_str (lp->ptid));
}
/* We have a specific LWP to check. */
lp = find_lwp_pid (ptid);
gdb_assert (lp);
- status = lp->status;
- lp->status = 0;
- if (debug_linux_nat && status)
+ if (debug_linux_nat && lp->status)
fprintf_unfiltered (gdb_stdlog,
"LLW: Using pending wait status %s for %s.\n",
- status_to_str (status),
+ status_to_str (lp->status),
target_pid_to_str (lp->ptid));
/* If we have to wait, take into account whether PID is a cloned
because we can have pending process exits recorded in
lp->status and W_EXITCODE(0,0) == 0. We should probably have
an additional lp->status_p flag. */
- if (status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
+ if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
lp = NULL;
}
lp->stopped = 0;
gdb_assert (lp->resumed);
- /* This should catch the pending SIGSTOP. */
+ /* Catch the pending SIGSTOP. */
+ status = lp->status;
+ lp->status = 0;
+
stop_wait_callback (lp, NULL);
+
+ /* If the lp->status field isn't empty, we caught another signal
+ while flushing the SIGSTOP. Return it back to the event
+ queue of the LWP, as we already have an event to handle. */
+ if (lp->status)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLW: kill %s, %s\n",
+ target_pid_to_str (lp->ptid),
+ status_to_str (lp->status));
+ kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
+ }
+
+ lp->status = status;
}
if (!target_can_async_p ())
lp = linux_nat_filter_event (lwpid, status, options);
+ /* STATUS is now no longer valid, use LP->STATUS instead. */
+ status = 0;
+
if (lp
&& ptid_is_pid (ptid)
&& ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
{
+ gdb_assert (lp->resumed);
+
if (debug_linux_nat)
- fprintf (stderr, "LWP %ld got an event %06x, leaving pending.\n",
- ptid_get_lwp (lp->ptid), status);
+ fprintf (stderr,
+ "LWP %ld got an event %06x, leaving pending.\n",
+ ptid_get_lwp (lp->ptid), lp->status);
- if (WIFSTOPPED (status))
+ if (WIFSTOPPED (lp->status))
{
- if (WSTOPSIG (status) != SIGSTOP)
+ if (WSTOPSIG (lp->status) != SIGSTOP)
{
- lp->status = status;
-
- stop_callback (lp, NULL);
-
- /* Resume in order to collect the sigstop. */
- ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
-
- stop_wait_callback (lp, NULL);
+ /* Cancel breakpoint hits. The breakpoint may
+ be removed before we fetch events from this
+ process to report to the core. It is best
+ not to assume the moribund breakpoints
+ heuristic always handles these cases --- it
+ could be too many events go through to the
+ core before this one is handled. All-stop
+ always cancels breakpoint hits in all
+ threads. */
+ if (non_stop
+ && linux_nat_lp_status_is_event (lp)
+ && cancel_breakpoint (lp))
+ {
+ /* Throw away the SIGTRAP. */
+ lp->status = 0;
+
+ if (debug_linux_nat)
+ fprintf (stderr,
+ "LLW: LWP %ld hit a breakpoint while"
+ " waiting for another process;"
+ " cancelled it\n",
+ ptid_get_lwp (lp->ptid));
+ }
+ lp->stopped = 1;
}
else
{
lp->signalled = 0;
}
}
- else if (WIFEXITED (status) || WIFSIGNALED (status))
+ else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
{
if (debug_linux_nat)
- fprintf (stderr, "Process %ld exited while stopping LWPs\n",
+ fprintf (stderr,
+ "Process %ld exited while stopping LWPs\n",
ptid_get_lwp (lp->ptid));
/* This was the last lwp in the process. Since
about the exit code/signal, leave the status
pending for the next time we're able to report
it. */
- lp->status = status;
/* Prevent trying to stop this thread again. We'll
never try to resume it because it has a pending
/* Store the pending event in the waitstatus as
well, because W_EXITCODE(0,0) == 0. */
- store_waitstatus (&lp->waitstatus, status);
+ store_waitstatus (&lp->waitstatus, lp->status);
}
/* Keep looking. */
sigsuspend (&suspend_mask);
}
}
+ else if (target_options & TARGET_WNOHANG)
+ {
+ /* No interesting event for PID yet. */
+ ourstatus->kind = TARGET_WAITKIND_IGNORE;
+
+ if (debug_linux_nat_async)
+ fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
+
+ restore_child_signals_mask (&prev_mask);
+ return minus_one_ptid;
+ }
/* We shouldn't end up here unless we want to try again. */
gdb_assert (lp == NULL);
gdb_assert (lp);
+ status = lp->status;
+ lp->status = 0;
+
/* Don't report signals that GDB isn't interested in, such as
signals that are neither printed nor stopped upon. Stopping all
threads can be a bit time-consuming so if we want decent
if (WIFSTOPPED (status))
{
- int signo = target_signal_from_host (WSTOPSIG (status));
+ enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
struct inferior *inf;
inf = find_inferior_pid (ptid_get_pid (lp->ptid));
skip the signal handler, or, if we're gaining control of the
inferior. */
if (!lp->step
- && inf->stop_soon == NO_STOP_QUIETLY
+ && inf->control.stop_soon == NO_STOP_QUIETLY
&& signal_stop_state (signo) == 0
&& signal_print_state (signo) == 0
&& signal_pass_state (signo) == 1)
lp->step ?
"PTRACE_SINGLESTEP" : "PTRACE_CONT",
target_pid_to_str (lp->ptid),
- signo ? strsignal (signo) : "0");
+ (signo != TARGET_SIGNAL_0
+ ? strsignal (target_signal_to_host (signo))
+ : "0"));
lp->stopped = 0;
goto retry;
}
starvation. */
if (pid == -1)
select_event_lwp (ptid, &lp, &status);
- }
- /* Now that we've selected our final event LWP, cancel any
- breakpoints in other LWPs that have hit a GDB breakpoint. See
- the comment in cancel_breakpoints_callback to find out why. */
- iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
+ /* Now that we've selected our final event LWP, cancel any
+ breakpoints in other LWPs that have hit a GDB breakpoint.
+ See the comment in cancel_breakpoints_callback to find out
+ why. */
+ iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
+
+ /* In all-stop, from the core's perspective, all LWPs are now
+ stopped until a new resume action is sent over. */
+ iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
+ }
+ else
+ lp->resumed = 0;
- if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
+ if (linux_nat_status_is_event (status))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
restore_child_signals_mask (&prev_mask);
+
+ if (ourstatus->kind == TARGET_WAITKIND_EXITED
+ || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
+ lp->core = -1;
+ else
+ lp->core = linux_nat_core_of_thread_1 (lp->ptid);
+
return lp->ptid;
}
+/* Resume LWPs that are currently stopped without any pending status
+ to report, but are resumed from the core's perspective. Iterator
+ callback; DATA points to the ptid being waited on. Always returns
+ 0 so that iteration visits every LWP. */
+
+static int
+resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
+{
+ ptid_t *wait_ptid_p = data;
+
+ if (lp->stopped
+ && lp->resumed
+ && lp->status == 0
+ && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
+ {
+ gdb_assert (is_executing (lp->ptid));
+
+ /* Don't bother if there's a breakpoint at PC that we'd hit
+ immediately, and we're not waiting for this LWP. */
+ if (!ptid_match (lp->ptid, *wait_ptid_p))
+ {
+ struct regcache *regcache = get_thread_regcache (lp->ptid);
+ CORE_ADDR pc = regcache_read_pc (regcache);
+
+ if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ return 0;
+ }
+
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "RSRL: resuming stopped-resumed LWP %s\n",
+ target_pid_to_str (lp->ptid));
+
+ linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
+ lp->step, TARGET_SIGNAL_0);
+ /* Clear the per-stop state now that the LWP is running again. */
+ lp->stopped = 0;
+ memset (&lp->siginfo, 0, sizeof (lp->siginfo));
+ lp->stopped_by_watchpoint = 0;
+ }
+
+ return 0;
+}
+
static ptid_t
linux_nat_wait (struct target_ops *ops,
ptid_t ptid, struct target_waitstatus *ourstatus,
ptid_t event_ptid;
if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog, "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
+ fprintf_unfiltered (gdb_stdlog,
+ "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
/* Flush the async file first. */
if (target_can_async_p ())
async_file_flush ();
+ /* Resume LWPs that are currently stopped without any pending status
+ to report, but are resumed from the core's perspective. LWPs get
+ in this state if we find them stopping at a time we're not
+ interested in reporting the event (target_wait on a
+ specific_process, for example, see linux_nat_wait_1), and
+ meanwhile the event became uninteresting. Don't bother resuming
+ LWPs we're not going to wait for if they'd stop immediately. */
+ if (non_stop)
+ iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
+
event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
/* If we requested any event, and something came out, assume there
else
{
ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
+
/* Stop all threads before killing them, since ptrace requires
that the thread is stopped to sucessfully PTRACE_KILL. */
iterate_over_lwps (ptid, stop_callback, NULL);
return normal_pid_to_str (ptid);
}
+/* Return the name of thread THR, as read from
+ /proc/<pid>/task/<lwp>/comm, or NULL if the file cannot be read or
+ the name is empty. NOTE: the result points to a static buffer that
+ is overwritten by the next call; the caller must not free it. */
+
+static char *
+linux_nat_thread_name (struct thread_info *thr)
+{
+ int pid = ptid_get_pid (thr->ptid);
+ long lwp = ptid_get_lwp (thr->ptid);
+#define FORMAT "/proc/%d/task/%ld/comm"
+ char buf[sizeof (FORMAT) + 30];
+ FILE *comm_file;
+ char *result = NULL;
+
+ snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
+ comm_file = fopen (buf, "r");
+ if (comm_file)
+ {
+ /* Not exported by the kernel, so we define it here. */
+#define COMM_LEN 16
+ static char line[COMM_LEN + 1];
+
+ if (fgets (line, sizeof (line), comm_file))
+ {
+ /* Strip the trailing newline the kernel appends. */
+ char *nl = strchr (line, '\n');
+
+ if (nl)
+ *nl = '\0';
+ if (*line != '\0')
+ result = line;
+ }
+
+ fclose (comm_file);
+ }
+
+#undef COMM_LEN
+#undef FORMAT
+
+ return result;
+}
+
/* Accepts an integer PID; Returns a string representing a file that
can be opened to get the symbols for the child process. */
regions in the inferior for a corefile. */
static int
-linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
- unsigned long,
- int, int, int, void *), void *obfd)
+linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
{
int pid = PIDGET (inferior_ptid);
char mapsfilename[MAXPATHLEN];
long long addr, endaddr, size, offset, inode;
char permissions[8], device[8], filename[MAXPATHLEN];
int read, write, exec;
- int ret;
struct cleanup *cleanup;
/* Compose the filename for the /proc memory map, and open it. */
if (info_verbose)
{
fprintf_filtered (gdb_stdout,
- "Save segment, %lld bytes at %s (%c%c%c)",
- size, paddress (target_gdbarch, addr),
+ "Save segment, %s bytes at %s (%c%c%c)",
+ plongest (size), paddress (target_gdbarch, addr),
read ? 'r' : ' ',
write ? 'w' : ' ', exec ? 'x' : ' ');
if (filename[0])
static int
find_signalled_thread (struct thread_info *info, void *data)
{
- if (info->stop_signal != TARGET_SIGNAL_0
+ if (info->suspend.stop_signal != TARGET_SIGNAL_0
&& ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
return 1;
iterate_over_threads (find_signalled_thread, NULL);
if (info)
- return info->stop_signal;
+ return info->suspend.stop_signal;
else
return TARGET_SIGNAL_0;
}
char *note_data, int *note_size,
enum target_signal stop_signal)
{
- gdb_gregset_t gregs;
- gdb_fpregset_t fpregs;
unsigned long lwp = ptid_get_lwp (ptid);
struct gdbarch *gdbarch = target_gdbarch;
struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
sect_list = gdbarch_core_regset_sections (gdbarch);
- if (core_regset_p
- && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
- sizeof (gregs))) != NULL
- && regset->collect_regset != NULL)
- regset->collect_regset (regset, regcache, -1,
- &gregs, sizeof (gregs));
- else
- fill_gregset (regcache, &gregs, -1);
-
- note_data = (char *) elfcore_write_prstatus (obfd,
- note_data,
- note_size,
- lwp,
- stop_signal, &gregs);
-
/* The loop below uses the new struct core_regset_section, which stores
the supported section names and sizes for the core file. Note that
note PRSTATUS needs to be treated specially. But the other notes are
if (core_regset_p && sect_list != NULL)
while (sect_list->sect_name != NULL)
{
- /* .reg was already handled above. */
- if (strcmp (sect_list->sect_name, ".reg") == 0)
- {
- sect_list++;
- continue;
- }
regset = gdbarch_regset_from_core_section (gdbarch,
sect_list->sect_name,
sect_list->size);
gdb_regset = xmalloc (sect_list->size);
regset->collect_regset (regset, regcache, -1,
gdb_regset, sect_list->size);
- note_data = (char *) elfcore_write_register_note (obfd,
- note_data,
- note_size,
- sect_list->sect_name,
- gdb_regset,
- sect_list->size);
+
+ if (strcmp (sect_list->sect_name, ".reg") == 0)
+ note_data = (char *) elfcore_write_prstatus
+ (obfd, note_data, note_size,
+ lwp, target_signal_to_host (stop_signal),
+ gdb_regset);
+ else
+ note_data = (char *) elfcore_write_register_note
+ (obfd, note_data, note_size,
+ sect_list->sect_name, gdb_regset,
+ sect_list->size);
xfree (gdb_regset);
sect_list++;
}
the new support, the code below should be deleted. */
else
{
+ gdb_gregset_t gregs;
+ gdb_fpregset_t fpregs;
+
+ if (core_regset_p
+ && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
+ sizeof (gregs)))
+ != NULL && regset->collect_regset != NULL)
+ regset->collect_regset (regset, regcache, -1,
+ &gregs, sizeof (gregs));
+ else
+ fill_gregset (regcache, &gregs, -1);
+
+ note_data = (char *) elfcore_write_prstatus
+ (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
+ &gregs);
+
if (core_regset_p
&& (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
- sizeof (fpregs))) != NULL
- && regset->collect_regset != NULL)
+ sizeof (fpregs)))
+ != NULL && regset->collect_regset != NULL)
regset->collect_regset (regset, regcache, -1,
&fpregs, sizeof (fpregs));
else
linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
{
struct linux_spu_corefile_data args;
+
args.obfd = obfd;
args.note_data = note_data;
args.note_size = note_size;
linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
{
struct linux_nat_corefile_thread_data thread_args;
- struct cleanup *old_chain;
/* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
char fname[16] = { '\0' };
/* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
char psargs[80] = { '\0' };
char *note_data = NULL;
- ptid_t current_ptid = inferior_ptid;
ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
gdb_byte *auxv;
int auxv_len;
int cwd_f = 1;
int exe_f = 1;
int mappings_f = 0;
- int environ_f = 0;
int status_f = 0;
int stat_f = 0;
int all = 0;
}
else
{
- /* [...] (future options here) */
+ /* [...] (future options here). */
}
argv++;
}
if ((procfile = fopen (fname1, "r")) != NULL)
{
struct cleanup *cleanup = make_cleanup_fclose (procfile);
+
if (fgets (buffer, sizeof (buffer), procfile))
printf_filtered ("cmdline = '%s'\n", buffer);
else
if ((procfile = fopen (fname1, "r")) != NULL)
{
struct cleanup *cleanup = make_cleanup_fclose (procfile);
+
while (fgets (buffer, sizeof (buffer), procfile) != NULL)
puts_filtered (buffer);
do_cleanups (cleanup);
if (fscanf (procfile, "%ld ", <mp) > 0)
printf_filtered (_("stime, children: %ld\n"), ltmp);
if (fscanf (procfile, "%ld ", <mp) > 0)
- printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
- ltmp);
+ printf_filtered (_("jiffies remaining in current "
+ "time slice: %ld\n"), ltmp);
if (fscanf (procfile, "%ld ", <mp) > 0)
printf_filtered (_("'nice' value: %ld\n"), ltmp);
if (fscanf (procfile, "%lu ", <mp) > 0)
printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
(unsigned long) ltmp);
if (fscanf (procfile, "%ld ", <mp) > 0)
- printf_filtered (_("start time (jiffies since system boot): %ld\n"),
- ltmp);
+ printf_filtered (_("start time (jiffies since "
+ "system boot): %ld\n"), ltmp);
if (fscanf (procfile, "%lu ", <mp) > 0)
printf_filtered (_("Virtual memory size: %lu\n"),
(unsigned long) ltmp);
if (fscanf (procfile, "%lu ", <mp) > 0)
- printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
+ printf_filtered (_("Resident set size: %lu\n"),
+ (unsigned long) ltmp);
if (fscanf (procfile, "%lu ", <mp) > 0)
printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
if (fscanf (procfile, "%lu ", <mp) > 0)
printf_filtered (_("End of text: 0x%lx\n"), ltmp);
if (fscanf (procfile, "%lu ", <mp) > 0)
printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
-#if 0 /* Don't know how architecture-dependent the rest is...
- Anyway the signal bitmap info is available from "status". */
- if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
+#if 0 /* Don't know how architecture-dependent the rest is...
+ Anyway the signal bitmap info is available from "status". */
+ if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
- if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
+ if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
if (fscanf (procfile, "%ld ", <mp) > 0)
printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
if (fscanf (procfile, "%ld ", <mp) > 0)
printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
- if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
+ if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
#endif
do_cleanups (cleanup);
SIGS to match. */
void
-linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
+linux_proc_pending_signals (int pid, sigset_t *pending,
+ sigset_t *blocked, sigset_t *ignored)
{
FILE *procfile;
char buffer[MAXPATHLEN], fname[MAXPATHLEN];
- int signum;
struct cleanup *cleanup;
sigemptyset (pending);
static LONGEST
linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
- const char *annex, gdb_byte *readbuf,
- const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
+ const char *annex, gdb_byte *readbuf,
+ const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
/* We make the process list snapshot when the object starts to be
read. */
gdb_assert (object == TARGET_OBJECT_OSDATA);
+ if (!annex)
+ {
+ if (offset == 0)
+ {
+ if (len_avail != -1 && len_avail != 0)
+ obstack_free (&obstack, NULL);
+ len_avail = 0;
+ buf = NULL;
+ obstack_init (&obstack);
+ obstack_grow_str (&obstack, "<osdata type=\"types\">\n");
+
+ obstack_xml_printf (&obstack,
+ "<item>"
+ "<column name=\"Type\">processes</column>"
+ "<column name=\"Description\">"
+ "Listing of all processes</column>"
+ "</item>");
+
+ obstack_grow_str0 (&obstack, "</osdata>\n");
+ buf = obstack_finish (&obstack);
+ len_avail = strlen (buf);
+ }
+
+ if (offset >= len_avail)
+ {
+ /* Done. Get rid of the obstack. */
+ obstack_free (&obstack, NULL);
+ buf = NULL;
+ len_avail = 0;
+ return 0;
+ }
+
+ if (len > len_avail - offset)
+ len = len_avail - offset;
+ memcpy (readbuf, buf + offset, len);
+
+ return len;
+ }
+
if (strcmp (annex, "processes") != 0)
return 0;
if (offset == 0)
{
if (len_avail != -1 && len_avail != 0)
- obstack_free (&obstack, NULL);
+ obstack_free (&obstack, NULL);
len_avail = 0;
buf = NULL;
obstack_init (&obstack);
dirp = opendir ("/proc");
if (dirp)
- {
- struct dirent *dp;
- while ((dp = readdir (dirp)) != NULL)
- {
- struct stat statbuf;
- char procentry[sizeof ("/proc/4294967295")];
-
- if (!isdigit (dp->d_name[0])
- || NAMELEN (dp) > sizeof ("4294967295") - 1)
- continue;
-
- sprintf (procentry, "/proc/%s", dp->d_name);
- if (stat (procentry, &statbuf) == 0
- && S_ISDIR (statbuf.st_mode))
- {
- char *pathname;
- FILE *f;
- char cmd[MAXPATHLEN + 1];
- struct passwd *entry;
-
- pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
- entry = getpwuid (statbuf.st_uid);
-
- if ((f = fopen (pathname, "r")) != NULL)
- {
- size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
- if (len > 0)
- {
- int i;
- for (i = 0; i < len; i++)
- if (cmd[i] == '\0')
- cmd[i] = ' ';
- cmd[len] = '\0';
-
- obstack_xml_printf (
- &obstack,
- "<item>"
- "<column name=\"pid\">%s</column>"
- "<column name=\"user\">%s</column>"
- "<column name=\"command\">%s</column>"
- "</item>",
- dp->d_name,
- entry ? entry->pw_name : "?",
- cmd);
- }
- fclose (f);
- }
-
- xfree (pathname);
- }
- }
-
- closedir (dirp);
- }
+ {
+ struct dirent *dp;
+
+ while ((dp = readdir (dirp)) != NULL)
+ {
+ struct stat statbuf;
+ char procentry[sizeof ("/proc/4294967295")];
+
+ if (!isdigit (dp->d_name[0])
+ || NAMELEN (dp) > sizeof ("4294967295") - 1)
+ continue;
+
+ sprintf (procentry, "/proc/%s", dp->d_name);
+ if (stat (procentry, &statbuf) == 0
+ && S_ISDIR (statbuf.st_mode))
+ {
+ char *pathname;
+ FILE *f;
+ char cmd[MAXPATHLEN + 1];
+ struct passwd *entry;
+
+ pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
+ entry = getpwuid (statbuf.st_uid);
+
+ if ((f = fopen (pathname, "r")) != NULL)
+ {
+ size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
+
+ if (len > 0)
+ {
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (cmd[i] == '\0')
+ cmd[i] = ' ';
+ cmd[len] = '\0';
+
+ obstack_xml_printf (
+ &obstack,
+ "<item>"
+ "<column name=\"pid\">%s</column>"
+ "<column name=\"user\">%s</column>"
+ "<column name=\"command\">%s</column>"
+ "</item>",
+ dp->d_name,
+ entry ? entry->pw_name : "?",
+ cmd);
+ }
+ fclose (f);
+ }
+
+ xfree (pathname);
+ }
+ }
+
+ closedir (dirp);
+ }
obstack_grow_str0 (&obstack, "</osdata>\n");
buf = obstack_finish (&obstack);
LONGEST xfer;
if (object == TARGET_OBJECT_AUXV)
- return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
+ return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
offset, len);
if (object == TARGET_OBJECT_OSDATA)
t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
+ t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
t->to_post_startup_inferior = linux_child_post_startup_inferior;
t->to_post_attach = linux_child_post_attach;
{
if (!lwp->stopped)
{
- int pid, status;
ptid_t ptid = lwp->ptid;
if (debug_linux_nat)
if (debug_linux_nat)
{
if (find_thread_ptid (lwp->ptid)->stop_requested)
- fprintf_unfiltered (gdb_stdlog, "\
-LNSL: already stopped/stop_requested %s\n",
+ fprintf_unfiltered (gdb_stdlog,
+ "LNSL: already stopped/stop_requested %s\n",
target_pid_to_str (lwp->ptid));
else
- fprintf_unfiltered (gdb_stdlog, "\
-LNSL: already stopped/no stop_requested yet %s\n",
+ fprintf_unfiltered (gdb_stdlog,
+ "LNSL: already stopped/no "
+ "stop_requested yet %s\n",
target_pid_to_str (lwp->ptid));
}
}
linux_ops->to_close (quitting);
}
+/* When requests are passed down from the linux-nat layer to the
+   single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
+   used.  The address space pointer is stored in the inferior object,
+   but the common code that is passed such ptid can't tell whether
+   lwpid is a "main" process id or not (it assumes so).  We reverse
+   look up the "main" process id from the lwp here.  */
+
+struct address_space *
+linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
+{
+  struct lwp_info *lwp;
+  struct inferior *inf;
+  int pid;
+
+  if (GET_LWP (ptid) == 0)
+    {
+      /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
+	 tgid.  */
+      lwp = find_lwp_pid (ptid);
+      pid = GET_PID (lwp->ptid);
+    }
+  else
+    {
+      /* A (pid,lwpid,0) ptid.  */
+      pid = GET_PID (ptid);
+    }
+
+  /* The inferior for PID must exist; an lwp can't outlive it.  */
+  inf = find_inferior_pid (pid);
+  gdb_assert (inf != NULL);
+  return inf->aspace;
+}
+
+/* Return the processor core that thread PTID was last seen running
+   on, as reported by /proc/PID/task/LWP/stat, or -1 if it cannot be
+   determined (no such file, or the field is missing/unparsable).  */
+
+int
+linux_nat_core_of_thread_1 (ptid_t ptid)
+{
+  struct cleanup *back_to;
+  char *filename;
+  FILE *f;
+  char *content = NULL;
+  char *p;
+  char *ts = NULL;
+  int content_read = 0;
+  int i;
+  int core;
+
+  filename = xstrprintf ("/proc/%d/task/%ld/stat",
+			 GET_PID (ptid), GET_LWP (ptid));
+  back_to = make_cleanup (xfree, filename);
+
+  f = fopen (filename, "r");
+  if (!f)
+    {
+      do_cleanups (back_to);
+      return -1;
+    }
+
+  make_cleanup_fclose (f);
+
+  /* Read the whole file into CONTENT, growing the buffer in 1K
+     chunks; stat files have no fixed upper size bound.  */
+  for (;;)
+    {
+      int n;
+
+      content = xrealloc (content, content_read + 1024);
+      n = fread (content + content_read, 1, 1024, f);
+      content_read += n;
+      if (n < 1024)
+	{
+	  content[content_read] = '\0';
+	  break;
+	}
+    }
+
+  make_cleanup (xfree, content);
+
+  /* The command name (second field) is parenthesized and may itself
+     contain spaces, so skip past the closing ")" before tokenizing.  */
+  p = strchr (content, '(');
+
+  /* Skip ")".  */
+  if (p != NULL)
+    p = strchr (p, ')');
+  if (p != NULL)
+    p++;
+
+  /* If the first field after program name has index 0, then core number is
+     the field with index 36.  There's no constant for that anywhere.  */
+  if (p != NULL)
+    p = strtok_r (p, " ", &ts);
+  for (i = 0; p != NULL && i != 36; ++i)
+    p = strtok_r (NULL, " ", &ts);
+
+  /* Check for exactly one conversion; sscanf can also return EOF on
+     failure, which a "== 0" test would wrongly treat as success,
+     leaving CORE uninitialized.  */
+  if (p == NULL || sscanf (p, "%d", &core) != 1)
+    core = -1;
+
+  do_cleanups (back_to);
+
+  return core;
+}
+
+/* Return the cached value of the processor core for thread PTID, or
+   -1 when no lwp record exists for it.  */
+
+int
+linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
+{
+  struct lwp_info *lp = find_lwp_pid (ptid);
+
+  return lp != NULL ? lp->core : -1;
+}
+
void
linux_nat_add_target (struct target_ops *t)
{
t->to_mourn_inferior = linux_nat_mourn_inferior;
t->to_thread_alive = linux_nat_thread_alive;
t->to_pid_to_str = linux_nat_pid_to_str;
+ t->to_thread_name = linux_nat_thread_name;
t->to_has_thread_control = tc_schedlock;
+ t->to_thread_address_space = linux_nat_thread_address_space;
+ t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
+ t->to_stopped_data_address = linux_nat_stopped_data_address;
t->to_can_async_p = linux_nat_can_async_p;
t->to_is_async_p = linux_nat_is_async_p;
t->to_supports_multi_process = linux_nat_supports_multi_process;
+ t->to_core_of_thread = linux_nat_core_of_thread;
+
/* We don't change the stratum; this target will sit at
process_stratum and thread_db will set at thread_stratum. This
is a little strange, since this is a multi-threaded-capable
void
_initialize_linux_nat (void)
{
- sigset_t mask;
-
add_info ("proc", linux_nat_info_proc_cmd, _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\