/* GNU/Linux native-dependent code common to multiple platforms.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
This file is part of GDB.
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
+#include <pwd.h>
+#include <sys/types.h>
+#include "gdb_dirent.h"
+#include "xml-support.h"
+
+#ifdef HAVE_PERSONALITY
+# include <sys/personality.h>
+# if !HAVE_DECL_ADDR_NO_RANDOMIZE
+# define ADDR_NO_RANDOMIZE 0x0040000
+# endif
+#endif /* HAVE_PERSONALITY */
/* This comment documents high-level logic of this file.
#endif
#ifndef PTRACE_GETSIGINFO
-#define PTRACE_GETSIGINFO 0x4202
+# define PTRACE_GETSIGINFO 0x4202
+# define PTRACE_SETSIGINFO 0x4203
#endif
/* The single-threaded native GNU/Linux target_ops. We save a pointer for
/* The method to call, if any, when a new thread is attached. */
static void (*linux_nat_new_thread) (ptid_t);
+/* The method to call, if any, when the siginfo object needs to be
+ converted between the layout returned by ptrace, and the layout in
+ the architecture of the inferior. */
+static int (*linux_nat_siginfo_fixup) (struct siginfo *,
+ gdb_byte *,
+ int);
+
/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
Called by our to_xfer_partial. */
static LONGEST (*super_xfer_partial) (struct target_ops *,
value);
}
+static int disable_randomization = 1;
+
+/* "show disable-randomization" command callback.  Reports the current
+   VALUE when the host supports personality(2); otherwise reports that
+   the feature is unsupported on this platform.  */
+
+static void
+show_disable_randomization (struct ui_file *file, int from_tty,
+ struct cmd_list_element *c, const char *value)
+{
+#ifdef HAVE_PERSONALITY
+ fprintf_filtered (file, _("\
+Disabling randomization of debuggee's virtual address space is %s.\n"),
+ value);
+#else /* !HAVE_PERSONALITY */
+ /* personality(2) support was not detected at configure time.  */
+ fputs_filtered (_("\
+Disabling randomization of debuggee's virtual address space is unsupported on\n\
+this platform.\n"), file);
+#endif /* !HAVE_PERSONALITY */
+}
+
+/* "set disable-randomization" command callback.  Errors out when the
+   host lacks personality(2) support, so the setting cannot be changed
+   to a value that would silently have no effect.  */
+
+static void
+set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
+{
+#ifndef HAVE_PERSONALITY
+ error (_("\
+Disabling randomization of debuggee's virtual address space is unsupported on\n\
+this platform."));
+#endif /* !HAVE_PERSONALITY */
+}
+
static int linux_parent_pid;
struct simple_pid_list
/* Async mode support */
-/* True if async mode is currently on. */
-static int linux_nat_async_enabled;
-
/* Zero if the async mode, although enabled, is masked, which means
linux_nat_wait should behave as if async mode was off. */
static int linux_nat_async_mask_value = 1;
/* Number of queued events in the pipe. */
static volatile int linux_nat_num_queued_events;
-/* If async mode is on, true if we're listening for events; false if
- target events are blocked. */
-static int linux_nat_async_events_enabled;
+/* The possible SIGCHLD handling states. */
-static int linux_nat_async_events (int enable);
+enum sigchld_state
+{
+ /* SIGCHLD disabled, with action set to sigchld_handler, for the
+ sigsuspend in linux_nat_wait. */
+ sigchld_sync,
+ /* SIGCHLD enabled, with action set to async_sigchld_handler. */
+ sigchld_async,
+ /* Set SIGCHLD to default action. Used while creating an
+ inferior. */
+ sigchld_default
+};
+
+/* The current SIGCHLD handling state. */
+static enum sigchld_state linux_nat_async_events_state;
+
+static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
static void pipe_to_local_event_queue (void);
static void local_event_queue_to_pipe (void);
static void linux_nat_event_pipe_push (int pid, int status, int options);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);
+static int stop_callback (struct lwp_info *lp, void *data);
+
/* Captures the result of a successful waitpid call, along with the
options used in that call. */
struct waitpid_result
in the async SIGCHLD handler. */
static struct waitpid_result *waitpid_queue = NULL;
+/* Similarly to `waitpid', but check the local event queue instead of
+ querying the kernel queue. If PEEK, don't remove the event found
+ from the queue. */
+
static int
-queued_waitpid (int pid, int *status, int flags)
+queued_waitpid_1 (int pid, int *status, int flags, int peek)
{
struct waitpid_result *msg = waitpid_queue, *prev = NULL;
if (debug_linux_nat_async)
fprintf_unfiltered (gdb_stdlog,
"\
-QWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n",
- linux_nat_async_events_enabled,
+QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
+ linux_nat_async_events_state,
linux_nat_num_queued_events);
if (flags & __WALL)
{
int pid;
- if (prev)
- prev->next = msg->next;
- else
- waitpid_queue = msg->next;
-
- msg->next = NULL;
if (status)
*status = msg->status;
pid = msg->pid;
if (debug_linux_nat_async)
fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
pid, msg->status);
- xfree (msg);
+
+ if (!peek)
+ {
+ if (prev)
+ prev->next = msg->next;
+ else
+ waitpid_queue = msg->next;
+
+ msg->next = NULL;
+ xfree (msg);
+ }
return pid;
}
return -1;
}
+/* Similarly to `waitpid', but check the local event queue. */
+
+static int
+queued_waitpid (int pid, int *status, int flags)
+{
+ /* Non-peeking variant: a matching event, if found, is removed from
+    the local queue (PEEK == 0).  */
+ return queued_waitpid_1 (pid, status, flags, 0);
+}
+
static void
push_waitpid (int pid, int status, int options)
{
int ret;
/* There should be no concurrent calls to waitpid. */
- gdb_assert (!linux_nat_async_events_enabled);
+ gdb_assert (linux_nat_async_events_state == sigchld_sync);
ret = queued_waitpid (pid, status, flags);
if (ret != -1)
{
int child_pid, ret, status;
long second_pid;
+ enum sigchld_state async_events_original_state;
+
+ async_events_original_state = linux_nat_async_events (sigchld_sync);
linux_supports_tracefork_flag = 0;
linux_supports_tracevforkdone_flag = 0;
if (ret != 0)
{
warning (_("linux_test_for_tracefork: failed to kill child"));
+ linux_nat_async_events (async_events_original_state);
return;
}
warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
"killed child"), status);
+ linux_nat_async_events (async_events_original_state);
return;
}
if (ret != 0)
warning (_("linux_test_for_tracefork: failed to kill child"));
my_waitpid (child_pid, &status, 0);
+
+ linux_nat_async_events (async_events_original_state);
}
/* Return non-zero iff we have tracefork functionality available.
parent_pid = ptid_get_lwp (last_ptid);
if (parent_pid == 0)
parent_pid = ptid_get_pid (last_ptid);
- child_pid = last_status.value.related_pid;
+ child_pid = PIDGET (last_status.value.related_pid);
if (! follow_child)
{
else
{
struct fork_info *fp;
+ struct inferior *parent_inf, *child_inf;
+
+ /* Add process to GDB's tables. */
+ child_inf = add_inferior (child_pid);
+
+ parent_inf = find_inferior_pid (GET_PID (last_ptid));
+ child_inf->attach_flag = parent_inf->attach_flag;
+
/* Retain child fork in ptrace (stopped) state. */
fp = find_fork_pid (child_pid);
if (!fp)
}
else
{
+ struct thread_info *last_tp = find_thread_pid (last_ptid);
+ struct thread_info *tp;
char child_pid_spelling[40];
+ struct inferior *parent_inf, *child_inf;
+
+ /* Copy user stepping state to the new inferior thread. */
+ struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint;
+ CORE_ADDR step_range_start = last_tp->step_range_start;
+ CORE_ADDR step_range_end = last_tp->step_range_end;
+ struct frame_id step_frame_id = last_tp->step_frame_id;
+
+ /* Otherwise, deleting the parent would get rid of this
+ breakpoint. */
+ last_tp->step_resume_breakpoint = NULL;
/* Needed to keep the breakpoint lists in sync. */
if (! has_vforked)
child_pid);
}
+ /* Add the new inferior first, so that the target_detach below
+ doesn't unpush the target. */
+
+ child_inf = add_inferior (child_pid);
+
+ parent_inf = find_inferior_pid (GET_PID (last_ptid));
+ child_inf->attach_flag = parent_inf->attach_flag;
+
/* If we're vforking, we may want to hold on to the parent until
the child exits or execs. At exec time we can remove the old
breakpoints from the parent and detach it; at exit time we
safely resume it. */
if (has_vforked)
- linux_parent_pid = parent_pid;
+ {
+ linux_parent_pid = parent_pid;
+ detach_inferior (parent_pid);
+ }
else if (!detach_fork)
{
struct fork_info *fp;
if (!fp)
fp = add_fork (parent_pid);
fork_save_infrun_state (fp, 0);
+
+ /* Also add an entry for the child fork. */
+ fp = find_fork_pid (child_pid);
+ if (!fp)
+ fp = add_fork (child_pid);
+ fork_save_infrun_state (fp, 0);
}
else
target_detach (NULL, 0);
inferior_ptid = ptid_build (child_pid, child_pid, 0);
- /* Reinstall ourselves, since we might have been removed in
- target_detach (which does other necessary cleanup). */
-
- push_target (ops);
linux_nat_switch_fork (inferior_ptid);
check_for_thread_db ();
+ tp = inferior_thread ();
+ tp->step_resume_breakpoint = step_resume_breakpoint;
+ tp->step_range_start = step_range_start;
+ tp->step_range_end = step_range_end;
+ tp->step_frame_id = step_frame_id;
+
/* Reset breakpoints in the child as appropriate. */
follow_inferior_reset_breakpoints ();
}
/* SIGCHLD action for asynchronous mode. */
static struct sigaction async_sigchld_action;
+
+/* SIGCHLD default action, to pass to new inferiors. */
+static struct sigaction sigchld_default_action;
\f
/* Prototypes for local functions. */
static int stop_wait_callback (struct lwp_info *lp, void *data);
-static int linux_nat_thread_alive (ptid_t ptid);
+static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);
static int cancel_breakpoint (struct lwp_info *lp);
{
struct lwp_info *lp;
- init_thread_list ();
init_lwp_list ();
lp = add_lwp (new_ptid);
- add_thread_silent (new_ptid);
lp->stopped = 1;
-}
-/* Record a PTID for later deletion. */
-
-struct saved_ptids
-{
- ptid_t ptid;
- struct saved_ptids *next;
-};
-static struct saved_ptids *threads_to_delete;
-
-static void
-record_dead_thread (ptid_t ptid)
-{
- struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
- p->ptid = ptid;
- p->next = threads_to_delete;
- threads_to_delete = p;
-}
-
-/* Delete any dead threads which are not the current thread. */
-
-static void
-prune_lwps (void)
-{
- struct saved_ptids **p = &threads_to_delete;
-
- while (*p)
- if (! ptid_equal ((*p)->ptid, inferior_ptid))
- {
- struct saved_ptids *tmp = *p;
- delete_thread (tmp->ptid);
- *p = tmp->next;
- xfree (tmp);
- }
- else
- p = &(*p)->next;
+ init_thread_list ();
+ add_thread_silent (new_ptid);
}
/* Handle the exit of a single thread LP. */
if (print_thread_events)
printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
- /* Core GDB cannot deal with us deleting the current thread. */
- if (!ptid_equal (lp->ptid, inferior_ptid))
- delete_thread (lp->ptid);
- else
- record_dead_thread (lp->ptid);
+ delete_thread (lp->ptid);
}
delete_lwp (lp->ptid);
lin_lwp_attach_lwp (ptid_t ptid)
{
struct lwp_info *lp;
- int async_events_were_enabled = 0;
+ enum sigchld_state async_events_original_state;
gdb_assert (is_lwp (ptid));
- if (target_can_async_p ())
- async_events_were_enabled = linux_nat_async_events (0);
+ async_events_original_state = linux_nat_async_events (sigchld_sync);
lp = find_lwp_pid (ptid);
lp->stopped = 1;
}
- if (async_events_were_enabled)
- linux_nat_async_events (1);
-
+ linux_nat_async_events (async_events_original_state);
return 0;
}
static void
-linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
+linux_nat_create_inferior (struct target_ops *ops,
+ char *exec_file, char *allargs, char **env,
int from_tty)
{
int saved_async = 0;
+#ifdef HAVE_PERSONALITY
+ int personality_orig = 0, personality_set = 0;
+#endif /* HAVE_PERSONALITY */
/* The fork_child mechanism is synchronous and calls target_wait, so
we have to mask the async mode. */
if (target_can_async_p ())
+ /* Mask async mode. Creating a child requires a loop calling
+ wait_for_inferior currently. */
saved_async = linux_nat_async_mask (0);
else
{
sigdelset (&suspend_mask, SIGCHLD);
}
- linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);
+ /* Set SIGCHLD to the default action, until after execing the child,
+ since the inferior inherits the superior's signal mask. It will
+ be blocked again in linux_nat_wait, which is only reached after
+ the inferior execing. */
+ linux_nat_async_events (sigchld_default);
+
+#ifdef HAVE_PERSONALITY
+ if (disable_randomization)
+ {
+ errno = 0;
+ personality_orig = personality (0xffffffff);
+ if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
+ {
+ personality_set = 1;
+ personality (personality_orig | ADDR_NO_RANDOMIZE);
+ }
+ if (errno != 0 || (personality_set
+ && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
+ warning (_("Error disabling address space randomization: %s"),
+ safe_strerror (errno));
+ }
+#endif /* HAVE_PERSONALITY */
+
+ linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
+
+#ifdef HAVE_PERSONALITY
+ if (personality_set)
+ {
+ errno = 0;
+ personality (personality_orig);
+ if (errno != 0)
+ warning (_("Error restoring address space randomization: %s"),
+ safe_strerror (errno));
+ }
+#endif /* HAVE_PERSONALITY */
if (saved_async)
linux_nat_async_mask (saved_async);
}
static void
-linux_nat_attach (char *args, int from_tty)
+linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
struct lwp_info *lp;
int status;
+ ptid_t ptid;
/* FIXME: We should probably accept a list of process id's, and
attach all of them. */
- linux_ops->to_attach (args, from_tty);
+ linux_ops->to_attach (ops, args, from_tty);
if (!target_can_async_p ())
{
sigdelset (&suspend_mask, SIGCHLD);
}
+ /* The ptrace base target adds the main thread with (pid,0,0)
+ format. Decorate it with lwp info. */
+ ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
+ thread_change_ptid (inferior_ptid, ptid);
+
/* Add the initial process as the first LWP to the list. */
- inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
- lp = add_lwp (inferior_ptid);
+ lp = add_lwp (ptid);
status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
&lp->signalled);
lp->stopped = 1;
- /* If this process is not using thread_db, then we still don't
- detect any other threads, but add at least this one. */
- add_thread_silent (lp->ptid);
-
/* Save the wait status to report later. */
lp->resumed = 1;
if (debug_linux_nat)
events are always cached in waitpid_queue. */
*status = 0;
- if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
+
+ if (non_stop)
{
- if (stop_signal != TARGET_SIGNAL_0
- && signal_pass_state (stop_signal))
- *status = W_STOPCODE (target_signal_to_host (stop_signal));
+ enum target_signal signo = TARGET_SIGNAL_0;
+
+ if (is_executing (lp->ptid))
+ {
+ /* If the core thought this lwp was executing --- e.g., the
+ executing property hasn't been updated yet, but the
+ thread has been stopped with a stop_callback /
+ stop_wait_callback sequence (see linux_nat_detach for
+ example) --- we can only have pending events in the local
+ queue. */
+ if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
+ {
+ if (WIFSTOPPED (*status))
+ signo = target_signal_from_host (WSTOPSIG (*status));
+
+ /* If not stopped, then the lwp is gone, no use in
+ resending a signal. */
+ }
+ }
+ else
+ {
+ /* If the core knows the thread is not executing, then we
+ have the last signal recorded in
+ thread_info->stop_signal. */
+
+ struct thread_info *tp = find_thread_pid (lp->ptid);
+ signo = tp->stop_signal;
+ }
+
+ if (signo != TARGET_SIGNAL_0
+ && !signal_pass_state (signo))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "\
+GPT: lwp %s had signal %s, but it is in no pass state\n",
+ target_pid_to_str (lp->ptid),
+ target_signal_to_string (signo));
+ }
+ else
+ {
+ if (signo != TARGET_SIGNAL_0)
+ *status = W_STOPCODE (target_signal_to_host (signo));
+
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "GPT: lwp %s as pending signal %s\n",
+ target_pid_to_str (lp->ptid),
+ target_signal_to_string (signo));
+ }
}
- else if (target_can_async_p ())
- queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
else
- *status = lp->status;
+ {
+ if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
+ {
+ struct thread_info *tp = find_thread_pid (lp->ptid);
+ if (tp->stop_signal != TARGET_SIGNAL_0
+ && signal_pass_state (tp->stop_signal))
+ *status = W_STOPCODE (target_signal_to_host (tp->stop_signal));
+ }
+ else if (target_can_async_p ())
+ queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
+ else
+ *status = lp->status;
+ }
return 0;
}
}
static void
-linux_nat_detach (char *args, int from_tty)
+linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
{
int pid;
int status;
if (target_can_async_p ())
linux_nat_async (NULL, 0);
+ /* Stop all threads before detaching. ptrace requires that the
+ thread is stopped to successfully detach. */
+ iterate_over_lwps (stop_callback, NULL);
+ /* ... and wait until all of them have reported back that
+ they're no longer running. */
+ iterate_over_lwps (stop_wait_callback, NULL);
+
iterate_over_lwps (detach_callback, NULL);
/* Only the initial process should be left right now. */
/* Destroy LWP info; it's no longer valid. */
init_lwp_list ();
- pid = GET_PID (inferior_ptid);
- inferior_ptid = pid_to_ptid (pid);
- linux_ops->to_detach (args, from_tty);
+ pid = ptid_get_pid (inferior_ptid);
if (target_can_async_p ())
drain_queued_events (pid);
+
+ if (forks_exist_p ())
+ {
+ /* Multi-fork case. The current inferior_ptid is being detached
+ from, but there are other viable forks to debug. Detach from
+ the current fork, and context-switch to the first
+ available. */
+ linux_fork_detach (args, from_tty);
+
+ if (non_stop && target_can_async_p ())
+ target_async (inferior_event_handler, 0);
+ }
+ else
+ linux_ops->to_detach (ops, args, from_tty);
}
/* Resume LP. */
{
if (lp->stopped && lp->status == 0)
{
- linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
+ linux_ops->to_resume (linux_ops,
+ pid_to_ptid (GET_LWP (lp->ptid)),
0, TARGET_SIGNAL_0);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
lp->step = 0;
memset (&lp->siginfo, 0, sizeof (lp->siginfo));
}
+ else if (lp->stopped && debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
+ target_pid_to_str (lp->ptid));
+ else if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
+ target_pid_to_str (lp->ptid));
return 0;
}
}
static void
-linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
+linux_nat_resume (struct target_ops *ops,
+ ptid_t ptid, int step, enum target_signal signo)
{
struct lwp_info *lp;
int resume_all;
signo ? strsignal (signo) : "0",
target_pid_to_str (inferior_ptid));
- prune_lwps ();
-
if (target_can_async_p ())
/* Block events while we're here. */
- linux_nat_async_events (0);
+ linux_nat_async_events (sigchld_sync);
/* A specific PTID means `step only this process id'. */
resume_all = (PIDGET (ptid) == -1);
- if (resume_all)
- iterate_over_lwps (resume_set_callback, NULL);
- else
- iterate_over_lwps (resume_clear_callback, NULL);
+ if (non_stop && resume_all)
+ internal_error (__FILE__, __LINE__,
+ "can't resume all in non-stop mode");
+
+ if (!non_stop)
+ {
+ if (resume_all)
+ iterate_over_lwps (resume_set_callback, NULL);
+ else
+ iterate_over_lwps (resume_clear_callback, NULL);
+ }
/* If PID is -1, it's the current inferior that should be
handled specially. */
lp = find_lwp_pid (ptid);
gdb_assert (lp != NULL);
+ /* Convert to something the lower layer understands. */
ptid = pid_to_ptid (GET_LWP (lp->ptid));
/* Remember if we're stepping. */
if (lp->status && WIFSTOPPED (lp->status))
{
- int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
+ int saved_signo;
+ struct inferior *inf;
- if (signal_stop_state (saved_signo) == 0
+ inf = find_inferior_pid (ptid_get_pid (ptid));
+ gdb_assert (inf);
+ saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
+
+ /* Defer to common code if we're gaining control of the
+ inferior. */
+ if (inf->stop_soon == NO_STOP_QUIETLY
+ && signal_stop_state (saved_signo) == 0
&& signal_print_state (saved_signo) == 0
&& signal_pass_state (saved_signo) == 1)
{
if (resume_all)
iterate_over_lwps (resume_callback, NULL);
- linux_ops->to_resume (ptid, step, signo);
+ linux_ops->to_resume (linux_ops, ptid, step, signo);
memset (&lp->siginfo, 0, sizeof (lp->siginfo));
if (debug_linux_nat)
signo ? strsignal (signo) : "0");
if (target_can_async_p ())
- {
- target_executing = 1;
- target_async (inferior_event_handler, 0);
- }
+ target_async (inferior_event_handler, 0);
}
/* Issue kill to specified lwp. */
_("wait returned unexpected status 0x%x"), status);
}
- ourstatus->value.related_pid = new_pid;
+ ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
if (event == PTRACE_EVENT_FORK)
ourstatus->kind = TARGET_WAITKIND_FORKED;
ourstatus->kind = TARGET_WAITKIND_VFORKED;
else
{
+ struct cleanup *old_chain;
+
ourstatus->kind = TARGET_WAITKIND_IGNORE;
new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
new_lp->cloned = 1;
+ new_lp->stopped = 1;
if (WSTOPSIG (status) != SIGSTOP)
{
else
status = 0;
- if (stopping)
- new_lp->stopped = 1;
- else
+ if (non_stop)
{
+ /* Add the new thread to GDB's lists as soon as possible
+ so that:
+
+ 1) the frontend doesn't have to wait for a stop to
+ display them, and,
+
+ 2) we tag it with the correct running state. */
+
+ /* If the thread_db layer is active, let it know about
+ this new thread, and add it to GDB's list. */
+ if (!thread_db_attach_lwp (new_lp->ptid))
+ {
+ /* We're not using thread_db. Add it to GDB's
+ list. */
+ target_post_attach (GET_LWP (new_lp->ptid));
+ add_thread (new_lp->ptid);
+ }
+
+ if (!stopping)
+ {
+ set_running (new_lp->ptid, 1);
+ set_executing (new_lp->ptid, 1);
+ }
+ }
+
+ if (!stopping)
+ {
+ new_lp->stopped = 0;
new_lp->resumed = 1;
- ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
+ ptrace (PTRACE_CONT, new_pid, 0,
status ? WSTOPSIG (status) : 0);
}
linux_parent_pid = 0;
}
+ /* At this point, all inserted breakpoints are gone. Doing this
+ as soon as we detect an exec prevents the badness of deleting
+ a breakpoint writing the current "shadow contents" to lift
+ the bp. That shadow is NOT valid after an exec.
+
+ Note that we have to do this after the detach_breakpoints
+ call above, otherwise breakpoints wouldn't be lifted from the
+ parent on a vfork, because detach_breakpoints would think
+ that breakpoints are not inserted. */
+ mark_breakpoints_out ();
return 0;
}
return 0;
}
-/* Wait until LP is stopped. If DATA is non-null it is interpreted as
- a pointer to a set of signals to be flushed immediately. */
+/* Return non-zero if LWP PID has a pending SIGINT. */
static int
-stop_wait_callback (struct lwp_info *lp, void *data)
+linux_nat_has_pending_sigint (int pid)
+{
+ /* NOTE(review): `int i;` in the original hunk was unused; dropped.  */
+ sigset_t pending, blocked, ignored;
+
+ /* Fetch LWP PID's pending/blocked/ignored signal sets (presumably
+    read from /proc — see linux_proc_pending_signals).  */
+ linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
+
+ /* A pending SIGINT that is ignored will never be delivered, so
+    don't report it as pending.  */
+ if (sigismember (&pending, SIGINT)
+ && !sigismember (&ignored, SIGINT))
+ return 1;
+
+ return 0;
+}
+
+/* Set a flag in LP indicating that we should ignore its next SIGINT. */
+
+static int
+set_ignore_sigint (struct lwp_info *lp, void *data)
+{
+ /* If a thread has a pending SIGINT, consume it; otherwise, set a
+ flag to consume the next one. */
+ if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
+ && WSTOPSIG (lp->status) == SIGINT)
+ lp->status = 0;
+ else
+ lp->ignore_sigint = 1;
+
+ /* Always return 0 so iterate_over_lwps-style iteration continues
+    over every LWP.  DATA is unused.  */
+ return 0;
+}
+
+/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
+ This function is called after we know the LWP has stopped; if the LWP
+ stopped before the expected SIGINT was delivered, then it will never have
+ arrived. Also, if the signal was delivered to a shared queue and consumed
+ by a different thread, it will never be delivered to this LWP. */
+
+static void
+maybe_clear_ignore_sigint (struct lwp_info *lp)
{
- sigset_t *flush_mask = data;
+ /* Fast path: nothing to clear.  */
+ if (!lp->ignore_sigint)
+ return;
+ /* The flag predicted a SIGINT that never arrived (or was consumed
+    elsewhere); drop it so the next SIGINT is handled normally.  */
+ if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "MCIS: Clearing bogus flag for %s\n",
+ target_pid_to_str (lp->ptid));
+ lp->ignore_sigint = 0;
+ }
+}
+
+/* Wait until LP is stopped. */
+
+static int
+stop_wait_callback (struct lwp_info *lp, void *data)
+{
if (!lp->stopped)
{
int status;
if (status == 0)
return 0;
- /* Ignore any signals in FLUSH_MASK. */
- if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
+ if (lp->ignore_sigint && WIFSTOPPED (status)
+ && WSTOPSIG (status) == SIGINT)
{
- if (!lp->signalled)
- {
- lp->stopped = 1;
- return 0;
- }
+ lp->ignore_sigint = 0;
errno = 0;
ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "PTRACE_CONT %s, 0, 0 (%s)\n",
+ "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
target_pid_to_str (lp->ptid),
errno ? safe_strerror (errno) : "OK");
- return stop_wait_callback (lp, flush_mask);
+ return stop_wait_callback (lp, NULL);
}
+ maybe_clear_ignore_sigint (lp);
+
if (WSTOPSIG (status) != SIGSTOP)
{
if (WSTOPSIG (status) == SIGTRAP)
}
/* Hold this event/waitstatus while we check to see if
there are any more (we still want to get that SIGSTOP). */
- stop_wait_callback (lp, data);
+ stop_wait_callback (lp, NULL);
if (target_can_async_p ())
{
/* There was no gdb breakpoint set at pc. Put
the event back in the queue. */
if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "SWC: kill %s, %s\n",
- target_pid_to_str (lp->ptid),
- status_to_str ((int) status));
- kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
+ fprintf_unfiltered (gdb_stdlog, "\
+SWC: leaving SIGTRAP in local queue of %s\n", target_pid_to_str (lp->ptid));
+ push_waitpid (GET_LWP (lp->ptid),
+ W_STOPCODE (SIGTRAP),
+ lp->cloned ? __WCLONE : 0);
}
}
else
/* Hold this event/waitstatus while we check to see if
there are any more (we still want to get that SIGSTOP). */
- stop_wait_callback (lp, data);
+ stop_wait_callback (lp, NULL);
/* If the lp->status field is still empty, use it to
hold this event. If not, then this event must be
return 0;
}
-/* Check whether PID has any pending signals in FLUSH_MASK. If so set
- the appropriate bits in PENDING, and return 1 - otherwise return 0. */
-
-static int
-linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
-{
- sigset_t blocked, ignored;
- int i;
-
- linux_proc_pending_signals (pid, pending, &blocked, &ignored);
-
- if (!flush_mask)
- return 0;
-
- for (i = 1; i < NSIG; i++)
- if (sigismember (pending, i))
- if (!sigismember (flush_mask, i)
- || sigismember (&blocked, i)
- || sigismember (&ignored, i))
- sigdelset (pending, i);
-
- if (sigisemptyset (pending))
- return 0;
-
- return 1;
-}
-
-/* DATA is interpreted as a mask of signals to flush. If LP has
- signals pending, and they are all in the flush mask, then arrange
- to flush them. LP should be stopped, as should all other threads
- it might share a signal queue with. */
-
-static int
-flush_callback (struct lwp_info *lp, void *data)
-{
- sigset_t *flush_mask = data;
- sigset_t pending, intersection, blocked, ignored;
- int pid, status;
-
- /* Normally, when an LWP exits, it is removed from the LWP list. The
- last LWP isn't removed till later, however. So if there is only
- one LWP on the list, make sure it's alive. */
- if (lwp_list == lp && lp->next == NULL)
- if (!linux_nat_thread_alive (lp->ptid))
- return 0;
-
- /* Just because the LWP is stopped doesn't mean that new signals
- can't arrive from outside, so this function must be careful of
- race conditions. However, because all threads are stopped, we
- can assume that the pending mask will not shrink unless we resume
- the LWP, and that it will then get another signal. We can't
- control which one, however. */
-
- if (lp->status)
- {
- if (debug_linux_nat)
- printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
- if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
- lp->status = 0;
- }
-
- /* While there is a pending signal we would like to flush, continue
- the inferior and collect another signal. But if there's already
- a saved status that we don't want to flush, we can't resume the
- inferior - if it stopped for some other reason we wouldn't have
- anywhere to save the new status. In that case, we must leave the
- signal unflushed (and possibly generate an extra SIGINT stop).
- That's much less bad than losing a signal. */
- while (lp->status == 0
- && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
- {
- int ret;
-
- errno = 0;
- ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stderr,
- "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
-
- lp->stopped = 0;
- stop_wait_callback (lp, flush_mask);
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stderr,
- "FC: Wait finished; saved status is %d\n",
- lp->status);
- }
-
- return 0;
-}
-
/* Return non-zero if LP has a wait status pending. */
static int
gdb_assert (count != NULL);
- /* Count only LWPs that have a SIGTRAP event pending. */
- if (lp->status != 0
+ /* Count only resumed LWPs that have a SIGTRAP event pending. */
+ if (lp->status != 0 && lp->resumed
&& WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
(*count)++;
gdb_assert (selector != NULL);
- /* Select only LWPs that have a SIGTRAP event pending. */
- if (lp->status != 0
+ /* Select only resumed LWPs that have a SIGTRAP event pending. */
+ if (lp->status != 0 && lp->resumed
&& WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
if ((*selector)-- == 0)
return 1;
/* Check if the thread has exited. */
if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
{
- /* If this is the main thread, we must stop all threads and
- verify if they are still alive. This is because in the nptl
- thread model, there is no signal issued for exiting LWPs
+ /* If this is the main thread, we must stop all threads and verify
+ if they are still alive. This is because in the nptl thread model
+ on Linux 2.4, there is no signal issued for exiting LWPs
other than the main thread. We only get the main thread exit
signal once all child threads have already exited. If we
stop all the threads and use the stop_wait_callback to check
"LLW: %s exited.\n",
target_pid_to_str (lp->ptid));
- exit_lwp (lp);
-
- /* If there is at least one more LWP, then the exit signal was
- not the end of the debugged application and should be
- ignored. */
- if (num_lwps > 0)
- {
- /* Make sure there is at least one thread running. */
- gdb_assert (iterate_over_lwps (running_callback, NULL));
-
- /* Discard the event. */
- return NULL;
- }
+ if (num_lwps > 1)
+ {
+ /* If there is at least one more LWP, then the exit signal
+ was not the end of the debugged application and should be
+ ignored. */
+ exit_lwp (lp);
+ return NULL;
+ }
}
/* Check if the current LWP has previously exited. In the nptl
thread model, LWPs other than the main thread do not issue
signals when they exit so we must check whenever the thread has
stopped. A similar check is made in stop_wait_callback(). */
- if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
+ if (num_lwps > 1 && !linux_thread_alive (lp->ptid))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
registers_changed ();
- linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
+ linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
lp->step, TARGET_SIGNAL_0);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
return NULL;
}
+ /* Make sure we don't report a SIGINT that we have already displayed
+ for another thread. */
+ if (lp->ignore_sigint
+ && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLW: Delayed SIGINT caught for %s.\n",
+ target_pid_to_str (lp->ptid));
+
+ /* This is a delayed SIGINT. */
+ lp->ignore_sigint = 0;
+
+ registers_changed ();
+ linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
+ lp->step, TARGET_SIGNAL_0);
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLW: %s %s, 0, 0 (discard SIGINT)\n",
+ lp->step ?
+ "PTRACE_SINGLESTEP" : "PTRACE_CONT",
+ target_pid_to_str (lp->ptid));
+
+ lp->stopped = 0;
+ gdb_assert (lp->resumed);
+
+ /* Discard the event. */
+ return NULL;
+ }
+
/* An interesting event. */
gdb_assert (lp);
return lp;
}
static ptid_t
-linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
+linux_nat_wait (struct target_ops *ops,
+ ptid_t ptid, struct target_waitstatus *ourstatus)
{
struct lwp_info *lp = NULL;
int options = 0;
int status = 0;
pid_t pid = PIDGET (ptid);
- sigset_t flush_mask;
if (debug_linux_nat_async)
fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
{
gdb_assert (!is_lwp (inferior_ptid));
- inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
- GET_PID (inferior_ptid));
+ /* Upgrade the main thread's ptid. */
+ thread_change_ptid (inferior_ptid,
+ BUILD_LWP (GET_PID (inferior_ptid),
+ GET_PID (inferior_ptid)));
+
lp = add_lwp (inferior_ptid);
lp->resumed = 1;
- /* Add the main thread to GDB's thread list. */
- add_thread_silent (lp->ptid);
}
- sigemptyset (&flush_mask);
-
- if (target_can_async_p ())
- /* Block events while we're here. */
- target_async (NULL, 0);
+ /* Block events while we're here. */
+ linux_nat_async_events (sigchld_sync);
retry:
/* Resume the thread. It should halt immediately returning the
pending SIGSTOP. */
registers_changed ();
- linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
+ linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
lp->step, TARGET_SIGNAL_0);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
{
/* Causes SIGINT to be passed on to the attached process. */
set_sigint_trap ();
- set_sigio_trap ();
}
while (status == 0)
}
if (!target_can_async_p ())
- {
- clear_sigio_trap ();
- clear_sigint_trap ();
- }
+ clear_sigint_trap ();
gdb_assert (lp);
if (WIFSTOPPED (status))
{
int signo = target_signal_from_host (WSTOPSIG (status));
+ struct inferior *inf;
+
+ inf = find_inferior_pid (ptid_get_pid (lp->ptid));
+ gdb_assert (inf);
- /* If we get a signal while single-stepping, we may need special
- care, e.g. to skip the signal handler. Defer to common code. */
+ /* Defer to common code if we get a signal while
+ single-stepping, since that may need special care, e.g. to
+ skip the signal handler, or, if we're gaining control of the
+ inferior. */
if (!lp->step
+ && inf->stop_soon == NO_STOP_QUIETLY
&& signal_stop_state (signo) == 0
&& signal_print_state (signo) == 0
&& signal_pass_state (signo) == 1)
newly attached threads may cause an unwanted delay in
getting them running. */
registers_changed ();
- linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
+ linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
lp->step, signo);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
goto retry;
}
- if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
+ if (!non_stop)
{
- /* If ^C/BREAK is typed at the tty/console, SIGINT gets
- forwarded to the entire process group, that is, all LWP's
- will receive it. Since we only want to report it once,
- we try to flush it from all LWPs except this one. */
- sigaddset (&flush_mask, SIGINT);
+ /* Only do the below in all-stop, as we currently use SIGINT
+ to implement target_stop (see linux_nat_stop) in
+ non-stop. */
+ if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
+ {
+ /* If ^C/BREAK is typed at the tty/console, SIGINT gets
+ forwarded to the entire process group, that is, all LWPs
+ will receive it - unless they're using CLONE_THREAD to
+ share signals. Since we only want to report it once, we
+ mark it as ignored for all LWPs except this one. */
+ iterate_over_lwps (set_ignore_sigint, NULL);
+ lp->ignore_sigint = 0;
+ }
+ else
+ maybe_clear_ignore_sigint (lp);
}
}
fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
status_to_str (status), target_pid_to_str (lp->ptid));
- /* Now stop all other LWP's ... */
- iterate_over_lwps (stop_callback, NULL);
+ if (!non_stop)
+ {
+ /* Now stop all other LWP's ... */
+ iterate_over_lwps (stop_callback, NULL);
- /* ... and wait until all of them have reported back that they're no
- longer running. */
- iterate_over_lwps (stop_wait_callback, &flush_mask);
- iterate_over_lwps (flush_callback, &flush_mask);
+ /* ... and wait until all of them have reported back that
+ they're no longer running. */
+ iterate_over_lwps (stop_wait_callback, NULL);
- /* If we're not waiting for a specific LWP, choose an event LWP from
- among those that have had events. Giving equal priority to all
- LWPs that have had events helps prevent starvation. */
- if (pid == -1)
- select_event_lwp (&lp, &status);
+ /* If we're not waiting for a specific LWP, choose an event LWP
+ from among those that have had events. Giving equal priority
+ to all LWPs that have had events helps prevent
+ starvation. */
+ if (pid == -1)
+ select_event_lwp (&lp, &status);
+ }
/* Now that we've selected our final event LWP, cancel any
breakpoints in other LWPs that have hit a GDB breakpoint. See
}
static void
-linux_nat_kill (void)
+linux_nat_kill (struct target_ops *ops)
{
struct target_waitstatus last;
ptid_t last_ptid;
if (last.kind == TARGET_WAITKIND_FORKED
|| last.kind == TARGET_WAITKIND_VFORKED)
{
- ptrace (PT_KILL, last.value.related_pid, 0, 0);
+ ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
wait (&status);
}
}
else
{
+ /* Stop all threads before killing them, since ptrace requires
+ that the thread is stopped to successfully PTRACE_KILL. */
+ iterate_over_lwps (stop_callback, NULL);
+ /* ... and wait until all of them have reported back that
+ they're no longer running. */
+ iterate_over_lwps (stop_wait_callback, NULL);
+
/* Kill all LWP's ... */
iterate_over_lwps (kill_callback, NULL);
}
static void
-linux_nat_mourn_inferior (void)
+linux_nat_mourn_inferior (struct target_ops *ops)
{
/* Destroy LWP info; it's no longer valid. */
init_lwp_list ();
/* Normal case, no other forks available. */
if (target_can_async_p ())
linux_nat_async (NULL, 0);
- linux_ops->to_mourn_inferior ();
+ linux_ops->to_mourn_inferior (ops);
}
else
/* Multi-fork case. The current inferior_ptid has exited, but
linux_fork_mourn_inferior ();
}
+/* Convert a native/host siginfo object, into/from the siginfo in the
+   layout of the inferior's architecture.  DIRECTION 0 converts the
+   native (ptrace) layout in SIGINFO into the inferior layout in
+   INF_SIGINFO; any other value converts INF_SIGINFO back into
+   SIGINFO (see the callers in linux_xfer_siginfo).  */
+
+static void
+siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
+{
+ int done = 0;
+
+ /* Let the architecture-specific conversion hook handle it first,
+    if one was registered (see linux_nat_set_siginfo_fixup).  */
+ if (linux_nat_siginfo_fixup != NULL)
+ done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
+
+ /* If there was no callback, or the callback didn't do anything,
+ then just do a straight memcpy. */
+ if (!done)
+ {
+ if (direction == 1)
+ memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
+ else
+ memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
+ }
+}
+
+/* Implement TARGET_OBJECT_SIGNAL_INFO transfers for
+   linux_nat_xfer_partial: read or write the siginfo structure of the
+   current LWP through PTRACE_GETSIGINFO / PTRACE_SETSIGINFO.
+   OFFSET and LEN select a byte range within the siginfo object, in
+   the layout of the inferior's architecture.  Returns the number of
+   bytes transferred, or -1 on error.  */
+
+static LONGEST
+linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
+ const char *annex, gdb_byte *readbuf,
+ const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
+{
+ int pid;
+ struct siginfo siginfo;
+ gdb_byte inf_siginfo[sizeof (struct siginfo)];
+
+ gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
+ gdb_assert (readbuf || writebuf);
+
+ /* Prefer the LWP id; inferior_ptid may not carry one, in which
+    case fall back to the process id.  */
+ pid = GET_LWP (inferior_ptid);
+ if (pid == 0)
+ pid = GET_PID (inferior_ptid);
+
+ if (offset > sizeof (siginfo))
+ return -1;
+
+ errno = 0;
+ ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
+ if (errno != 0)
+ return -1;
+
+ /* When GDB is built as a 64-bit application, ptrace writes into
+ SIGINFO an object with 64-bit layout. Since debugging a 32-bit
+ inferior with a 64-bit GDB should look the same as debugging it
+ with a 32-bit GDB, we need to convert it. GDB core always sees
+ the converted layout, so any read/write will have to be done
+ post-conversion. */
+ siginfo_fixup (&siginfo, inf_siginfo, 0);
+
+ if (offset + len > sizeof (siginfo))
+ len = sizeof (siginfo) - offset;
+
+ if (readbuf != NULL)
+ memcpy (readbuf, inf_siginfo + offset, len);
+ else
+ {
+ /* Writes are read-modify-write: patch the requested bytes into
+    the copy just fetched, so a partial write preserves the rest
+    of the siginfo.  */
+ memcpy (inf_siginfo + offset, writebuf, len);
+
+ /* Convert back to ptrace layout before flushing it out. */
+ siginfo_fixup (&siginfo, inf_siginfo, 1);
+
+ errno = 0;
+ ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
+ if (errno != 0)
+ return -1;
+ }
+
+ return len;
+}
+
static LONGEST
linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
const char *annex, gdb_byte *readbuf,
const gdb_byte *writebuf,
ULONGEST offset, LONGEST len)
{
- struct cleanup *old_chain = save_inferior_ptid ();
+ struct cleanup *old_chain;
LONGEST xfer;
+ if (object == TARGET_OBJECT_SIGNAL_INFO)
+ return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
+ offset, len);
+
+ old_chain = save_inferior_ptid ();
+
if (is_lwp (inferior_ptid))
inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
}
static int
-linux_nat_thread_alive (ptid_t ptid)
+linux_thread_alive (ptid_t ptid)
{
+ int err;
+
gdb_assert (is_lwp (ptid));
- errno = 0;
- ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
+ /* Send signal 0 instead of using a ptrace request, because ptracing
+ a running thread errors out claiming that the thread doesn't
+ exist. */
+ err = kill_lwp (GET_LWP (ptid), 0);
+
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
+ "LLTA: KILL(SIG0) %s (%s)\n",
target_pid_to_str (ptid),
- errno ? safe_strerror (errno) : "OK");
+ err ? safe_strerror (err) : "OK");
- /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
- handle that case gracefully since ptrace will first do a lookup
- for the process based upon the passed-in pid. If that fails we
- will get either -ESRCH or -EPERM, otherwise the child exists and
- is alive. */
- if (errno == ESRCH || errno == EPERM)
+ if (err != 0)
return 0;
return 1;
}
+/* The target_ops to_thread_alive method: a thin wrapper around
+   linux_thread_alive.  OPS is unused.  */
+
+static int
+linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
+{
+ return linux_thread_alive (ptid);
+}
+
static char *
-linux_nat_pid_to_str (ptid_t ptid)
+linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
static char buf[64];
static void
sigchld_handler (int signo)
{
- if (linux_nat_async_enabled
- && linux_nat_async_events_enabled
+ if (target_async_permitted
+ && linux_nat_async_events_state != sigchld_sync
&& signo == SIGCHLD)
/* It is *always* a bug to hit this. */
internal_error (__FILE__, __LINE__,
unsigned long,
int, int, int, void *), void *obfd)
{
- long long pid = PIDGET (inferior_ptid);
+ int pid = PIDGET (inferior_ptid);
char mapsfilename[MAXPATHLEN];
FILE *mapsfile;
long long addr, endaddr, size, offset, inode;
char permissions[8], device[8], filename[MAXPATHLEN];
int read, write, exec;
int ret;
+ struct cleanup *cleanup;
/* Compose the filename for the /proc memory map, and open it. */
- sprintf (mapsfilename, "/proc/%lld/maps", pid);
+ sprintf (mapsfilename, "/proc/%d/maps", pid);
if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
error (_("Could not open %s."), mapsfilename);
+ cleanup = make_cleanup_fclose (mapsfile);
if (info_verbose)
fprintf_filtered (gdb_stdout,
segment. */
func (addr, size, read, write, exec, obfd);
}
- fclose (mapsfile);
+ do_cleanups (cleanup);
+ return 0;
+}
+
+/* Callback for iterate_over_threads: return non-zero (stopping the
+   iteration) when INFO is a thread of the current inferior whose
+   last recorded stop signal is not TARGET_SIGNAL_0.  DATA is
+   unused.  */
+
+static int
+find_signalled_thread (struct thread_info *info, void *data)
+{
+ if (info->stop_signal != TARGET_SIGNAL_0
+ && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
+ return 1;
+
return 0;
}
+/* Return the stop signal of the first thread of the current inferior
+   found to have stopped for a signal, or TARGET_SIGNAL_0 if no
+   thread did.  Used when building the core file note section (see
+   linux_nat_make_corefile_notes).  */
+
+static enum target_signal
+find_stop_signal (void)
+{
+ struct thread_info *info =
+ iterate_over_threads (find_signalled_thread, NULL);
+
+ if (info)
+ return info->stop_signal;
+ else
+ return TARGET_SIGNAL_0;
+}
+
/* Records the thread's register state for the corefile note
section. */
static char *
linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
- char *note_data, int *note_size)
+ char *note_data, int *note_size,
+ enum target_signal stop_signal)
{
gdb_gregset_t gregs;
gdb_fpregset_t fpregs;
char *note_data;
int *note_size;
int num_notes;
+ enum target_signal stop_signal;
};
/* Called by gdbthread.c once per thread. Records the thread's
args->note_data = linux_nat_do_thread_registers (args->obfd,
ti->ptid,
args->note_data,
- args->note_size);
+ args->note_size,
+ args->stop_signal);
args->num_notes++;
return 0;
}
-/* Records the register state for the corefile note section. */
-
-static char *
-linux_nat_do_registers (bfd *obfd, ptid_t ptid,
- char *note_data, int *note_size)
-{
- return linux_nat_do_thread_registers (obfd,
- ptid_build (ptid_get_pid (inferior_ptid),
- ptid_get_pid (inferior_ptid),
- 0),
- note_data, note_size);
-}
-
/* Fills the "to_make_corefile_note" target vector. Builds the note
section for a corefile, and returns it in a malloc buffer. */
thread_args.note_data = note_data;
thread_args.note_size = note_size;
thread_args.num_notes = 0;
+ thread_args.stop_signal = find_stop_signal ();
iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
- if (thread_args.num_notes == 0)
- {
- /* iterate_over_threads didn't come up with any threads; just
- use inferior_ptid. */
- note_data = linux_nat_do_registers (obfd, inferior_ptid,
- note_data, note_size);
- }
- else
- {
- note_data = thread_args.note_data;
- }
+ gdb_assert (thread_args.num_notes != 0);
+ note_data = thread_args.note_data;
auxv_len = target_read_alloc (¤t_target, TARGET_OBJECT_AUXV,
NULL, &auxv);
static void
linux_nat_info_proc_cmd (char *args, int from_tty)
{
- long long pid = PIDGET (inferior_ptid);
+ /* A long is used for pid instead of an int to avoid a loss of precision
+ compiler warning from the output of strtoul. */
+ long pid = PIDGET (inferior_ptid);
FILE *procfile;
char **argv = NULL;
char buffer[MAXPATHLEN];
if (args)
{
/* Break up 'args' into an argv array. */
- if ((argv = buildargv (args)) == NULL)
- nomem (0);
- else
- make_cleanup_freeargv (argv);
+ argv = gdb_buildargv (args);
+ make_cleanup_freeargv (argv);
}
while (argv != NULL && *argv != NULL)
{
if (pid == 0)
error (_("No current process: you must name one."));
- sprintf (fname1, "/proc/%lld", pid);
+ sprintf (fname1, "/proc/%ld", pid);
if (stat (fname1, &dummy) != 0)
error (_("No /proc directory: '%s'"), fname1);
- printf_filtered (_("process %lld\n"), pid);
+ printf_filtered (_("process %ld\n"), pid);
if (cmdline_f || all)
{
- sprintf (fname1, "/proc/%lld/cmdline", pid);
+ sprintf (fname1, "/proc/%ld/cmdline", pid);
if ((procfile = fopen (fname1, "r")) != NULL)
{
- fgets (buffer, sizeof (buffer), procfile);
- printf_filtered ("cmdline = '%s'\n", buffer);
- fclose (procfile);
+ struct cleanup *cleanup = make_cleanup_fclose (procfile);
+ if (fgets (buffer, sizeof (buffer), procfile))
+ printf_filtered ("cmdline = '%s'\n", buffer);
+ else
+ warning (_("unable to read '%s'"), fname1);
+ do_cleanups (cleanup);
}
else
warning (_("unable to open /proc file '%s'"), fname1);
}
if (cwd_f || all)
{
- sprintf (fname1, "/proc/%lld/cwd", pid);
+ sprintf (fname1, "/proc/%ld/cwd", pid);
memset (fname2, 0, sizeof (fname2));
if (readlink (fname1, fname2, sizeof (fname2)) > 0)
printf_filtered ("cwd = '%s'\n", fname2);
}
if (exe_f || all)
{
- sprintf (fname1, "/proc/%lld/exe", pid);
+ sprintf (fname1, "/proc/%ld/exe", pid);
memset (fname2, 0, sizeof (fname2));
if (readlink (fname1, fname2, sizeof (fname2)) > 0)
printf_filtered ("exe = '%s'\n", fname2);
}
if (mappings_f || all)
{
- sprintf (fname1, "/proc/%lld/maps", pid);
+ sprintf (fname1, "/proc/%ld/maps", pid);
if ((procfile = fopen (fname1, "r")) != NULL)
{
long long addr, endaddr, size, offset, inode;
char permissions[8], device[8], filename[MAXPATHLEN];
+ struct cleanup *cleanup;
+ cleanup = make_cleanup_fclose (procfile);
printf_filtered (_("Mapped address spaces:\n\n"));
if (gdbarch_addr_bit (current_gdbarch) == 32)
{
}
}
- fclose (procfile);
+ do_cleanups (cleanup);
}
else
warning (_("unable to open /proc file '%s'"), fname1);
}
if (status_f || all)
{
- sprintf (fname1, "/proc/%lld/status", pid);
+ sprintf (fname1, "/proc/%ld/status", pid);
if ((procfile = fopen (fname1, "r")) != NULL)
{
+ struct cleanup *cleanup = make_cleanup_fclose (procfile);
while (fgets (buffer, sizeof (buffer), procfile) != NULL)
puts_filtered (buffer);
- fclose (procfile);
+ do_cleanups (cleanup);
}
else
warning (_("unable to open /proc file '%s'"), fname1);
}
if (stat_f || all)
{
- sprintf (fname1, "/proc/%lld/stat", pid);
+ sprintf (fname1, "/proc/%ld/stat", pid);
if ((procfile = fopen (fname1, "r")) != NULL)
{
int itmp;
char ctmp;
long ltmp;
+ struct cleanup *cleanup = make_cleanup_fclose (procfile);
if (fscanf (procfile, "%d ", &itmp) > 0)
printf_filtered (_("Process: %d\n"), itmp);
if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
#endif
- fclose (procfile);
+ do_cleanups (cleanup);
}
else
warning (_("unable to open /proc file '%s'"), fname1);
FILE *procfile;
char buffer[MAXPATHLEN], fname[MAXPATHLEN];
int signum;
+ struct cleanup *cleanup;
sigemptyset (pending);
sigemptyset (blocked);
procfile = fopen (fname, "r");
if (procfile == NULL)
error (_("Could not open %s"), fname);
+ cleanup = make_cleanup_fclose (procfile);
while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
{
add_line_to_sigset (buffer + 8, ignored);
}
- fclose (procfile);
+ do_cleanups (cleanup);
+}
+
+/* Implement TARGET_OBJECT_OSDATA transfers for the "processes"
+   annex: build an XML <osdata> document describing the processes
+   found in /proc (pid, owning user, command line) and serve it to
+   the caller in chunks.  The document is snapshotted into a static
+   obstack when OFFSET is 0, and freed once the caller reads past its
+   end.  Returns the number of bytes copied into READBUF, or 0 at
+   end-of-object and for unknown annexes.  */
+
+static LONGEST
+linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
+ const char *annex, gdb_byte *readbuf,
+ const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
+{
+ /* We make the process list snapshot when the object starts to be
+ read. */
+ static const char *buf;
+ static LONGEST len_avail = -1;
+ static struct obstack obstack;
+
+ DIR *dirp;
+
+ gdb_assert (object == TARGET_OBJECT_OSDATA);
+
+ if (strcmp (annex, "processes") != 0)
+ return 0;
+
+ gdb_assert (readbuf && !writebuf);
+
+ if (offset == 0)
+ {
+ /* Starting a new read: discard any previous snapshot and
+    rebuild the document from scratch.  */
+ if (len_avail != -1 && len_avail != 0)
+ obstack_free (&obstack, NULL);
+ len_avail = 0;
+ buf = NULL;
+ obstack_init (&obstack);
+ obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
+
+ dirp = opendir ("/proc");
+ if (dirp)
+ {
+ struct dirent *dp;
+ while ((dp = readdir (dirp)) != NULL)
+ {
+ struct stat statbuf;
+ char procentry[sizeof ("/proc/4294967295")];
+
+ /* Only consider numeric entries (process directories)
+    whose name fits in PROCENTRY above.  */
+ if (!isdigit (dp->d_name[0])
+ || NAMELEN (dp) > sizeof ("4294967295") - 1)
+ continue;
+
+ sprintf (procentry, "/proc/%s", dp->d_name);
+ if (stat (procentry, &statbuf) == 0
+ && S_ISDIR (statbuf.st_mode))
+ {
+ char *pathname;
+ FILE *f;
+ char cmd[MAXPATHLEN + 1];
+ struct passwd *entry;
+
+ pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
+ /* Map the directory's owner uid to a user name, if
+    possible.  */
+ entry = getpwuid (statbuf.st_uid);
+
+ if ((f = fopen (pathname, "r")) != NULL)
+ {
+ size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
+ if (len > 0)
+ {
+ int i;
+ /* /proc cmdline arguments are NUL-separated;
+    replace the separators with spaces for
+    display.  */
+ for (i = 0; i < len; i++)
+ if (cmd[i] == '\0')
+ cmd[i] = ' ';
+ cmd[len] = '\0';
+
+ obstack_xml_printf (
+ &obstack,
+ "<item>"
+ "<column name=\"pid\">%s</column>"
+ "<column name=\"user\">%s</column>"
+ "<column name=\"command\">%s</column>"
+ "</item>",
+ dp->d_name,
+ entry ? entry->pw_name : "?",
+ cmd);
+ }
+ fclose (f);
+ }
+
+ xfree (pathname);
+ }
+ }
+
+ closedir (dirp);
+ }
+
+ obstack_grow_str0 (&obstack, "</osdata>\n");
+ buf = obstack_finish (&obstack);
+ len_avail = strlen (buf);
+ }
+
+ if (offset >= len_avail)
+ {
+ /* Done. Get rid of the obstack. */
+ obstack_free (&obstack, NULL);
+ buf = NULL;
+ len_avail = 0;
+ return 0;
+ }
+
+ /* Serve the next chunk of the snapshot.  */
+ if (len > len_avail - offset)
+ len = len_avail - offset;
+ memcpy (readbuf, buf + offset, len);
+
+ return len;
}
static LONGEST
return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
offset, len);
+ if (object == TARGET_OBJECT_OSDATA)
+ return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
+ offset, len);
+
xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
offset, len);
if (xfer != 0)
return t;
}
-/* Controls if async mode is permitted. */
-static int linux_async_permitted = 0;
-
-/* The set command writes to this variable. If the inferior is
- executing, linux_nat_async_permitted is *not* updated. */
-static int linux_async_permitted_1 = 0;
-
-static void
-set_maintenance_linux_async_permitted (char *args, int from_tty,
- struct cmd_list_element *c)
-{
- if (target_has_execution)
- {
- linux_async_permitted_1 = linux_async_permitted;
- error (_("Cannot change this setting while the inferior is running."));
- }
-
- linux_async_permitted = linux_async_permitted_1;
- linux_nat_set_async_mode (linux_async_permitted);
-}
-
-static void
-show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty,
- struct cmd_list_element *c, const char *value)
-{
- fprintf_filtered (file, _("\
-Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"),
- value);
-}
-
/* target_is_async_p implementation. */
static int
linux_nat_is_async_p (void)
{
/* NOTE: palves 2008-03-21: We're only async when the user requests
- it explicitly with the "maintenance set linux-async" command.
+ it explicitly with the "maintenance set target-async" command.
Someday, linux will always be async. */
- if (!linux_async_permitted)
+ if (!target_async_permitted)
return 0;
return 1;
linux_nat_can_async_p (void)
{
/* NOTE: palves 2008-03-21: We're only async when the user requests
- it explicitly with the "maintenance set linux-async" command.
+ it explicitly with the "maintenance set target-async" command.
Someday, linux will always be async. */
- if (!linux_async_permitted)
+ if (!target_async_permitted)
return 0;
/* See target.h/target_async_mask. */
return linux_nat_async_mask_value;
}
+/* The target_ops to_supports_non_stop method: the GNU/Linux native
+   target supports non-stop mode (see linux_nat_add_target, which
+   installs this).  */
+
+static int
+linux_nat_supports_non_stop (void)
+{
+ return 1;
+}
+
/* target_async_mask implementation. */
static int
{
linux_nat_async (NULL, 0);
linux_nat_async_mask_value = mask;
- /* We're in sync mode. Make sure SIGCHLD isn't handled by
- async_sigchld_handler when we come out of sigsuspend in
- linux_nat_wait. */
- sigaction (SIGCHLD, &sync_sigchld_action, NULL);
}
else
{
- /* Restore the async handler. */
- sigaction (SIGCHLD, &async_sigchld_action, NULL);
linux_nat_async_mask_value = mask;
linux_nat_async (inferior_event_handler, 0);
}
{
int status, options, pid;
- if (!linux_nat_async_enabled || !linux_nat_async_events_enabled)
+ if (!target_async_permitted
+ || linux_nat_async_events_state != sigchld_async)
internal_error (__FILE__, __LINE__,
"get_pending_events called with async masked");
get_pending_events ();
}
-/* Enable or disable async SIGCHLD handling. */
+/* Set SIGCHLD handling state to STATE. Returns previous state. */
-static int
-linux_nat_async_events (int enable)
+static enum sigchld_state
+linux_nat_async_events (enum sigchld_state state)
{
- int current_state = linux_nat_async_events_enabled;
+ enum sigchld_state current_state = linux_nat_async_events_state;
if (debug_linux_nat_async)
fprintf_unfiltered (gdb_stdlog,
- "LNAE: enable(%d): linux_nat_async_events_enabled(%d), "
+ "LNAE: state(%d): linux_nat_async_events_state(%d), "
"linux_nat_num_queued_events(%d)\n",
- enable, linux_nat_async_events_enabled,
+ state, linux_nat_async_events_state,
linux_nat_num_queued_events);
- if (current_state != enable)
+ if (current_state != state)
{
sigset_t mask;
sigemptyset (&mask);
sigaddset (&mask, SIGCHLD);
- if (enable)
- {
- /* Unblock target events. */
- linux_nat_async_events_enabled = 1;
-
- local_event_queue_to_pipe ();
- /* While in masked async, we may have not collected all the
- pending events. Get them out now. */
- get_pending_events ();
- sigprocmask (SIG_UNBLOCK, &mask, NULL);
- }
- else
+
+ /* Always block before changing state. */
+ sigprocmask (SIG_BLOCK, &mask, NULL);
+
+ /* Set new state. */
+ linux_nat_async_events_state = state;
+
+ switch (state)
{
- /* Block target events. */
- sigprocmask (SIG_BLOCK, &mask, NULL);
- linux_nat_async_events_enabled = 0;
- /* Get events out of queue, and make them available to
- queued_waitpid / my_waitpid. */
- pipe_to_local_event_queue ();
+ case sigchld_sync:
+ {
+ /* Block target events. */
+ sigprocmask (SIG_BLOCK, &mask, NULL);
+ sigaction (SIGCHLD, &sync_sigchld_action, NULL);
+ /* Get events out of queue, and make them available to
+ queued_waitpid / my_waitpid. */
+ pipe_to_local_event_queue ();
+ }
+ break;
+ case sigchld_async:
+ {
+ /* Unblock target events for async mode. */
+
+ sigprocmask (SIG_BLOCK, &mask, NULL);
+
+ /* Put events we already waited on, in the pipe first, so
+ events are FIFO. */
+ local_event_queue_to_pipe ();
+ /* While in masked async, we may have not collected all
+ the pending events. Get them out now. */
+ get_pending_events ();
+
+ /* Let'em come. */
+ sigaction (SIGCHLD, &async_sigchld_action, NULL);
+ sigprocmask (SIG_UNBLOCK, &mask, NULL);
+ }
+ break;
+ case sigchld_default:
+ {
+ /* SIGCHLD default mode. */
+ sigaction (SIGCHLD, &sigchld_default_action, NULL);
+
+ /* Get events out of queue, and make them available to
+ queued_waitpid / my_waitpid. */
+ pipe_to_local_event_queue ();
+
+ /* Unblock SIGCHLD. */
+ sigprocmask (SIG_UNBLOCK, &mask, NULL);
+ }
+ break;
}
}
/* target_terminal_ours implementation. */
-void
+static void
linux_nat_terminal_ours (void)
{
if (!target_is_async_p ())
linux_nat_async (void (*callback) (enum inferior_event_type event_type,
void *context), void *context)
{
- if (linux_nat_async_mask_value == 0 || !linux_nat_async_enabled)
+ if (linux_nat_async_mask_value == 0 || !target_async_permitted)
internal_error (__FILE__, __LINE__,
"Calling target_async when async is masked");
add_file_handler (linux_nat_event_pipe[0],
linux_nat_async_file_handler, NULL);
- linux_nat_async_events (1);
+ linux_nat_async_events (sigchld_async);
}
else
{
async_client_callback = callback;
async_client_context = context;
- linux_nat_async_events (0);
+ linux_nat_async_events (sigchld_sync);
delete_file_handler (linux_nat_event_pipe[0]);
}
return;
}
-/* Enable/Disable async mode. */
+/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
+ event came out. */
-static void
-linux_nat_set_async_mode (int on)
+static int
+linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
- if (linux_nat_async_enabled != on)
+ ptid_t ptid = * (ptid_t *) data;
+
+ if (ptid_equal (lwp->ptid, ptid)
+ || ptid_equal (minus_one_ptid, ptid)
+ || (ptid_is_pid (ptid)
+ && ptid_get_pid (ptid) == ptid_get_pid (lwp->ptid)))
{
- if (on)
+ if (!lwp->stopped)
{
- gdb_assert (waitpid_queue == NULL);
- sigaction (SIGCHLD, &async_sigchld_action, NULL);
+ int pid, status;
- if (pipe (linux_nat_event_pipe) == -1)
- internal_error (__FILE__, __LINE__,
- "creating event pipe failed.");
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LNSL: running -> suspending %s\n",
+ target_pid_to_str (lwp->ptid));
+
+ /* Peek once, to check if we've already waited for this
+ LWP. */
+ pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
+ lwp->cloned ? __WCLONE : 0, 1 /* peek */);
- fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
- fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
+ if (pid == -1)
+ {
+ ptid_t ptid = lwp->ptid;
+
+ stop_callback (lwp, NULL);
+ stop_wait_callback (lwp, NULL);
+
+ /* If the lwp exits while we try to stop it, there's
+ nothing else to do. */
+ lwp = find_lwp_pid (ptid);
+ if (lwp == NULL)
+ return 0;
+
+ pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
+ lwp->cloned ? __WCLONE : 0,
+ 1 /* peek */);
+ }
+
+ /* If we didn't collect any signal other than SIGSTOP while
+ stopping the LWP, push a SIGNAL_0 event. In either case,
+ the event-loop will end up calling target_wait which will
+ collect these. */
+ if (pid == -1)
+ push_waitpid (ptid_get_lwp (lwp->ptid), W_STOPCODE (0),
+ lwp->cloned ? __WCLONE : 0);
}
else
{
- sigaction (SIGCHLD, &sync_sigchld_action, NULL);
-
- drain_queued_events (-1);
-
- linux_nat_num_queued_events = 0;
- close (linux_nat_event_pipe[0]);
- close (linux_nat_event_pipe[1]);
- linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1;
+ /* Already known to be stopped; do nothing. */
+ if (debug_linux_nat)
+ {
+ if (find_thread_pid (lwp->ptid)->stop_requested)
+ fprintf_unfiltered (gdb_stdlog, "\
+LNSL: already stopped/stop_requested %s\n",
+ target_pid_to_str (lwp->ptid));
+ else
+ fprintf_unfiltered (gdb_stdlog, "\
+LNSL: already stopped/no stop_requested yet %s\n",
+ target_pid_to_str (lwp->ptid));
+ }
}
}
- linux_nat_async_enabled = on;
+ return 0;
+}
+
+/* The target_ops to_stop implementation.  In non-stop mode, stop the
+   LWP(s) matching PTID ourselves: switch SIGCHLD handling to
+   synchronous while iterating over the LWPs, then re-enable async
+   event delivery.  In all-stop mode, defer to the target beneath.  */
+
+static void
+linux_nat_stop (ptid_t ptid)
+{
+ if (non_stop)
+ {
+ linux_nat_async_events (sigchld_sync);
+ iterate_over_lwps (linux_nat_stop_lwp, &ptid);
+ target_async (inferior_event_handler, 0);
+ }
+ else
+ linux_ops->to_stop (ptid);
}
void
t->to_can_async_p = linux_nat_can_async_p;
t->to_is_async_p = linux_nat_is_async_p;
+ t->to_supports_non_stop = linux_nat_supports_non_stop;
t->to_async = linux_nat_async;
t->to_async_mask = linux_nat_async_mask;
t->to_terminal_inferior = linux_nat_terminal_inferior;
t->to_terminal_ours = linux_nat_terminal_ours;
+ /* Methods for non-stop support. */
+ t->to_stop = linux_nat_stop;
+
/* We don't change the stratum; this target will sit at
process_stratum and thread_db will set at thread_stratum. This
is a little strange, since this is a multi-threaded-capable
also want to be used for single-threaded processes. */
add_target (t);
-
- /* TODO: Eliminate this and have libthread_db use
- find_target_beneath. */
- thread_db_init (t);
}
/* Register a method to call whenever a new thread is attached. */
linux_nat_new_thread = new_thread;
}
+/* Register a method that converts a siginfo object between the layout
+   that ptrace returns, and the layout in the architecture of the
+   inferior (see siginfo_fixup, which calls it).  T is currently
+   unused.  */
+void
+linux_nat_set_siginfo_fixup (struct target_ops *t,
+ int (*siginfo_fixup) (struct siginfo *,
+ gdb_byte *,
+ int))
+{
+ /* Save the pointer for siginfo_fixup to invoke later.  */
+ linux_nat_siginfo_fixup = siginfo_fixup;
+}
+
/* Return the saved siginfo associated with PTID. */
struct siginfo *
linux_nat_get_siginfo (ptid_t ptid)
return &lp->siginfo;
}
+/* Prepare for async mode: create the event pipe through which
+   SIGCHLD events reach the event loop (see linux_nat_async, which
+   registers its read end), and make both ends non-blocking.  */
+
+static void
+linux_nat_setup_async (void)
+{
+ if (pipe (linux_nat_event_pipe) == -1)
+ internal_error (__FILE__, __LINE__,
+ "creating event pipe failed.");
+ fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
+ fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
+}
+
+/* Provide a prototype to silence -Wmissing-prototypes. */
+extern initialize_file_ftype _initialize_linux_nat;
+
void
_initialize_linux_nat (void)
{
show_debug_linux_nat_async,
&setdebuglist, &showdebuglist);
- add_setshow_boolean_cmd ("linux-async", class_maintenance,
- &linux_async_permitted_1, _("\
-Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
-Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
-Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."),
- set_maintenance_linux_async_permitted,
- show_maintenance_linux_async_permitted,
- &maintenance_set_cmdlist,
- &maintenance_show_cmdlist);
+ /* Get the default SIGCHLD action. Used while forking an inferior
+ (see linux_nat_create_inferior/linux_nat_async_events). */
+ sigaction (SIGCHLD, NULL, &sigchld_default_action);
/* Block SIGCHLD by default. Doing this early prevents it getting
unblocked if an exception is thrown due to an error while the
sigemptyset (&async_sigchld_action.sa_mask);
async_sigchld_action.sa_flags = SA_RESTART;
- /* Install the default mode. */
- linux_nat_set_async_mode (linux_async_permitted);
+ linux_nat_setup_async ();
+
+ add_setshow_boolean_cmd ("disable-randomization", class_support,
+ &disable_randomization, _("\
+Set disabling of debuggee's virtual address space randomization."), _("\
+Show disabling of debuggee's virtual address space randomization."), _("\
+When this mode is on (which is the default), randomization of the virtual\n\
+address space is disabled. Standalone programs run with the randomization\n\
+enabled by default on some platforms."),
+ &set_disable_randomization,
+ &show_disable_randomization,
+ &setlist, &showlist);
}
\f