X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Flinux-nat.c;h=b09cd498960283d0b8f3dc8296756a4a40777096;hb=604133b5c51154a435f50cc079b24cb2f2eb8770;hp=80b0cfff70c5012aad279bb07b470015b1188a69;hpb=85c078043bd43ef5cb76191a6d3e2159da0c302e;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/linux-nat.c b/gdb/linux-nat.c index 80b0cfff70..b09cd49896 100644 --- a/gdb/linux-nat.c +++ b/gdb/linux-nat.c @@ -1,12 +1,13 @@ /* GNU/Linux native-dependent code common to multiple platforms. - Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc. + Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 + Free Software Foundation, Inc. This file is part of GDB. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or + the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, @@ -15,9 +16,7 @@ GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place - Suite 330, - Boston, MA 02111-1307, USA. */ + along with this program. If not, see . */ #include "defs.h" #include "inferior.h" @@ -31,9 +30,13 @@ #endif #include #include "linux-nat.h" +#include "linux-fork.h" #include "gdbthread.h" #include "gdbcmd.h" #include "regcache.h" +#include "regset.h" +#include "inf-ptrace.h" +#include "auxv.h" #include /* for MAXPATHLEN */ #include /* for elf_gregset etc. */ #include "elf-bfd.h" /* for elfcore_write_* */ @@ -43,6 +46,33 @@ #include "gdbthread.h" /* for struct thread_info etc. */ #include "gdb_stat.h" /* for struct stat */ #include /* for O_RDONLY */ +#include "inf-loop.h" +#include "event-loop.h" +#include "event-top.h" + +/* Note on this file's use of signals: + + We stop threads by sending a SIGSTOP. The use of SIGSTOP instead + of another signal is not entirely significant; we just need for a + signal to be delivered, so that we can intercept it. SIGSTOP's + advantage is that it can not be blocked. A disadvantage is that it + is not a real-time signal, so it can only be queued once; we do not + keep track of other sources of SIGSTOP. + + Two other signals that can't be blocked are SIGCONT and SIGKILL. + But we can't use them, because they have special behavior when the + signal is generated - not when it is delivered. SIGCONT resumes + the entire thread group and SIGKILL kills the entire thread group. + + A delivered SIGSTOP would stop the entire thread group, not just the + thread we tkill'd. But we never let the SIGSTOP deliver; we always + intercept and cancel it (by PTRACE_CONT without passing SIGSTOP). + + We could use a real-time signal instead. This would solve those + problems; we could use PTRACE_GETSIGINFO to locate the specific + stop signals sent by GDB. But we would still have to have some + support for SIGSTOP, since PTRACE_ATTACH generates it, and there + are races with trying to find a signal that is not blocked. */ #ifndef O_LARGEFILE #define O_LARGEFILE 0 @@ -81,13 +111,50 @@ #define __WALL 0x40000000 /* Wait for any child. */ #endif +#ifndef PTRACE_GETSIGINFO +#define PTRACE_GETSIGINFO 0x4202 +#endif + +/* The single-threaded native GNU/Linux target_ops. We save a pointer for + the use of the multi-threaded target. 
*/ +static struct target_ops *linux_ops; +static struct target_ops linux_ops_saved; + +/* The method to call, if any, when a new thread is attached. */ +static void (*linux_nat_new_thread) (ptid_t); + +/* The saved to_xfer_partial method, inherited from inf-ptrace.c. + Called by our to_xfer_partial. */ +static LONGEST (*super_xfer_partial) (struct target_ops *, + enum target_object, + const char *, gdb_byte *, + const gdb_byte *, + ULONGEST, LONGEST); + static int debug_linux_nat; +static void +show_debug_linux_nat (struct ui_file *file, int from_tty, + struct cmd_list_element *c, const char *value) +{ + fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"), + value); +} + +static int debug_linux_nat_async = 0; +static void +show_debug_linux_nat_async (struct ui_file *file, int from_tty, + struct cmd_list_element *c, const char *value) +{ + fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"), + value); +} static int linux_parent_pid; struct simple_pid_list { int pid; + int status; struct simple_pid_list *next; }; struct simple_pid_list *stopped_pids; @@ -102,20 +169,174 @@ static int linux_supports_tracefork_flag = -1; static int linux_supports_tracevforkdone_flag = -1; +/* Async mode support */ + +/* To listen to target events asynchronously, we install a SIGCHLD + handler whose duty is to call waitpid (-1, ..., WNOHANG) to get all + the pending events into a pipe. Whenever we're ready to handle + events asynchronously, this pipe is registered as the waitable file + handle in the event loop. When we get to entry target points + coming out of the common code (target_wait, target_resume, ...), + that are going to call waitpid, we block SIGCHLD signals, and + remove all the events placed in the pipe into a local queue. All + the subsequent calls to my_waitpid (a waitpid wrapper) check this + local queue first. */ + +/* True if async mode is currently on. */ +static int linux_nat_async_enabled; + +/* Zero if the async mode, although enabled, is masked, which means + linux_nat_wait should behave as if async mode was off. */ +static int linux_nat_async_mask_value = 1; + +/* The read/write ends of the pipe registered as waitable file in the + event loop. */ +static int linux_nat_event_pipe[2] = { -1, -1 }; + +/* Number of queued events in the pipe. */ +static volatile int linux_nat_num_queued_events; + +/* If async mode is on, true if we're listening for events; false if + target events are blocked. */ +static int linux_nat_async_events_enabled; + +static int linux_nat_async_events (int enable); +static void pipe_to_local_event_queue (void); +static void local_event_queue_to_pipe (void); +static void linux_nat_event_pipe_push (int pid, int status, int options); +static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options); +static void linux_nat_set_async_mode (int on); +static void linux_nat_async (void (*callback) + (enum inferior_event_type event_type, void *context), + void *context); +static int linux_nat_async_mask (int mask); +static int kill_lwp (int lwpid, int signo); + +/* Captures the result of a successful waitpid call, along with the + options used in that call. */ +struct waitpid_result +{ + int pid; + int status; + int options; + struct waitpid_result *next; +}; + +/* A singly-linked list of the results of the waitpid calls performed + in the async SIGCHLD handler. 
*/ +static struct waitpid_result *waitpid_queue = NULL; + +static int +queued_waitpid (int pid, int *status, int flags) +{ + struct waitpid_result *msg = waitpid_queue, *prev = NULL; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, + "\ +QWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n", + linux_nat_async_events_enabled, + linux_nat_num_queued_events); + + if (flags & __WALL) + { + for (; msg; prev = msg, msg = msg->next) + if (pid == -1 || pid == msg->pid) + break; + } + else if (flags & __WCLONE) + { + for (; msg; prev = msg, msg = msg->next) + if (msg->options & __WCLONE + && (pid == -1 || pid == msg->pid)) + break; + } + else + { + for (; msg; prev = msg, msg = msg->next) + if ((msg->options & __WCLONE) == 0 + && (pid == -1 || pid == msg->pid)) + break; + } + + if (msg) + { + int pid; + + if (prev) + prev->next = msg->next; + else + waitpid_queue = msg->next; + + msg->next = NULL; + if (status) + *status = msg->status; + pid = msg->pid; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n", + pid, msg->status); + xfree (msg); + + return pid; + } + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n"); + + if (status) + *status = 0; + return -1; +} + +static void +push_waitpid (int pid, int status, int options) +{ + struct waitpid_result *event, *new_event; + + new_event = xmalloc (sizeof (*new_event)); + new_event->pid = pid; + new_event->status = status; + new_event->options = options; + new_event->next = NULL; + + if (waitpid_queue) + { + for (event = waitpid_queue; + event && event->next; + event = event->next) + ; + + event->next = new_event; + } + else + waitpid_queue = new_event; +} + +/* Drain all queued events of PID. If PID is -1, the effect is of + draining all events. */ +static void +drain_queued_events (int pid) +{ + while (queued_waitpid (pid, NULL, __WALL) != -1) + ; +} + /* Trivial list manipulation functions to keep track of a list of new stopped processes. */ static void -add_to_pid_list (struct simple_pid_list **listp, int pid) +add_to_pid_list (struct simple_pid_list **listp, int pid, int status) { struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list)); new_pid->pid = pid; + new_pid->status = status; new_pid->next = *listp; *listp = new_pid; } static int -pull_pid_from_list (struct simple_pid_list **listp, int pid) +pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status) { struct simple_pid_list **p; @@ -123,6 +344,7 @@ pull_pid_from_list (struct simple_pid_list **listp, int pid) if ((*p)->pid == pid) { struct simple_pid_list *next = (*p)->next; + *status = (*p)->status; xfree (*p); *p = next; return 1; @@ -130,10 +352,10 @@ pull_pid_from_list (struct simple_pid_list **listp, int pid) return 0; } -void -linux_record_stopped_pid (int pid) +static void +linux_record_stopped_pid (int pid, int status) { - add_to_pid_list (&stopped_pids, pid); + add_to_pid_list (&stopped_pids, pid, status); } @@ -150,12 +372,21 @@ linux_tracefork_child (void) _exit (0); } -/* Wrapper function for waitpid which handles EINTR. */ +/* Wrapper function for waitpid which handles EINTR, and checks for + locally queued events. */ static int my_waitpid (int pid, int *status, int flags) { int ret; + + /* There should be no concurrent calls to waitpid. 
*/ + gdb_assert (!linux_nat_async_events_enabled); + + ret = queued_waitpid (pid, status, flags); + if (ret != -1) + return ret; + do { ret = waitpid (pid, status, flags); @@ -251,6 +482,7 @@ linux_test_for_tracefork (int original_pid) ret = ptrace (PTRACE_KILL, second_pid, 0, 0); if (ret != 0) warning (_("linux_test_for_tracefork: failed to kill second child")); + my_waitpid (second_pid, &status, 0); } } else @@ -286,9 +518,12 @@ linux_supports_tracevforkdone (int pid) void linux_enable_event_reporting (ptid_t ptid) { - int pid = ptid_get_pid (ptid); + int pid = ptid_get_lwp (ptid); int options; + if (pid == 0) + pid = ptid_get_pid (ptid); + if (! linux_supports_tracefork (pid)) return; @@ -303,37 +538,36 @@ linux_enable_event_reporting (ptid_t ptid) ptrace (PTRACE_SETOPTIONS, pid, 0, options); } -void -child_post_attach (int pid) +static void +linux_child_post_attach (int pid) { linux_enable_event_reporting (pid_to_ptid (pid)); + check_for_thread_db (); } -void +static void linux_child_post_startup_inferior (ptid_t ptid) { linux_enable_event_reporting (ptid); + check_for_thread_db (); } -#ifndef LINUX_CHILD_POST_STARTUP_INFERIOR -void -child_post_startup_inferior (ptid_t ptid) -{ - linux_child_post_startup_inferior (ptid); -} -#endif - -int -child_follow_fork (int follow_child) +static int +linux_child_follow_fork (struct target_ops *ops, int follow_child) { ptid_t last_ptid; struct target_waitstatus last_status; int has_vforked; int parent_pid, child_pid; + if (target_can_async_p ()) + target_async (NULL, 0); + get_last_target_status (&last_ptid, &last_status); has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED); - parent_pid = ptid_get_pid (last_ptid); + parent_pid = ptid_get_lwp (last_ptid); + if (parent_pid == 0) + parent_pid = ptid_get_pid (last_ptid); child_pid = last_status.value.related_pid; if (! follow_child) @@ -347,11 +581,28 @@ child_follow_fork (int follow_child) also, but they'll be reinserted below. */ detach_breakpoints (child_pid); - fprintf_filtered (gdb_stdout, - "Detaching after fork from child process %d.\n", - child_pid); + /* Detach new forked process? */ + if (detach_fork) + { + if (info_verbose || debug_linux_nat) + { + target_terminal_ours (); + fprintf_filtered (gdb_stdlog, + "Detaching after fork from child process %d.\n", + child_pid); + } - ptrace (PTRACE_DETACH, child_pid, 0, 0); + ptrace (PTRACE_DETACH, child_pid, 0, 0); + } + else + { + struct fork_info *fp; + /* Retain child fork in ptrace (stopped) state. */ + fp = find_fork_pid (child_pid); + if (!fp) + fp = add_fork (child_pid); + fork_save_infrun_state (fp, 0); + } if (has_vforked) { @@ -361,7 +612,7 @@ child_follow_fork (int follow_child) int status; ptrace (PTRACE_CONT, parent_pid, 0, 0); - waitpid (parent_pid, &status, __WALL); + my_waitpid (parent_pid, &status, __WALL); if ((status >> 16) != PTRACE_EVENT_VFORK_DONE) warning (_("Unexpected waitpid result %06x when waiting for " "vfork-done"), status); @@ -418,9 +669,13 @@ child_follow_fork (int follow_child) /* Before detaching from the parent, remove all breakpoints from it. */ remove_breakpoints (); - fprintf_filtered (gdb_stdout, - "Attaching after fork to child process %d.\n", - child_pid); + if (info_verbose || debug_linux_nat) + { + target_terminal_ours (); + fprintf_filtered (gdb_stdlog, + "Attaching after fork to child process %d.\n", + child_pid); + } /* If we're vforking, we may want to hold on to the parent until the child exits or execs. 
At exec time we can remove the old @@ -442,147 +697,59 @@ child_follow_fork (int follow_child) if (has_vforked) linux_parent_pid = parent_pid; - else - target_detach (NULL, 0); - - inferior_ptid = pid_to_ptid (child_pid); - push_target (&deprecated_child_ops); - - /* Reset breakpoints in the child as appropriate. */ - follow_inferior_reset_breakpoints (); - } - - return 0; -} - -ptid_t -linux_handle_extended_wait (int pid, int status, - struct target_waitstatus *ourstatus) -{ - int event = status >> 16; - - if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK - || event == PTRACE_EVENT_CLONE) - { - unsigned long new_pid; - int ret; - - ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid); - - /* If we haven't already seen the new PID stop, wait for it now. */ - if (! pull_pid_from_list (&stopped_pids, new_pid)) + else if (!detach_fork) { - /* The new child has a pending SIGSTOP. We can't affect it until it - hits the SIGSTOP, but we're already attached. */ - do { - ret = waitpid (new_pid, &status, - (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0); - } while (ret == -1 && errno == EINTR); - if (ret == -1) - perror_with_name (_("waiting for new child")); - else if (ret != new_pid) - internal_error (__FILE__, __LINE__, - _("wait returned unexpected PID %d"), ret); - else if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP) - internal_error (__FILE__, __LINE__, - _("wait returned unexpected status 0x%x"), status); + struct fork_info *fp; + /* Retain parent fork in ptrace (stopped) state. */ + fp = find_fork_pid (parent_pid); + if (!fp) + fp = add_fork (parent_pid); + fork_save_infrun_state (fp, 0); } - - if (event == PTRACE_EVENT_FORK) - ourstatus->kind = TARGET_WAITKIND_FORKED; - else if (event == PTRACE_EVENT_VFORK) - ourstatus->kind = TARGET_WAITKIND_VFORKED; else - ourstatus->kind = TARGET_WAITKIND_SPURIOUS; - - ourstatus->value.related_pid = new_pid; - return inferior_ptid; - } + target_detach (NULL, 0); - if (event == PTRACE_EVENT_EXEC) - { - ourstatus->kind = TARGET_WAITKIND_EXECD; - ourstatus->value.execd_pathname - = xstrdup (child_pid_to_exec_file (pid)); + inferior_ptid = ptid_build (child_pid, child_pid, 0); - if (linux_parent_pid) - { - detach_breakpoints (linux_parent_pid); - ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0); + /* Reinstall ourselves, since we might have been removed in + target_detach (which does other necessary cleanup). */ - linux_parent_pid = 0; - } + push_target (ops); + linux_nat_switch_fork (inferior_ptid); + check_for_thread_db (); - return inferior_ptid; + /* Reset breakpoints in the child as appropriate. */ + follow_inferior_reset_breakpoints (); } - internal_error (__FILE__, __LINE__, - _("unknown ptrace event %d"), event); + if (target_can_async_p ()) + target_async (inferior_event_handler, 0); + + return 0; } -void -child_insert_fork_catchpoint (int pid) +static void +linux_child_insert_fork_catchpoint (int pid) { if (! 
linux_supports_tracefork (pid)) error (_("Your system does not support fork catchpoints.")); } -void -child_insert_vfork_catchpoint (int pid) +static void +linux_child_insert_vfork_catchpoint (int pid) { if (!linux_supports_tracefork (pid)) error (_("Your system does not support vfork catchpoints.")); } -void -child_insert_exec_catchpoint (int pid) +static void +linux_child_insert_exec_catchpoint (int pid) { if (!linux_supports_tracefork (pid)) error (_("Your system does not support exec catchpoints.")); } -void -kill_inferior (void) -{ - int status; - int pid = PIDGET (inferior_ptid); - struct target_waitstatus last; - ptid_t last_ptid; - int ret; - - if (pid == 0) - return; - - /* If we're stopped while forking and we haven't followed yet, kill the - other task. We need to do this first because the parent will be - sleeping if this is a vfork. */ - - get_last_target_status (&last_ptid, &last); - - if (last.kind == TARGET_WAITKIND_FORKED - || last.kind == TARGET_WAITKIND_VFORKED) - { - ptrace (PT_KILL, last.value.related_pid, 0, 0); - wait (&status); - } - - /* Kill the current process. */ - ptrace (PT_KILL, pid, 0, 0); - ret = wait (&status); - - /* We might get a SIGCHLD instead of an exit status. This is - aggravated by the first kill above - a child has just died. */ - - while (ret == pid && WIFSTOPPED (status)) - { - ptrace (PT_KILL, pid, 0, 0); - ret = wait (&status); - } - - target_mourn_inferior (); -} - /* On GNU/Linux there are no real LWP's. The closest thing to LWP's are processes sharing the same VM space. A multi-threaded process is basically a group of such processes. However, such a grouping @@ -617,28 +784,17 @@ kill_inferior (void) because the "zombies" stay around. */ /* List of known LWPs. */ -static struct lwp_info *lwp_list; +struct lwp_info *lwp_list; /* Number of LWPs in the list. */ static int num_lwps; - -/* Non-zero if we're running in "threaded" mode. */ -static int threaded; -#define GET_LWP(ptid) ptid_get_lwp (ptid) -#define GET_PID(ptid) ptid_get_pid (ptid) -#define is_lwp(ptid) (GET_LWP (ptid) != 0) -#define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0) - /* If the last reported event was a SIGTRAP, this variable is set to the process id of the LWP/thread that got it. */ ptid_t trap_ptid; -/* This module's target-specific operations. */ -static struct target_ops linux_nat_ops; - /* Since we cannot wait (in linux_nat_wait) for the initial process and any cloned processes with a single call to waitpid, we have to use the WNOHANG flag and call waitpid in a loop. To optimize @@ -657,13 +813,19 @@ static sigset_t normal_mask; _initialize_linux_nat. */ static sigset_t suspend_mask; -/* Signals to block to make that sigsuspend work. */ -static sigset_t blocked_mask; +/* SIGCHLD action for synchronous mode. */ +struct sigaction sync_sigchld_action; + +/* SIGCHLD action for asynchronous mode. */ +static struct sigaction async_sigchld_action; /* Prototypes for local functions. */ static int stop_wait_callback (struct lwp_info *lp, void *data); static int linux_nat_thread_alive (ptid_t ptid); +static char *linux_child_pid_to_exec_file (int pid); +static int cancel_breakpoint (struct lwp_info *lp); + /* Convert wait status STATUS to a string. Used for printing debug messages only. */ @@ -703,12 +865,11 @@ init_lwp_list (void) lwp_list = NULL; num_lwps = 0; - threaded = 0; } -/* Add the LWP specified by PID to the list. If this causes the - number of LWPs to become larger than one, go into "threaded" mode. - Return a pointer to the structure describing the new LWP. 
*/ +/* Add the LWP specified by PID to the list. Return a pointer to the + structure describing the new LWP. The LWP should already be stopped + (with an exception for the very first LWP). */ static struct lwp_info * add_lwp (ptid_t ptid) @@ -727,8 +888,10 @@ add_lwp (ptid_t ptid) lp->next = lwp_list; lwp_list = lp; - if (++num_lwps > 1) - threaded = 1; + ++num_lwps; + + if (num_lwps > 1 && linux_nat_new_thread != NULL) + linux_nat_new_thread (ptid); return lp; } @@ -749,8 +912,6 @@ delete_lwp (ptid_t ptid) if (!lp) return; - /* We don't go back to "non-threaded" mode if the number of threads - becomes less than two. */ num_lwps--; if (lpprev) @@ -802,129 +963,364 @@ iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data) return NULL; } -/* Attach to the LWP specified by PID. If VERBOSE is non-zero, print - a message telling the user that a new LWP has been added to the - process. */ +/* Update our internal state when changing from one fork (checkpoint, + et cetera) to another indicated by NEW_PTID. We can only switch + single-threaded applications, so we only create one new LWP, and + the previous list is discarded. */ void -lin_lwp_attach_lwp (ptid_t ptid, int verbose) +linux_nat_switch_fork (ptid_t new_ptid) { - struct lwp_info *lp, *found_lp; + struct lwp_info *lp; - gdb_assert (is_lwp (ptid)); + init_lwp_list (); + lp = add_lwp (new_ptid); + lp->stopped = 1; +} - /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events - to interrupt either the ptrace() or waitpid() calls below. */ - if (!sigismember (&blocked_mask, SIGCHLD)) - { - sigaddset (&blocked_mask, SIGCHLD); - sigprocmask (SIG_BLOCK, &blocked_mask, NULL); - } +/* Record a PTID for later deletion. */ - if (verbose) - printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid)); +struct saved_ptids +{ + ptid_t ptid; + struct saved_ptids *next; +}; +static struct saved_ptids *threads_to_delete; - found_lp = lp = find_lwp_pid (ptid); - if (lp == NULL) - lp = add_lwp (ptid); +static void +record_dead_thread (ptid_t ptid) +{ + struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids)); + p->ptid = ptid; + p->next = threads_to_delete; + threads_to_delete = p; +} - /* We assume that we're already attached to any LWP that has an id - equal to the overall process id, and to any LWP that is already - in our list of LWPs. If we're not seeing exit events from threads - and we've had PID wraparound since we last tried to stop all threads, - this assumption might be wrong; fortunately, this is very unlikely - to happen. */ - if (GET_LWP (ptid) != GET_PID (ptid) && found_lp == NULL) - { - pid_t pid; - int status; +/* Delete any dead threads which are not the current thread. */ - if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0) - error (_("Can't attach %s: %s"), target_pid_to_str (ptid), - safe_strerror (errno)); +static void +prune_lwps (void) +{ + struct saved_ptids **p = &threads_to_delete; - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n", - target_pid_to_str (ptid)); + while (*p) + if (! ptid_equal ((*p)->ptid, inferior_ptid)) + { + struct saved_ptids *tmp = *p; + delete_thread (tmp->ptid); + *p = tmp->next; + xfree (tmp); + } + else + p = &(*p)->next; +} - pid = waitpid (GET_LWP (ptid), &status, 0); - if (pid == -1 && errno == ECHILD) - { - /* Try again with __WCLONE to check cloned processes. */ - pid = waitpid (GET_LWP (ptid), &status, __WCLONE); - lp->cloned = 1; - } +/* Handle the exit of a single thread LP. 
*/ + +static void +exit_lwp (struct lwp_info *lp) +{ + if (in_thread_list (lp->ptid)) + { + if (print_thread_events) + printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid)); - gdb_assert (pid == GET_LWP (ptid) - && WIFSTOPPED (status) && WSTOPSIG (status)); + /* Core GDB cannot deal with us deleting the current thread. */ + if (!ptid_equal (lp->ptid, inferior_ptid)) + delete_thread (lp->ptid); + else + record_dead_thread (lp->ptid); + } - child_post_attach (pid); + delete_lwp (lp->ptid); +} - lp->stopped = 1; +/* Detect `T (stopped)' in `/proc/PID/status'. + Other states including `T (tracing stop)' are reported as false. */ - if (debug_linux_nat) +static int +pid_is_stopped (pid_t pid) +{ + FILE *status_file; + char buf[100]; + int retval = 0; + + snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid); + status_file = fopen (buf, "r"); + if (status_file != NULL) + { + int have_state = 0; + + while (fgets (buf, sizeof (buf), status_file)) { - fprintf_unfiltered (gdb_stdlog, - "LLAL: waitpid %s received %s\n", - target_pid_to_str (ptid), - status_to_str (status)); + if (strncmp (buf, "State:", 6) == 0) + { + have_state = 1; + break; + } } + if (have_state && strstr (buf, "T (stopped)") != NULL) + retval = 1; + fclose (status_file); } - else - { - /* We assume that the LWP representing the original process is - already stopped. Mark it as stopped in the data structure - that the linux ptrace layer uses to keep track of threads. - Note that this won't have already been done since the main - thread will have, we assume, been stopped by an attach from a - different layer. */ - lp->stopped = 1; - } + return retval; } -static void -linux_nat_attach (char *args, int from_tty) +/* Wait for the LWP specified by LP, which we have just attached to. + Returns a wait status for that LWP, to cache. */ + +static int +linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned, + int *signalled) { - struct lwp_info *lp; - pid_t pid; + pid_t new_pid, pid = GET_LWP (ptid); int status; - /* FIXME: We should probably accept a list of process id's, and - attach all of them. */ - deprecated_child_ops.to_attach (args, from_tty); - - /* Add the initial process as the first LWP to the list. */ - lp = add_lwp (BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid))); + if (pid_is_stopped (pid)) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LNPAW: Attaching to a stopped process\n"); + + /* The process is definitely stopped. It is in a job control + stop, unless the kernel predates the TASK_STOPPED / + TASK_TRACED distinction, in which case it might be in a + ptrace stop. Make sure it is in a ptrace stop; from there we + can kill it, signal it, et cetera. + + First make sure there is a pending SIGSTOP. Since we are + already attached, the process can not transition from stopped + to running without a PTRACE_CONT; so we know this signal will + go into the queue. The SIGSTOP generated by PTRACE_ATTACH is + probably already in the queue (unless this kernel is old + enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP + is not an RT signal, it can only be queued once. */ + kill_lwp (pid, SIGSTOP); + + /* Finally, resume the stopped process. This will deliver the SIGSTOP + (or a higher priority signal, just like normal PTRACE_ATTACH). */ + ptrace (PTRACE_CONT, pid, 0, 0); + } /* Make sure the initial process is stopped. The user-level threads layer might want to poke around in the inferior, and that won't work if things haven't stabilized yet. 
*/ - pid = waitpid (GET_PID (inferior_ptid), &status, 0); - if (pid == -1 && errno == ECHILD) + new_pid = my_waitpid (pid, &status, 0); + if (new_pid == -1 && errno == ECHILD) { - warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid)); + if (first) + warning (_("%s is a cloned process"), target_pid_to_str (ptid)); /* Try again with __WCLONE to check cloned processes. */ - pid = waitpid (GET_PID (inferior_ptid), &status, __WCLONE); - lp->cloned = 1; + new_pid = my_waitpid (pid, &status, __WCLONE); + *cloned = 1; + } + + gdb_assert (pid == new_pid && WIFSTOPPED (status)); + + if (WSTOPSIG (status) != SIGSTOP) + { + *signalled = 1; + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LNPAW: Received %s after attaching\n", + status_to_str (status)); + } + + return status; +} + +/* Attach to the LWP specified by PID. Return 0 if successful or -1 + if the new LWP could not be attached. */ + +int +lin_lwp_attach_lwp (ptid_t ptid) +{ + struct lwp_info *lp; + int async_events_were_enabled = 0; + + gdb_assert (is_lwp (ptid)); + + if (target_can_async_p ()) + async_events_were_enabled = linux_nat_async_events (0); + + lp = find_lwp_pid (ptid); + + /* We assume that we're already attached to any LWP that has an id + equal to the overall process id, and to any LWP that is already + in our list of LWPs. If we're not seeing exit events from threads + and we've had PID wraparound since we last tried to stop all threads, + this assumption might be wrong; fortunately, this is very unlikely + to happen. */ + if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL) + { + int status, cloned = 0, signalled = 0; + + if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0) + { + /* If we fail to attach to the thread, issue a warning, + but continue. One way this can happen is if thread + creation is interrupted; as of Linux kernel 2.6.19, a + bug may place threads in the thread list and then fail + to create them. */ + warning (_("Can't attach %s: %s"), target_pid_to_str (ptid), + safe_strerror (errno)); + return -1; + } + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n", + target_pid_to_str (ptid)); + + status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled); + lp = add_lwp (ptid); + lp->stopped = 1; + lp->cloned = cloned; + lp->signalled = signalled; + if (WSTOPSIG (status) != SIGSTOP) + { + lp->resumed = 1; + lp->status = status; + } + + target_post_attach (GET_LWP (lp->ptid)); + + if (debug_linux_nat) + { + fprintf_unfiltered (gdb_stdlog, + "LLAL: waitpid %s received %s\n", + target_pid_to_str (ptid), + status_to_str (status)); + } } + else + { + /* We assume that the LWP representing the original process is + already stopped. Mark it as stopped in the data structure + that the GNU/linux ptrace layer uses to keep track of + threads. Note that this won't have already been done since + the main thread will have, we assume, been stopped by an + attach from a different layer. */ + if (lp == NULL) + lp = add_lwp (ptid); + lp->stopped = 1; + } + + if (async_events_were_enabled) + linux_nat_async_events (1); - gdb_assert (pid == GET_PID (inferior_ptid) - && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP); + return 0; +} + +static void +linux_nat_create_inferior (char *exec_file, char *allargs, char **env, + int from_tty) +{ + int saved_async = 0; + + /* The fork_child mechanism is synchronous and calls target_wait, so + we have to mask the async mode. 
*/ + + if (target_can_async_p ()) + saved_async = linux_nat_async_mask (0); + else + { + /* Restore the original signal mask. */ + sigprocmask (SIG_SETMASK, &normal_mask, NULL); + /* Make sure we don't block SIGCHLD during a sigsuspend. */ + suspend_mask = normal_mask; + sigdelset (&suspend_mask, SIGCHLD); + } + + linux_ops->to_create_inferior (exec_file, allargs, env, from_tty); + + if (saved_async) + linux_nat_async_mask (saved_async); +} +static void +linux_nat_attach (char *args, int from_tty) +{ + struct lwp_info *lp; + int status; + + /* FIXME: We should probably accept a list of process id's, and + attach all of them. */ + linux_ops->to_attach (args, from_tty); + + if (!target_can_async_p ()) + { + /* Restore the original signal mask. */ + sigprocmask (SIG_SETMASK, &normal_mask, NULL); + /* Make sure we don't block SIGCHLD during a sigsuspend. */ + suspend_mask = normal_mask; + sigdelset (&suspend_mask, SIGCHLD); + } + + /* Add the initial process as the first LWP to the list. */ + inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid)); + lp = add_lwp (inferior_ptid); + + status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned, + &lp->signalled); lp->stopped = 1; - /* Fake the SIGSTOP that core GDB expects. */ - lp->status = W_STOPCODE (SIGSTOP); + /* If this process is not using thread_db, then we still don't + detect any other threads, but add at least this one. */ + add_thread_silent (lp->ptid); + + /* Save the wait status to report later. */ lp->resumed = 1; if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LNA: waitpid %ld, saving status %s\n", + (long) GET_PID (lp->ptid), status_to_str (status)); + + if (!target_can_async_p ()) + lp->status = status; + else { - fprintf_unfiltered (gdb_stdlog, - "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid); + /* We already waited for this LWP, so put the wait result on the + pipe. The event loop will wake up and gets us to handling + this event. */ + linux_nat_event_pipe_push (GET_PID (lp->ptid), status, + lp->cloned ? __WCLONE : 0); + /* Register in the event loop. */ + target_async (inferior_event_handler, 0); } } +/* Get pending status of LP. */ +static int +get_pending_status (struct lwp_info *lp, int *status) +{ + struct target_waitstatus last; + ptid_t last_ptid; + + get_last_target_status (&last_ptid, &last); + + /* If this lwp is the ptid that GDB is processing an event from, the + signal will be in stop_signal. Otherwise, in all-stop + sync + mode, we may cache pending events in lp->status while trying to + stop all threads (see stop_wait_callback). In async mode, the + events are always cached in waitpid_queue. */ + + *status = 0; + if (GET_LWP (lp->ptid) == GET_LWP (last_ptid)) + { + if (stop_signal != TARGET_SIGNAL_0 + && signal_pass_state (stop_signal)) + *status = W_STOPCODE (target_signal_to_host (stop_signal)); + } + else if (target_can_async_p ()) + queued_waitpid (GET_LWP (lp->ptid), status, __WALL); + else + *status = lp->status; + + return 0; +} + static int detach_callback (struct lwp_info *lp, void *data) { @@ -935,40 +1331,30 @@ detach_callback (struct lwp_info *lp, void *data) strsignal (WSTOPSIG (lp->status)), target_pid_to_str (lp->ptid)); - while (lp->signalled && lp->stopped) + /* If there is a pending SIGSTOP, get rid of it. 
*/ + if (lp->signalled) { - errno = 0; - if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, - WSTOPSIG (lp->status)) < 0) - error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid), - safe_strerror (errno)); - if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, - "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n", - target_pid_to_str (lp->ptid), - status_to_str (lp->status)); + "DC: Sending SIGCONT to %s\n", + target_pid_to_str (lp->ptid)); - lp->stopped = 0; + kill_lwp (GET_LWP (lp->ptid), SIGCONT); lp->signalled = 0; - lp->status = 0; - /* FIXME drow/2003-08-26: There was a call to stop_wait_callback - here. But since lp->signalled was cleared above, - stop_wait_callback didn't do anything; the process was left - running. Shouldn't we be waiting for it to stop? - I've removed the call, since stop_wait_callback now does do - something when called with lp->signalled == 0. */ - - gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status)); } /* We don't actually detach from the LWP that has an id equal to the overall process id just yet. */ if (GET_LWP (lp->ptid) != GET_PID (lp->ptid)) { + int status = 0; + + /* Pass on any pending signal for this LWP. */ + get_pending_status (lp, &status); + errno = 0; if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0, - WSTOPSIG (lp->status)) < 0) + WSTOPSIG (status)) < 0) error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid), safe_strerror (errno)); @@ -987,22 +1373,44 @@ detach_callback (struct lwp_info *lp, void *data) static void linux_nat_detach (char *args, int from_tty) { + int pid; + int status; + enum target_signal sig; + + if (target_can_async_p ()) + linux_nat_async (NULL, 0); + iterate_over_lwps (detach_callback, NULL); /* Only the initial process should be left right now. */ gdb_assert (num_lwps == 1); + /* Pass on any pending signal for the last LWP. */ + if ((args == NULL || *args == '\0') + && get_pending_status (lwp_list, &status) != -1 + && WIFSTOPPED (status)) + { + /* Put the signal number in ARGS so that inf_ptrace_detach will + pass it along with PTRACE_DETACH. */ + args = alloca (8); + sprintf (args, "%d", (int) WSTOPSIG (status)); + fprintf_unfiltered (gdb_stdlog, + "LND: Sending signal %s to %s\n", + args, + target_pid_to_str (lwp_list->ptid)); + } + trap_ptid = null_ptid; /* Destroy LWP info; it's no longer valid. */ init_lwp_list (); - /* Restore the original signal mask. */ - sigprocmask (SIG_SETMASK, &normal_mask, NULL); - sigemptyset (&blocked_mask); + pid = GET_PID (inferior_ptid); + inferior_ptid = pid_to_ptid (pid); + linux_ops->to_detach (args, from_tty); - inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid)); - deprecated_child_ops.to_detach (args, from_tty); + if (target_can_async_p ()) + drain_queued_events (pid); } /* Resume LP. */ @@ -1012,15 +1420,15 @@ resume_callback (struct lwp_info *lp, void *data) { if (lp->stopped && lp->status == 0) { - struct thread_info *tp; - - child_resume (pid_to_ptid (GET_LWP (lp->ptid)), 0, TARGET_SIGNAL_0); + linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)), + 0, TARGET_SIGNAL_0); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n", target_pid_to_str (lp->ptid)); lp->stopped = 0; lp->step = 0; + memset (&lp->siginfo, 0, sizeof (lp->siginfo)); } return 0; @@ -1046,6 +1454,20 @@ linux_nat_resume (ptid_t ptid, int step, enum target_signal signo) struct lwp_info *lp; int resume_all; + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLR: Preparing to %s %s, %s, inferior_ptid %s\n", + step ? 
"step" : "resume", + target_pid_to_str (ptid), + signo ? strsignal (signo) : "0", + target_pid_to_str (inferior_ptid)); + + prune_lwps (); + + if (target_can_async_p ()) + /* Block events while we're here. */ + linux_nat_async_events (0); + /* A specific PTID means `step only this process id'. */ resume_all = (PIDGET (ptid) == -1); @@ -1060,41 +1482,85 @@ linux_nat_resume (ptid_t ptid, int step, enum target_signal signo) ptid = inferior_ptid; lp = find_lwp_pid (ptid); - if (lp) - { - ptid = pid_to_ptid (GET_LWP (lp->ptid)); + gdb_assert (lp != NULL); - /* Remember if we're stepping. */ - lp->step = step; + ptid = pid_to_ptid (GET_LWP (lp->ptid)); - /* Mark this LWP as resumed. */ - lp->resumed = 1; + /* Remember if we're stepping. */ + lp->step = step; + + /* Mark this LWP as resumed. */ + lp->resumed = 1; + + /* If we have a pending wait status for this thread, there is no + point in resuming the process. But first make sure that + linux_nat_wait won't preemptively handle the event - we + should never take this short-circuit if we are going to + leave LP running, since we have skipped resuming all the + other threads. This bit of code needs to be synchronized + with linux_nat_wait. */ + + /* In async mode, we never have pending wait status. */ + if (target_can_async_p () && lp->status) + internal_error (__FILE__, __LINE__, "Pending status in async mode"); + + if (lp->status && WIFSTOPPED (lp->status)) + { + int saved_signo = target_signal_from_host (WSTOPSIG (lp->status)); - /* If we have a pending wait status for this thread, there is no - point in resuming the process. */ - if (lp->status) + if (signal_stop_state (saved_signo) == 0 + && signal_print_state (saved_signo) == 0 + && signal_pass_state (saved_signo) == 1) { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLR: Not short circuiting for ignored " + "status 0x%x\n", lp->status); + /* FIXME: What should we do if we are supposed to continue this thread with a signal? */ gdb_assert (signo == TARGET_SIGNAL_0); - return; + signo = saved_signo; + lp->status = 0; } + } - /* Mark LWP as not stopped to prevent it from being continued by - resume_callback. */ - lp->stopped = 0; + if (lp->status) + { + /* FIXME: What should we do if we are supposed to continue + this thread with a signal? */ + gdb_assert (signo == TARGET_SIGNAL_0); + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLR: Short circuiting for status 0x%x\n", + lp->status); + + return; } + /* Mark LWP as not stopped to prevent it from being continued by + resume_callback. */ + lp->stopped = 0; + if (resume_all) iterate_over_lwps (resume_callback, NULL); - child_resume (ptid, step, signo); + linux_ops->to_resume (ptid, step, signo); + memset (&lp->siginfo, 0, sizeof (lp->siginfo)); + if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "LLR: %s %s, %s (resume event thread)\n", step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT", target_pid_to_str (ptid), signo ? strsignal (signo) : "0"); + + if (target_can_async_p ()) + { + target_executing = 1; + target_async (inferior_event_handler, 0); + } } /* Issue kill to specified lwp. */ @@ -1123,39 +1589,113 @@ kill_lwp (int lwpid, int signo) return kill (lwpid, signo); } -/* Handle a GNU/Linux extended wait response. Most of the work we - just pass off to linux_handle_extended_wait, but if it reports a - clone event we need to add the new LWP to our list (and not report - the trap to higher layers). This function returns non-zero if - the event should be ignored and we should wait again. 
*/ +/* Handle a GNU/Linux extended wait response. If we see a clone + event, we need to add the new LWP to our list (and not report the + trap to higher layers). This function returns non-zero if the + event should be ignored and we should wait again. If STOPPING is + true, the new LWP remains stopped, otherwise it is continued. */ static int -linux_nat_handle_extended (struct lwp_info *lp, int status) +linux_handle_extended_wait (struct lwp_info *lp, int status, + int stopping) { - linux_handle_extended_wait (GET_LWP (lp->ptid), status, - &lp->waitstatus); + int pid = GET_LWP (lp->ptid); + struct target_waitstatus *ourstatus = &lp->waitstatus; + struct lwp_info *new_lp = NULL; + int event = status >> 16; - /* TARGET_WAITKIND_SPURIOUS is used to indicate clone events. */ - if (lp->waitstatus.kind == TARGET_WAITKIND_SPURIOUS) + if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK + || event == PTRACE_EVENT_CLONE) { - struct lwp_info *new_lp; - new_lp = add_lwp (BUILD_LWP (lp->waitstatus.value.related_pid, - GET_PID (inferior_ptid))); - new_lp->cloned = 1; - new_lp->stopped = 1; + unsigned long new_pid; + int ret; - lp->waitstatus.kind = TARGET_WAITKIND_IGNORE; + ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid); - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLHE: Got clone event from LWP %ld, resuming\n", - GET_LWP (lp->ptid)); - ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); + /* If we haven't already seen the new PID stop, wait for it now. */ + if (! pull_pid_from_list (&stopped_pids, new_pid, &status)) + { + /* The new child has a pending SIGSTOP. We can't affect it until it + hits the SIGSTOP, but we're already attached. */ + ret = my_waitpid (new_pid, &status, + (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0); + if (ret == -1) + perror_with_name (_("waiting for new child")); + else if (ret != new_pid) + internal_error (__FILE__, __LINE__, + _("wait returned unexpected PID %d"), ret); + else if (!WIFSTOPPED (status)) + internal_error (__FILE__, __LINE__, + _("wait returned unexpected status 0x%x"), status); + } - return 1; + ourstatus->value.related_pid = new_pid; + + if (event == PTRACE_EVENT_FORK) + ourstatus->kind = TARGET_WAITKIND_FORKED; + else if (event == PTRACE_EVENT_VFORK) + ourstatus->kind = TARGET_WAITKIND_VFORKED; + else + { + ourstatus->kind = TARGET_WAITKIND_IGNORE; + new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid))); + new_lp->cloned = 1; + + if (WSTOPSIG (status) != SIGSTOP) + { + /* This can happen if someone starts sending signals to + the new thread before it gets a chance to run, which + have a lower number than SIGSTOP (e.g. SIGUSR1). + This is an unlikely case, and harder to handle for + fork / vfork than for clone, so we do not try - but + we handle it for clone events here. We'll send + the other signal on to the thread below. */ + + new_lp->signalled = 1; + } + else + status = 0; + + if (stopping) + new_lp->stopped = 1; + else + { + new_lp->resumed = 1; + ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0, + status ? 
WSTOPSIG (status) : 0); + } + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHEW: Got clone event from LWP %ld, resuming\n", + GET_LWP (lp->ptid)); + ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); + + return 1; + } + + return 0; } - return 0; + if (event == PTRACE_EVENT_EXEC) + { + ourstatus->kind = TARGET_WAITKIND_EXECD; + ourstatus->value.execd_pathname + = xstrdup (linux_child_pid_to_exec_file (pid)); + + if (linux_parent_pid) + { + detach_breakpoints (linux_parent_pid); + ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0); + + linux_parent_pid = 0; + } + + return 0; + } + + internal_error (__FILE__, __LINE__, + _("unknown ptrace event %d"), event); } /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has @@ -1171,10 +1711,10 @@ wait_lwp (struct lwp_info *lp) gdb_assert (!lp->stopped); gdb_assert (lp->status == 0); - pid = waitpid (GET_LWP (lp->ptid), &status, 0); + pid = my_waitpid (GET_LWP (lp->ptid), &status, 0); if (pid == -1 && errno == ECHILD) { - pid = waitpid (GET_LWP (lp->ptid), &status, __WCLONE); + pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE); if (pid == -1 && errno == ECHILD) { /* The thread has previously exited. We need to delete it @@ -1213,16 +1753,7 @@ wait_lwp (struct lwp_info *lp) if (thread_dead) { - if (in_thread_list (lp->ptid)) - { - /* Core GDB cannot deal with us deleting the current thread. */ - if (!ptid_equal (lp->ptid, inferior_ptid)) - delete_thread (lp->ptid); - printf_unfiltered (_("[%s exited]\n"), - target_pid_to_str (lp->ptid)); - } - - delete_lwp (lp->ptid); + exit_lwp (lp); return 0; } @@ -1235,13 +1766,29 @@ wait_lwp (struct lwp_info *lp) fprintf_unfiltered (gdb_stdlog, "WL: Handling extended status 0x%06x\n", status); - if (linux_nat_handle_extended (lp, status)) + if (linux_handle_extended_wait (lp, status, 1)) return wait_lwp (lp); } return status; } +/* Save the most recent siginfo for LP. This is currently only called + for SIGTRAP; some ports use the si_addr field for + target_stopped_data_address. In the future, it may also be used to + restore the siginfo of requeued signals. */ + +static void +save_siginfo (struct lwp_info *lp) +{ + errno = 0; + ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid), + (PTRACE_TYPE_ARG3) 0, &lp->siginfo); + + if (errno != 0) + memset (&lp->siginfo, 0, sizeof (lp->siginfo)); +} + /* Send a SIGSTOP to LP. */ static int @@ -1327,6 +1874,9 @@ stop_wait_callback (struct lwp_info *lp, void *data) user will delete or disable the breakpoint, but the thread will have already tripped on it. */ + /* Save the trap's siginfo in case we need it later. */ + save_siginfo (lp); + /* Now resume this LWP and get the SIGSTOP event. */ errno = 0; ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); @@ -1341,22 +1891,44 @@ stop_wait_callback (struct lwp_info *lp, void *data) "SWC: Candidate SIGTRAP event in %s\n", target_pid_to_str (lp->ptid)); } - /* Hold the SIGTRAP for handling by linux_nat_wait. */ + /* Hold this event/waitstatus while we check to see if + there are any more (we still want to get that SIGSTOP). */ stop_wait_callback (lp, data); - /* If there's another event, throw it back into the queue. */ - if (lp->status) + + if (target_can_async_p ()) { - if (debug_linux_nat) + /* Don't leave a pending wait status in async mode. + Retrigger the breakpoint. */ + if (!cancel_breakpoint (lp)) { - fprintf_unfiltered (gdb_stdlog, - "SWC: kill %s, %s\n", - target_pid_to_str (lp->ptid), - status_to_str ((int) status)); + /* There was no gdb breakpoint set at pc. Put + the event back in the queue. 
*/ + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "SWC: kill %s, %s\n", + target_pid_to_str (lp->ptid), + status_to_str ((int) status)); + kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status)); } - kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status)); } - /* Save the sigtrap event. */ - lp->status = status; + else + { + /* Hold the SIGTRAP for handling by + linux_nat_wait. */ + /* If there's another event, throw it back into the + queue. */ + if (lp->status) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "SWC: kill %s, %s\n", + target_pid_to_str (lp->ptid), + status_to_str ((int) status)); + kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status)); + } + /* Save the sigtrap event. */ + lp->status = status; + } return 0; } else @@ -1383,12 +1955,11 @@ stop_wait_callback (struct lwp_info *lp, void *data) /* Hold this event/waitstatus while we check to see if there are any more (we still want to get that SIGSTOP). */ stop_wait_callback (lp, data); - /* If the lp->status field is still empty, use it to hold - this event. If not, then this event must be returned - to the event queue of the LWP. */ - if (lp->status == 0) - lp->status = status; - else + + /* If the lp->status field is still empty, use it to + hold this event. If not, then this event must be + returned to the event queue of the LWP. */ + if (lp->status || target_can_async_p ()) { if (debug_linux_nat) { @@ -1399,6 +1970,8 @@ stop_wait_callback (struct lwp_info *lp, void *data) } kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status)); } + else + lp->status = status; return 0; } } @@ -1475,7 +2048,15 @@ flush_callback (struct lwp_info *lp, void *data) lp->status = 0; } - while (linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask)) + /* While there is a pending signal we would like to flush, continue + the inferior and collect another signal. But if there's already + a saved status that we don't want to flush, we can't resume the + inferior - if it stopped for some other reason we wouldn't have + anywhere to save the new status. In that case, we must leave the + signal unflushed (and possibly generate an extra SIGINT stop). + That's much less bad than losing a signal. */ + while (lp->status == 0 + && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask)) { int ret; @@ -1560,6 +2141,37 @@ select_event_lwp_callback (struct lwp_info *lp, void *data) return 0; } +static int +cancel_breakpoint (struct lwp_info *lp) +{ + /* Arrange for a breakpoint to be hit again later. We don't keep + the SIGTRAP status and don't forward the SIGTRAP signal to the + LWP. We will handle the current event, eventually we will resume + this LWP, and this breakpoint will trap again. + + If we do not do this, then we run the risk that the user will + delete or disable the breakpoint, but the LWP will have already + tripped on it. */ + + if (breakpoint_inserted_here_p (read_pc_pid (lp->ptid) - + gdbarch_decr_pc_after_break + (current_gdbarch))) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "CB: Push back breakpoint for %s\n", + target_pid_to_str (lp->ptid)); + + /* Back up the PC if necessary. 
*/ + if (gdbarch_decr_pc_after_break (current_gdbarch)) + write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break + (current_gdbarch), + lp->ptid); + return 1; + } + return 0; +} + static int cancel_breakpoints_callback (struct lwp_info *lp, void *data) { @@ -1582,21 +2194,9 @@ cancel_breakpoints_callback (struct lwp_info *lp, void *data) if (lp->status != 0 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP - && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) - - DECR_PC_AFTER_BREAK)) - { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "CBC: Push back breakpoint for %s\n", - target_pid_to_str (lp->ptid)); - - /* Back up the PC if necessary. */ - if (DECR_PC_AFTER_BREAK) - write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid); - - /* Throw away the SIGTRAP. */ - lp->status = 0; - } + && cancel_breakpoint (lp)) + /* Throw away the SIGTRAP. */ + lp->status = 0; return 0; } @@ -1610,7 +2210,7 @@ select_event_lwp (struct lwp_info **orig_lp, int *status) int random_selector; struct lwp_info *event_lp; - /* Record the wait status for the origional LWP. */ + /* Record the wait status for the original LWP. */ (*orig_lp)->status = *status; /* Give preference to any LWP that is being single-stepped. */ @@ -1662,134 +2262,239 @@ resumed_callback (struct lwp_info *lp, void *data) return lp->resumed; } -#ifdef CHILD_WAIT +/* Stop an active thread, verify it still exists, then resume it. */ + +static int +stop_and_resume_callback (struct lwp_info *lp, void *data) +{ + struct lwp_info *ptr; + + if (!lp->stopped && !lp->signalled) + { + stop_callback (lp, NULL); + stop_wait_callback (lp, NULL); + /* Resume if the lwp still exists. */ + for (ptr = lwp_list; ptr; ptr = ptr->next) + if (lp == ptr) + { + resume_callback (lp, NULL); + resume_set_callback (lp, NULL); + } + } + return 0; +} -/* We need to override child_wait to support attaching to cloned - processes, since a normal wait (as done by the default version) - ignores those processes. */ +/* Check if we should go on and pass this event to common code. + Return the affected lwp if we are, or NULL otherwise. */ +static struct lwp_info * +linux_nat_filter_event (int lwpid, int status, int options) +{ + struct lwp_info *lp; -/* Wait for child PTID to do something. Return id of the child, - minus_one_ptid in case of error; store status into *OURSTATUS. */ + lp = find_lwp_pid (pid_to_ptid (lwpid)); -ptid_t -child_wait (ptid_t ptid, struct target_waitstatus *ourstatus) -{ - int save_errno; - int status; - pid_t pid; + /* Check for stop events reported by a process we didn't already + know about - anything not already in our LWP list. - ourstatus->kind = TARGET_WAITKIND_IGNORE; + If we're expecting to receive stopped processes after + fork, vfork, and clone events, then we'll just add the + new one to our list and go back to waiting for the event + to be reported - the stopped process might be returned + from waitpid before or after the event is. */ + if (WIFSTOPPED (status) && !lp) + { + linux_record_stopped_pid (lwpid, status); + return NULL; + } - do + /* Make sure we don't report an event for the exit of an LWP not in + our list, i.e. not part of the current process. This can happen + if we detach from a program we original forked and then it + exits. */ + if (!WIFSTOPPED (status) && !lp) + return NULL; + + /* NOTE drow/2003-06-17: This code seems to be meant for debugging + CLONE_PTRACE processes which do not use the thread library - + otherwise we wouldn't find the new LWP this way. 
That doesn't + currently work, and the following code is currently unreachable + due to the two blocks above. If it's fixed some day, this code + should be broken out into a function so that we can also pick up + LWPs from the new interface. */ + if (!lp) { - set_sigint_trap (); /* Causes SIGINT to be passed on to the - attached process. */ - set_sigio_trap (); + lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid))); + if (options & __WCLONE) + lp->cloned = 1; - pid = waitpid (GET_PID (ptid), &status, 0); - if (pid == -1 && errno == ECHILD) - /* Try again with __WCLONE to check cloned processes. */ - pid = waitpid (GET_PID (ptid), &status, __WCLONE); + gdb_assert (WIFSTOPPED (status) + && WSTOPSIG (status) == SIGSTOP); + lp->signalled = 1; - if (debug_linux_nat) + if (!in_thread_list (inferior_ptid)) { - fprintf_unfiltered (gdb_stdlog, - "CW: waitpid %ld received %s\n", - (long) pid, status_to_str (status)); + inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), + GET_PID (inferior_ptid)); + add_thread (inferior_ptid); } - save_errno = errno; + add_thread (lp->ptid); + } + + /* Save the trap's siginfo in case we need it later. */ + if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP) + save_siginfo (lp); - /* Make sure we don't report an event for the exit of the - original program, if we've detached from it. */ - if (pid != -1 && !WIFSTOPPED (status) && pid != GET_PID (inferior_ptid)) - { - pid = -1; - save_errno = EINTR; - } + /* Handle GNU/Linux's extended waitstatus for trace events. */ + if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: Handling extended status 0x%06x\n", + status); + if (linux_handle_extended_wait (lp, status, 0)) + return NULL; + } - /* Check for stop events reported by a process we didn't already - know about - in this case, anything other than inferior_ptid. - - If we're expecting to receive stopped processes after fork, - vfork, and clone events, then we'll just add the new one to - our list and go back to waiting for the event to be reported - - the stopped process might be returned from waitpid before - or after the event is. If we want to handle debugging of - CLONE_PTRACE processes we need to do more here, i.e. switch - to multi-threaded mode. */ - if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP - && pid != GET_PID (inferior_ptid)) + /* Check if the thread has exited. */ + if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1) + { + /* If this is the main thread, we must stop all threads and + verify if they are still alive. This is because in the nptl + thread model, there is no signal issued for exiting LWPs + other than the main thread. We only get the main thread exit + signal once all child threads have already exited. If we + stop all the threads and use the stop_wait_callback to check + if they have exited we can determine whether this signal + should be ignored or whether it means the end of the debugged + application, regardless of which threading model is being + used. */ + if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)) { - linux_record_stopped_pid (pid); - pid = -1; - save_errno = EINTR; + lp->stopped = 1; + iterate_over_lwps (stop_and_resume_callback, NULL); } - /* Handle GNU/Linux's extended waitstatus for trace events. 
*/ - if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP - && status >> 16 != 0) + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: %s exited.\n", + target_pid_to_str (lp->ptid)); + + exit_lwp (lp); + + /* If there is at least one more LWP, then the exit signal was + not the end of the debugged application and should be + ignored. */ + if (num_lwps > 0) { - linux_handle_extended_wait (pid, status, ourstatus); + /* Make sure there is at least one thread running. */ + gdb_assert (iterate_over_lwps (running_callback, NULL)); - /* If we see a clone event, detach the child, and don't - report the event. It would be nice to offer some way to - switch into a non-thread-db based threaded mode at this - point. */ - if (ourstatus->kind == TARGET_WAITKIND_SPURIOUS) - { - ptrace (PTRACE_DETACH, ourstatus->value.related_pid, 0, 0); - ourstatus->kind = TARGET_WAITKIND_IGNORE; - ptrace (PTRACE_CONT, pid, 0, 0); - pid = -1; - save_errno = EINTR; - } + /* Discard the event. */ + return NULL; } - - clear_sigio_trap (); - clear_sigint_trap (); } - while (pid == -1 && save_errno == EINTR); - if (pid == -1) + /* Check if the current LWP has previously exited. In the nptl + thread model, LWPs other than the main thread do not issue + signals when they exit so we must check whenever the thread has + stopped. A similar check is made in stop_wait_callback(). */ + if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid)) { - warning (_("Child process unexpectedly missing: %s"), - safe_strerror (errno)); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: %s exited.\n", + target_pid_to_str (lp->ptid)); + + exit_lwp (lp); - /* Claim it exited with unknown signal. */ - ourstatus->kind = TARGET_WAITKIND_SIGNALLED; - ourstatus->value.sig = TARGET_SIGNAL_UNKNOWN; - return minus_one_ptid; + /* Make sure there is at least one thread running. */ + gdb_assert (iterate_over_lwps (running_callback, NULL)); + + /* Discard the event. */ + return NULL; } - if (ourstatus->kind == TARGET_WAITKIND_IGNORE) - store_waitstatus (ourstatus, status); + /* Make sure we don't report a SIGSTOP that we sent ourselves in + an attempt to stop an LWP. */ + if (lp->signalled + && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: Delayed SIGSTOP caught for %s.\n", + target_pid_to_str (lp->ptid)); - return pid_to_ptid (pid); -} + /* This is a delayed SIGSTOP. */ + lp->signalled = 0; -#endif + registers_changed (); -/* Stop an active thread, verify it still exists, then resume it. */ + linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)), + lp->step, TARGET_SIGNAL_0); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: %s %s, 0, 0 (discard SIGSTOP)\n", + lp->step ? + "PTRACE_SINGLESTEP" : "PTRACE_CONT", + target_pid_to_str (lp->ptid)); -static int -stop_and_resume_callback (struct lwp_info *lp, void *data) + lp->stopped = 0; + gdb_assert (lp->resumed); + + /* Discard the event. */ + return NULL; + } + + /* An interesting event. */ + gdb_assert (lp); + return lp; +} + +/* Get the events stored in the pipe into the local queue, so they are + accessible to queued_waitpid. We need to do this, since it is not + always the case that the event at the head of the pipe is the event + we want. 
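+
+   (For illustration only -- this is a sketch of the intent, not
+   necessarily the exact body of queued_waitpid, which is defined
+   elsewhere in this file.)  A caller may ask for one specific PID, or
+   pass -1 for any, so the consumer has to be able to search the whole
+   local queue rather than popping events strictly in arrival order,
+   roughly:
+
+     struct waitpid_result *w;
+     for (w = waitpid_queue; w != NULL; w = w->next)
+       if (pid == -1 || w->pid == pid)
+         break;    // unlink W and hand W->status back to the caller
+
+   That kind of lookup is not possible while the events still sit in
+   the kernel pipe, which only supports FIFO reads.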
*/ + +static void +pipe_to_local_event_queue (void) { - struct lwp_info *ptr; + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, + "PTLEQ: linux_nat_num_queued_events(%d)\n", + linux_nat_num_queued_events); + while (linux_nat_num_queued_events) + { + int lwpid, status, options; + lwpid = linux_nat_event_pipe_pop (&status, &options); + gdb_assert (lwpid > 0); + push_waitpid (lwpid, status, options); + } +} - if (!lp->stopped && !lp->signalled) +/* Get the unprocessed events stored in the local queue back into the + pipe, so the event loop realizes there's something else to + process. */ + +static void +local_event_queue_to_pipe (void) +{ + struct waitpid_result *w = waitpid_queue; + while (w) { - stop_callback (lp, NULL); - stop_wait_callback (lp, NULL); - /* Resume if the lwp still exists. */ - for (ptr = lwp_list; ptr; ptr = ptr->next) - if (lp == ptr) - { - resume_callback (lp, NULL); - resume_set_callback (lp, NULL); - } + struct waitpid_result *next = w->next; + linux_nat_event_pipe_push (w->pid, + w->status, + w->options); + xfree (w); + w = next; } - return 0; + waitpid_queue = NULL; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, + "LEQTP: linux_nat_num_queued_events(%d)\n", + linux_nat_num_queued_events); } static ptid_t @@ -1801,20 +2506,34 @@ linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus) pid_t pid = PIDGET (ptid); sigset_t flush_mask; - sigemptyset (&flush_mask); + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: enter\n"); - /* Make sure SIGCHLD is blocked. */ - if (!sigismember (&blocked_mask, SIGCHLD)) + /* The first time we get here after starting a new inferior, we may + not have added it to the LWP list yet - this is the earliest + moment at which we know its PID. */ + if (num_lwps == 0) { - sigaddset (&blocked_mask, SIGCHLD); - sigprocmask (SIG_BLOCK, &blocked_mask, NULL); + gdb_assert (!is_lwp (inferior_ptid)); + + inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), + GET_PID (inferior_ptid)); + lp = add_lwp (inferior_ptid); + lp->resumed = 1; + /* Add the main thread to GDB's thread list. */ + add_thread_silent (lp->ptid); } + sigemptyset (&flush_mask); + + if (target_can_async_p ()) + /* Block events while we're here. */ + target_async (NULL, 0); + retry: - /* Make sure there is at least one LWP that has been resumed, at - least if there are any LWPs at all. */ - gdb_assert (num_lwps == 0 || iterate_over_lwps (resumed_callback, NULL)); + /* Make sure there is at least one LWP that has been resumed. */ + gdb_assert (iterate_over_lwps (resumed_callback, NULL)); /* First check if there is a LWP with a wait status pending. */ if (pid == -1) @@ -1823,6 +2542,10 @@ retry: lp = iterate_over_lwps (status_callback, NULL); if (lp) { + if (target_can_async_p ()) + internal_error (__FILE__, __LINE__, + "Found an LWP with a pending status in async mode."); + status = lp->status; lp->status = 0; @@ -1833,7 +2556,7 @@ retry: target_pid_to_str (lp->ptid)); } - /* But if we don't fine one, we'll have to wait, and check both + /* But if we don't find one, we'll have to wait, and check both cloned and uncloned processes. We start with the cloned processes. */ options = __WCLONE | WNOHANG; @@ -1878,8 +2601,8 @@ retry: /* Resume the thread. It should halt immediately returning the pending SIGSTOP. 
*/ registers_changed (); - child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step, - TARGET_SIGNAL_0); + linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)), + lp->step, TARGET_SIGNAL_0); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "LLW: %s %s, 0, 0 (expect SIGSTOP)\n", @@ -1892,15 +2615,24 @@ retry: stop_wait_callback (lp, NULL); } - set_sigint_trap (); /* Causes SIGINT to be passed on to the - attached process. */ - set_sigio_trap (); + if (!target_can_async_p ()) + { + /* Causes SIGINT to be passed on to the attached process. */ + set_sigint_trap (); + set_sigio_trap (); + } while (status == 0) { pid_t lwpid; - lwpid = waitpid (pid, &status, options); + if (target_can_async_p ()) + /* In async mode, don't ever block. Only look at the locally + queued events. */ + lwpid = queued_waitpid (pid, &status, options); + else + lwpid = my_waitpid (pid, &status, options); + if (lwpid > 0) { gdb_assert (pid == -1 || lwpid == pid); @@ -1912,187 +2644,10 @@ retry: (long) lwpid, status_to_str (status)); } - lp = find_lwp_pid (pid_to_ptid (lwpid)); - - /* Check for stop events reported by a process we didn't - already know about - anything not already in our LWP - list. - - If we're expecting to receive stopped processes after - fork, vfork, and clone events, then we'll just add the - new one to our list and go back to waiting for the event - to be reported - the stopped process might be returned - from waitpid before or after the event is. */ - if (WIFSTOPPED (status) && !lp) - { - linux_record_stopped_pid (lwpid); - status = 0; - continue; - } - - /* Make sure we don't report an event for the exit of an LWP not in - our list, i.e. not part of the current process. This can happen - if we detach from a program we original forked and then it - exits. */ - if (!WIFSTOPPED (status) && !lp) - { - status = 0; - continue; - } - - /* NOTE drow/2003-06-17: This code seems to be meant for debugging - CLONE_PTRACE processes which do not use the thread library - - otherwise we wouldn't find the new LWP this way. That doesn't - currently work, and the following code is currently unreachable - due to the two blocks above. If it's fixed some day, this code - should be broken out into a function so that we can also pick up - LWPs from the new interface. */ + lp = linux_nat_filter_event (lwpid, status, options); if (!lp) { - lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid))); - if (options & __WCLONE) - lp->cloned = 1; - - if (threaded) - { - gdb_assert (WIFSTOPPED (status) - && WSTOPSIG (status) == SIGSTOP); - lp->signalled = 1; - - if (!in_thread_list (inferior_ptid)) - { - inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), - GET_PID (inferior_ptid)); - add_thread (inferior_ptid); - } - - add_thread (lp->ptid); - printf_unfiltered (_("[New %s]\n"), - target_pid_to_str (lp->ptid)); - } - } - - /* Handle GNU/Linux's extended waitstatus for trace events. */ - if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0) - { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: Handling extended status 0x%06x\n", - status); - if (linux_nat_handle_extended (lp, status)) - { - status = 0; - continue; - } - } - - /* Check if the thread has exited. */ - if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1) - { - if (in_thread_list (lp->ptid)) - { - /* Core GDB cannot deal with us deleting the current - thread. 
*/ - if (!ptid_equal (lp->ptid, inferior_ptid)) - delete_thread (lp->ptid); - printf_unfiltered (_("[%s exited]\n"), - target_pid_to_str (lp->ptid)); - } - - /* If this is the main thread, we must stop all threads and - verify if they are still alive. This is because in the nptl - thread model, there is no signal issued for exiting LWPs - other than the main thread. We only get the main thread - exit signal once all child threads have already exited. - If we stop all the threads and use the stop_wait_callback - to check if they have exited we can determine whether this - signal should be ignored or whether it means the end of the - debugged application, regardless of which threading model - is being used. */ - if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)) - { - lp->stopped = 1; - iterate_over_lwps (stop_and_resume_callback, NULL); - } - - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: %s exited.\n", - target_pid_to_str (lp->ptid)); - - delete_lwp (lp->ptid); - - /* If there is at least one more LWP, then the exit signal - was not the end of the debugged application and should be - ignored. */ - if (num_lwps > 0) - { - /* Make sure there is at least one thread running. */ - gdb_assert (iterate_over_lwps (running_callback, NULL)); - - /* Discard the event. */ - status = 0; - continue; - } - } - - /* Check if the current LWP has previously exited. In the nptl - thread model, LWPs other than the main thread do not issue - signals when they exit so we must check whenever the thread - has stopped. A similar check is made in stop_wait_callback(). */ - if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid)) - { - if (in_thread_list (lp->ptid)) - { - /* Core GDB cannot deal with us deleting the current - thread. */ - if (!ptid_equal (lp->ptid, inferior_ptid)) - delete_thread (lp->ptid); - printf_unfiltered (_("[%s exited]\n"), - target_pid_to_str (lp->ptid)); - } - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: %s exited.\n", - target_pid_to_str (lp->ptid)); - - delete_lwp (lp->ptid); - - /* Make sure there is at least one thread running. */ - gdb_assert (iterate_over_lwps (running_callback, NULL)); - - /* Discard the event. */ - status = 0; - continue; - } - - /* Make sure we don't report a SIGSTOP that we sent - ourselves in an attempt to stop an LWP. */ - if (lp->signalled - && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP) - { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: Delayed SIGSTOP caught for %s.\n", - target_pid_to_str (lp->ptid)); - - /* This is a delayed SIGSTOP. */ - lp->signalled = 0; - - registers_changed (); - child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step, - TARGET_SIGNAL_0); - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: %s %s, 0, 0 (discard SIGSTOP)\n", - lp->step ? - "PTRACE_SINGLESTEP" : "PTRACE_CONT", - target_pid_to_str (lp->ptid)); - - lp->stopped = 0; - gdb_assert (lp->resumed); - - /* Discard the event. */ + /* A discarded event. */ status = 0; continue; } @@ -2105,17 +2660,38 @@ retry: /* Alternate between checking cloned and uncloned processes. */ options ^= __WCLONE; - /* And suspend every time we have checked both. */ + /* And every time we have checked both: + In async mode, return to event loop; + In sync mode, suspend waiting for a SIGCHLD signal. */ if (options & __WCLONE) - sigsuspend (&suspend_mask); + { + if (target_can_async_p ()) + { + /* No interesting event. */ + ourstatus->kind = TARGET_WAITKIND_IGNORE; + + /* Get ready for the next event. 
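+
+		     (Illustration of the intended flow only, not a
+		      statement about the event-loop code itself: once
+		      the event pipe becomes readable, the callback
+		      registered below is expected to run roughly
+
+			inferior_event_handler (INF_REG_EVENT, ...)
+			  -> fetch_inferior_event ()
+			    -> target_wait ()   // back into this function
+
+		      so returning TARGET_WAITKIND_IGNORE here merely
+		      defers the work until that happens.)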
*/ + target_async (inferior_event_handler, 0); + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n"); + + return minus_one_ptid; + } + + sigsuspend (&suspend_mask); + } } /* We shouldn't end up here unless we want to try again. */ gdb_assert (status == 0); } - clear_sigio_trap (); - clear_sigint_trap (); + if (!target_can_async_p ()) + { + clear_sigio_trap (); + clear_sigint_trap (); + } gdb_assert (lp); @@ -2130,7 +2706,10 @@ retry: { int signo = target_signal_from_host (WSTOPSIG (status)); - if (signal_stop_state (signo) == 0 + /* If we get a signal while single-stepping, we may need special + care, e.g. to skip the signal handler. Defer to common code. */ + if (!lp->step + && signal_stop_state (signo) == 0 && signal_print_state (signo) == 0 && signal_pass_state (signo) == 1) { @@ -2140,7 +2719,8 @@ retry: newly attached threads may cause an unwanted delay in getting them running. */ registers_changed (); - child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step, signo); + linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)), + lp->step, signo); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "LLW: %s %s, %s (preempt 'handle')\n", @@ -2189,12 +2769,9 @@ retry: the comment in cancel_breakpoints_callback to find out why. */ iterate_over_lwps (cancel_breakpoints_callback, lp); - /* If we're not running in "threaded" mode, we'll report the bare - process id. */ - if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP) { - trap_ptid = (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid))); + trap_ptid = lp->ptid; if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "LLW: trap_ptid is %s.\n", @@ -2211,7 +2788,14 @@ retry: else store_waitstatus (ourstatus, status); - return (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid))); + /* Get ready for the next event. */ + if (target_can_async_p ()) + target_async (inferior_event_handler, 0); + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: exit\n"); + + return lp->ptid; } static int @@ -2244,12 +2828,19 @@ kill_wait_callback (struct lwp_info *lp, void *data) { do { - pid = waitpid (GET_LWP (lp->ptid), NULL, __WCLONE); - if (pid != (pid_t) -1 && debug_linux_nat) + pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE); + if (pid != (pid_t) -1) { - fprintf_unfiltered (gdb_stdlog, - "KWC: wait %s received unknown.\n", - target_pid_to_str (lp->ptid)); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "KWC: wait %s received unknown.\n", + target_pid_to_str (lp->ptid)); + /* The Linux kernel sometimes fails to kill a thread + completely after PTRACE_KILL; that goes from the stop + point in do_fork out to the one in + get_signal_to_deliever and waits again. So kill it + again. */ + kill_callback (lp, NULL); } } while (pid == GET_LWP (lp->ptid)); @@ -2259,12 +2850,15 @@ kill_wait_callback (struct lwp_info *lp, void *data) do { - pid = waitpid (GET_LWP (lp->ptid), NULL, 0); - if (pid != (pid_t) -1 && debug_linux_nat) + pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0); + if (pid != (pid_t) -1) { - fprintf_unfiltered (gdb_stdlog, - "KWC: wait %s received unk.\n", - target_pid_to_str (lp->ptid)); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "KWC: wait %s received unk.\n", + target_pid_to_str (lp->ptid)); + /* See the call to kill_callback above. */ + kill_callback (lp, NULL); } } while (pid == GET_LWP (lp->ptid)); @@ -2276,20 +2870,41 @@ kill_wait_callback (struct lwp_info *lp, void *data) static void linux_nat_kill (void) { - /* Kill all LWP's ... 
*/ - iterate_over_lwps (kill_callback, NULL); + struct target_waitstatus last; + ptid_t last_ptid; + int status; - /* ... and wait until we've flushed all events. */ - iterate_over_lwps (kill_wait_callback, NULL); + if (target_can_async_p ()) + target_async (NULL, 0); - target_mourn_inferior (); -} + /* If we're stopped while forking and we haven't followed yet, + kill the other task. We need to do this first because the + parent will be sleeping if this is a vfork. */ -static void -linux_nat_create_inferior (char *exec_file, char *allargs, char **env, - int from_tty) -{ - deprecated_child_ops.to_create_inferior (exec_file, allargs, env, from_tty); + get_last_target_status (&last_ptid, &last); + + if (last.kind == TARGET_WAITKIND_FORKED + || last.kind == TARGET_WAITKIND_VFORKED) + { + ptrace (PT_KILL, last.value.related_pid, 0, 0); + wait (&status); + } + + if (forks_exist_p ()) + { + linux_fork_killall (); + drain_queued_events (-1); + } + else + { + /* Kill all LWP's ... */ + iterate_over_lwps (kill_callback, NULL); + + /* ... and wait until we've flushed all events. */ + iterate_over_lwps (kill_wait_callback, NULL); + } + + target_mourn_inferior (); } static void @@ -2300,26 +2915,34 @@ linux_nat_mourn_inferior (void) /* Destroy LWP info; it's no longer valid. */ init_lwp_list (); - /* Restore the original signal mask. */ - sigprocmask (SIG_SETMASK, &normal_mask, NULL); - sigemptyset (&blocked_mask); - - deprecated_child_ops.to_mourn_inferior (); + if (! forks_exist_p ()) + { + /* Normal case, no other forks available. */ + if (target_can_async_p ()) + linux_nat_async (NULL, 0); + linux_ops->to_mourn_inferior (); + } + else + /* Multi-fork case. The current inferior_ptid has exited, but + there are other viable forks to debug. Delete the exiting + one and context-switch to the first available. */ + linux_fork_mourn_inferior (); } -static int -linux_nat_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write, - struct mem_attrib *attrib, struct target_ops *target) +static LONGEST +linux_nat_xfer_partial (struct target_ops *ops, enum target_object object, + const char *annex, gdb_byte *readbuf, + const gdb_byte *writebuf, + ULONGEST offset, LONGEST len) { struct cleanup *old_chain = save_inferior_ptid (); - int xfer; + LONGEST xfer; if (is_lwp (inferior_ptid)) inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid)); - xfer = linux_proc_xfer_memory (memaddr, myaddr, len, write, attrib, target); - if (xfer == 0) - xfer = child_xfer_memory (memaddr, myaddr, len, write, attrib, target); + xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf, + offset, len); do_cleanups (old_chain); return xfer; @@ -2337,7 +2960,13 @@ linux_nat_thread_alive (ptid_t ptid) "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n", target_pid_to_str (ptid), errno ? safe_strerror (errno) : "OK"); - if (errno) + + /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can + handle that case gracefully since ptrace will first do a lookup + for the process based upon the passed-in pid. If that fails we + will get either -ESRCH or -EPERM, otherwise the child exists and + is alive. 
*/ + if (errno == ESRCH || errno == EPERM) return 0; return 1; @@ -2348,7 +2977,9 @@ linux_nat_pid_to_str (ptid_t ptid) { static char buf[64]; - if (is_lwp (ptid)) + if (is_lwp (ptid) + && ((lwp_list && lwp_list->next) + || GET_PID (ptid) != GET_LWP (ptid))) { snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid)); return buf; @@ -2357,43 +2988,16 @@ linux_nat_pid_to_str (ptid_t ptid) return normal_pid_to_str (ptid); } -static void -init_linux_nat_ops (void) -{ -#if 0 - linux_nat_ops.to_open = linux_nat_open; -#endif - linux_nat_ops.to_shortname = "lwp-layer"; - linux_nat_ops.to_longname = "lwp-layer"; - linux_nat_ops.to_doc = "Low level threads support (LWP layer)"; - linux_nat_ops.to_attach = linux_nat_attach; - linux_nat_ops.to_detach = linux_nat_detach; - linux_nat_ops.to_resume = linux_nat_resume; - linux_nat_ops.to_wait = linux_nat_wait; - /* fetch_inferior_registers and store_inferior_registers will - honor the LWP id, so we can use them directly. */ - linux_nat_ops.to_fetch_registers = fetch_inferior_registers; - linux_nat_ops.to_store_registers = store_inferior_registers; - linux_nat_ops.deprecated_xfer_memory = linux_nat_xfer_memory; - linux_nat_ops.to_kill = linux_nat_kill; - linux_nat_ops.to_create_inferior = linux_nat_create_inferior; - linux_nat_ops.to_mourn_inferior = linux_nat_mourn_inferior; - linux_nat_ops.to_thread_alive = linux_nat_thread_alive; - linux_nat_ops.to_pid_to_str = linux_nat_pid_to_str; - linux_nat_ops.to_post_startup_inferior = child_post_startup_inferior; - linux_nat_ops.to_post_attach = child_post_attach; - linux_nat_ops.to_insert_fork_catchpoint = child_insert_fork_catchpoint; - linux_nat_ops.to_insert_vfork_catchpoint = child_insert_vfork_catchpoint; - linux_nat_ops.to_insert_exec_catchpoint = child_insert_exec_catchpoint; - - linux_nat_ops.to_stratum = thread_stratum; - linux_nat_ops.to_has_thread_control = tc_schedlock; - linux_nat_ops.to_magic = OPS_MAGIC; -} - static void sigchld_handler (int signo) { + if (linux_nat_async_enabled + && linux_nat_async_events_enabled + && signo == SIGCHLD) + /* It is *always* a bug to hit this. */ + internal_error (__FILE__, __LINE__, + "sigchld_handler called when async events are enabled"); + /* Do nothing. The only reason for this handler is that it allows us to use sigsuspend in linux_nat_wait above to wait for the arrival of a SIGCHLD. */ @@ -2402,8 +3006,8 @@ sigchld_handler (int signo) /* Accepts an integer PID; Returns a string representing a file that can be opened to get the symbols for the child process. */ -char * -child_pid_to_exec_file (int pid) +static char * +linux_child_pid_to_exec_file (int pid) { char *name1, *name2; @@ -2433,7 +3037,8 @@ read_mapping (FILE *mapfile, int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx", addr, endaddr, permissions, offset, device, inode); - if (ret > 0 && ret != EOF && *inode != 0) + filename[0] = '\0'; + if (ret > 0 && ret != EOF) { /* Eat everything up to EOL for the filename. This will prevent weird filenames (such as one with embedded whitespace) from @@ -2444,11 +3049,7 @@ read_mapping (FILE *mapfile, only. */ ret += fscanf (mapfile, "%[^\n]\n", filename); } - else - { - filename[0] = '\0'; /* no filename */ - fscanf (mapfile, "\n"); - } + return (ret != 0 && ret != EOF); } @@ -2495,7 +3096,7 @@ linux_nat_find_memory_regions (int (*func) (CORE_ADDR, size, paddr_nz (addr), read ? 'r' : ' ', write ? 'w' : ' ', exec ? 
'x' : ' '); - if (filename && filename[0]) + if (filename[0]) fprintf_filtered (gdb_stdout, " for %s", filename); fprintf_filtered (gdb_stdout, "\n"); } @@ -2521,21 +3122,57 @@ linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, gdb_fpxregset_t fpxregs; #endif unsigned long lwp = ptid_get_lwp (ptid); + struct regcache *regcache = get_thread_regcache (ptid); + struct gdbarch *gdbarch = get_regcache_arch (regcache); + const struct regset *regset; + int core_regset_p; + struct cleanup *old_chain; + + old_chain = save_inferior_ptid (); + inferior_ptid = ptid; + target_fetch_registers (regcache, -1); + do_cleanups (old_chain); + + core_regset_p = gdbarch_regset_from_core_section_p (gdbarch); + if (core_regset_p + && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg", + sizeof (gregs))) != NULL + && regset->collect_regset != NULL) + regset->collect_regset (regset, regcache, -1, + &gregs, sizeof (gregs)); + else + fill_gregset (regcache, &gregs, -1); - fill_gregset (&gregs, -1); note_data = (char *) elfcore_write_prstatus (obfd, note_data, note_size, lwp, stop_signal, &gregs); - fill_fpregset (&fpregs, -1); + if (core_regset_p + && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2", + sizeof (fpregs))) != NULL + && regset->collect_regset != NULL) + regset->collect_regset (regset, regcache, -1, + &fpregs, sizeof (fpregs)); + else + fill_fpregset (regcache, &fpregs, -1); + note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size, &fpregs, sizeof (fpregs)); + #ifdef FILL_FPXREGSET - fill_fpxregset (&fpxregs, -1); + if (core_regset_p + && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp", + sizeof (fpxregs))) != NULL + && regset->collect_regset != NULL) + regset->collect_regset (regset, regcache, -1, + &fpxregs, sizeof (fpxregs)); + else + fill_fpxregset (regcache, &fpxregs, -1); + note_data = (char *) elfcore_write_prxfpreg (obfd, note_data, note_size, @@ -2559,21 +3196,13 @@ static int linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data) { struct linux_nat_corefile_thread_data *args = data; - ptid_t saved_ptid = inferior_ptid; - inferior_ptid = ti->ptid; - registers_changed (); - target_fetch_registers (-1); /* FIXME should not be necessary; - fill_gregset should do it automatically. */ args->note_data = linux_nat_do_thread_registers (args->obfd, ti->ptid, args->note_data, args->note_size); args->num_notes++; - inferior_ptid = saved_ptid; - registers_changed (); - target_fetch_registers (-1); /* FIXME should not be necessary; - fill_gregset should do it automatically. */ + return 0; } @@ -2583,15 +3212,11 @@ static char * linux_nat_do_registers (bfd *obfd, ptid_t ptid, char *note_data, int *note_size) { - registers_changed (); - target_fetch_registers (-1); /* FIXME should not be necessary; - fill_gregset should do it automatically. */ return linux_nat_do_thread_registers (obfd, ptid_build (ptid_get_pid (inferior_ptid), ptid_get_pid (inferior_ptid), 0), note_data, note_size); - return note_data; } /* Fills the "to_make_corefile_note" target vector. Builds the note @@ -2602,11 +3227,13 @@ linux_nat_make_corefile_notes (bfd *obfd, int *note_size) { struct linux_nat_corefile_thread_data thread_args; struct cleanup *old_chain; + /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */ char fname[16] = { '\0' }; + /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). 
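+
+     As an illustration of where the magic sizes come from (the exact
+     declaration may vary between C libraries): on GNU/Linux the
+     prpsinfo note is usually laid out along the lines of
+
+       struct elf_prpsinfo
+       {
+         ...
+         char pr_fname[16];     // filename of executable
+         char pr_psargs[80];    // initial part of the argument list
+       };
+
+     so fname[] above and psargs[] below must be at least that big.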
*/ char psargs[80] = { '\0' }; char *note_data = NULL; ptid_t current_ptid = inferior_ptid; - char *auxv; + gdb_byte *auxv; int auxv_len; if (get_exec_file (0)) @@ -2615,9 +3242,18 @@ linux_nat_make_corefile_notes (bfd *obfd, int *note_size) strncpy (psargs, get_exec_file (0), sizeof (psargs)); if (get_inferior_args ()) { - strncat (psargs, " ", sizeof (psargs) - strlen (psargs)); - strncat (psargs, get_inferior_args (), - sizeof (psargs) - strlen (psargs)); + char *string_end; + char *psargs_end = psargs + sizeof (psargs); + + /* linux_elfcore_write_prpsinfo () handles zero unterminated + strings fine. */ + string_end = memchr (psargs, 0, sizeof (psargs)); + if (string_end != NULL) + { + *string_end++ = ' '; + strncpy (string_end, get_inferior_args (), + psargs_end - string_end); + } } note_data = (char *) elfcore_write_prpsinfo (obfd, note_data, @@ -2642,7 +3278,8 @@ linux_nat_make_corefile_notes (bfd *obfd, int *note_size) note_data = thread_args.note_data; } - auxv_len = target_auxv_read (¤t_target, &auxv); + auxv_len = target_read_alloc (¤t_target, TARGET_OBJECT_AUXV, + NULL, &auxv); if (auxv_len > 0) { note_data = elfcore_write_note (obfd, note_data, note_size, @@ -2733,7 +3370,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if (cmdline_f || all) { sprintf (fname1, "/proc/%lld/cmdline", pid); - if ((procfile = fopen (fname1, "r")) > 0) + if ((procfile = fopen (fname1, "r")) != NULL) { fgets (buffer, sizeof (buffer), procfile); printf_filtered ("cmdline = '%s'\n", buffer); @@ -2763,13 +3400,13 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if (mappings_f || all) { sprintf (fname1, "/proc/%lld/maps", pid); - if ((procfile = fopen (fname1, "r")) > 0) + if ((procfile = fopen (fname1, "r")) != NULL) { long long addr, endaddr, size, offset, inode; char permissions[8], device[8], filename[MAXPATHLEN]; printf_filtered (_("Mapped address spaces:\n\n")); - if (TARGET_ADDR_BIT == 32) + if (gdbarch_addr_bit (current_gdbarch) == 32) { printf_filtered ("\t%10s %10s %10s %10s %7s\n", "Start Addr", @@ -2795,7 +3432,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) a generic local_address_string instead to print out the addresses; that makes sense to me, too. 
*/ - if (TARGET_ADDR_BIT == 32) + if (gdbarch_addr_bit (current_gdbarch) == 32) { printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n", (unsigned long) addr, /* FIXME: pr_addr */ @@ -2823,7 +3460,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if (status_f || all) { sprintf (fname1, "/proc/%lld/status", pid); - if ((procfile = fopen (fname1, "r")) > 0) + if ((procfile = fopen (fname1, "r")) != NULL) { while (fgets (buffer, sizeof (buffer), procfile) != NULL) puts_filtered (buffer); @@ -2835,14 +3472,15 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if (stat_f || all) { sprintf (fname1, "/proc/%lld/stat", pid); - if ((procfile = fopen (fname1, "r")) > 0) + if ((procfile = fopen (fname1, "r")) != NULL) { int itmp; char ctmp; + long ltmp; if (fscanf (procfile, "%d ", &itmp) > 0) printf_filtered (_("Process: %d\n"), itmp); - if (fscanf (procfile, "%s ", &buffer[0]) > 0) + if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0) printf_filtered (_("Exec file: %s\n"), buffer); if (fscanf (procfile, "%c ", &ctmp) > 0) printf_filtered (_("State: %c\n"), ctmp); @@ -2856,71 +3494,71 @@ linux_nat_info_proc_cmd (char *args, int from_tty) printf_filtered (_("TTY: %d\n"), itmp); if (fscanf (procfile, "%d ", &itmp) > 0) printf_filtered (_("TTY owner process group: %d\n"), itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Flags: 0x%x\n"), itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Minor faults (no memory page): %u\n"), - (unsigned int) itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Minor faults, children: %u\n"), - (unsigned int) itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Major faults (memory page faults): %u\n"), - (unsigned int) itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Major faults, children: %u\n"), - (unsigned int) itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered ("utime: %d\n", itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered ("stime: %d\n", itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered ("utime, children: %d\n", itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered ("stime, children: %d\n", itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered (_("jiffies remaining in current time slice: %d\n"), - itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered ("'nice' value: %d\n", itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("jiffies until next timeout: %u\n"), - (unsigned int) itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered ("jiffies until next SIGALRM: %u\n", - (unsigned int) itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered (_("start time (jiffies since system boot): %d\n"), - itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Virtual memory size: %u\n"), - (unsigned int) itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Resident set size: %u\n"), (unsigned int) itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered ("rlim: %u\n", (unsigned int) itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Start of text: 0x%x\n"), itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("End of text: 0x%x\n"), itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) - printf_filtered (_("Start of stack: 0x%x\n"), itmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("Flags: 0x%lx\n"), ltmp); + if (fscanf 
(procfile, "%lu ", <mp) > 0) + printf_filtered (_("Minor faults (no memory page): %lu\n"), + (unsigned long) ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("Minor faults, children: %lu\n"), + (unsigned long) ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("Major faults (memory page faults): %lu\n"), + (unsigned long) ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("Major faults, children: %lu\n"), + (unsigned long) ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("utime: %ld\n"), ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("stime: %ld\n"), ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("utime, children: %ld\n"), ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("stime, children: %ld\n"), ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("jiffies remaining in current time slice: %ld\n"), + ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("'nice' value: %ld\n"), ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("jiffies until next timeout: %lu\n"), + (unsigned long) ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("jiffies until next SIGALRM: %lu\n"), + (unsigned long) ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("start time (jiffies since system boot): %ld\n"), + ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("Virtual memory size: %lu\n"), + (unsigned long) ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("Start of text: 0x%lx\n"), ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("End of text: 0x%lx\n"), ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) + printf_filtered (_("Start of stack: 0x%lx\n"), ltmp); #if 0 /* Don't know how architecture-dependent the rest is... Anyway the signal bitmap info is available from "status". */ - if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */ - printf_filtered (_("Kernel stack pointer: 0x%x\n"), itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */ - printf_filtered (_("Kernel instr pointer: 0x%x\n"), itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered (_("Pending signals bitmap: 0x%x\n"), itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered (_("Blocked signals bitmap: 0x%x\n"), itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered (_("Ignored signals bitmap: 0x%x\n"), itmp); - if (fscanf (procfile, "%d ", &itmp) > 0) - printf_filtered (_("Catched signals bitmap: 0x%x\n"), itmp); - if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */ - printf_filtered (_("wchan (system call): 0x%x\n"), itmp); + if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ + printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? 
*/ + printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp); + if (fscanf (procfile, "%ld ", <mp) > 0) + printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp); + if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ + printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp); #endif fclose (procfile); } @@ -2929,14 +3567,22 @@ linux_nat_info_proc_cmd (char *args, int from_tty) } } -int -linux_proc_xfer_memory (CORE_ADDR addr, char *myaddr, int len, int write, - struct mem_attrib *attrib, struct target_ops *target) +/* Implement the to_xfer_partial interface for memory reads using the /proc + filesystem. Because we can use a single read() call for /proc, this + can be much more efficient than banging away at PTRACE_PEEKTEXT, + but it doesn't support writes. */ + +static LONGEST +linux_proc_xfer_partial (struct target_ops *ops, enum target_object object, + const char *annex, gdb_byte *readbuf, + const gdb_byte *writebuf, + ULONGEST offset, LONGEST len) { - int fd, ret; + LONGEST ret; + int fd; char filename[64]; - if (write) + if (object != TARGET_OBJECT_MEMORY || !readbuf) return 0; /* Don't bother for one word. */ @@ -2955,9 +3601,9 @@ linux_proc_xfer_memory (CORE_ADDR addr, char *myaddr, int len, int write, 32-bit platforms (for instance, SPARC debugging a SPARC64 application). */ #ifdef HAVE_PREAD64 - if (pread64 (fd, myaddr, len, addr) != len) + if (pread64 (fd, readbuf, len, offset) != len) #else - if (lseek (fd, addr, SEEK_SET) == -1 || read (fd, myaddr, len) != len) + if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len) #endif ret = 0; else @@ -3048,14 +3694,506 @@ linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigse fclose (procfile); } +static LONGEST +linux_xfer_partial (struct target_ops *ops, enum target_object object, + const char *annex, gdb_byte *readbuf, + const gdb_byte *writebuf, ULONGEST offset, LONGEST len) +{ + LONGEST xfer; + + if (object == TARGET_OBJECT_AUXV) + return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf, + offset, len); + + xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf, + offset, len); + if (xfer != 0) + return xfer; + + return super_xfer_partial (ops, object, annex, readbuf, writebuf, + offset, len); +} + +/* Create a prototype generic GNU/Linux target. The client can override + it with local methods. 
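+
+   For instance (an illustrative sketch only -- the arch_linux_* names
+   are placeholders and are not defined by this patch), an
+   architecture-specific native file is expected to use it roughly
+   like this:
+
+     static void
+     _initialize_arch_linux_nat (void)
+     {
+       struct target_ops *t;
+
+       // Fill in the generic GNU/Linux methods.
+       t = linux_target ();
+
+       // Add the architecture's register access methods.
+       t->to_fetch_registers = arch_linux_fetch_inferior_registers;
+       t->to_store_registers = arch_linux_store_inferior_registers;
+
+       // Register the result as the native target.
+       linux_nat_add_target (t);
+     }
+
+   linux_nat_add_target (below) then saves a copy of the
+   single-threaded methods in linux_ops before overriding the
+   process-control entry points with the multi-threaded versions.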
*/ + +static void +linux_target_install_ops (struct target_ops *t) +{ + t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint; + t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint; + t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint; + t->to_pid_to_exec_file = linux_child_pid_to_exec_file; + t->to_post_startup_inferior = linux_child_post_startup_inferior; + t->to_post_attach = linux_child_post_attach; + t->to_follow_fork = linux_child_follow_fork; + t->to_find_memory_regions = linux_nat_find_memory_regions; + t->to_make_corefile_notes = linux_nat_make_corefile_notes; + + super_xfer_partial = t->to_xfer_partial; + t->to_xfer_partial = linux_xfer_partial; +} + +struct target_ops * +linux_target (void) +{ + struct target_ops *t; + + t = inf_ptrace_target (); + linux_target_install_ops (t); + + return t; +} + +struct target_ops * +linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int)) +{ + struct target_ops *t; + + t = inf_ptrace_trad_target (register_u_offset); + linux_target_install_ops (t); + + return t; +} + +/* Controls if async mode is permitted. */ +static int linux_async_permitted = 0; + +/* The set command writes to this variable. If the inferior is + executing, linux_nat_async_permitted is *not* updated. */ +static int linux_async_permitted_1 = 0; + +static void +set_maintenance_linux_async_permitted (char *args, int from_tty, + struct cmd_list_element *c) +{ + if (target_has_execution) + { + linux_async_permitted_1 = linux_async_permitted; + error (_("Cannot change this setting while the inferior is running.")); + } + + linux_async_permitted = linux_async_permitted_1; + linux_nat_set_async_mode (linux_async_permitted); +} + +static void +show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty, + struct cmd_list_element *c, const char *value) +{ + fprintf_filtered (file, _("\ +Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"), + value); +} + +/* target_is_async_p implementation. */ + +static int +linux_nat_is_async_p (void) +{ + /* NOTE: palves 2008-03-21: We're only async when the user requests + it explicitly with the "maintenance set linux-async" command. + Someday, linux will always be async. */ + if (!linux_async_permitted) + return 0; + + return 1; +} + +/* target_can_async_p implementation. */ + +static int +linux_nat_can_async_p (void) +{ + /* NOTE: palves 2008-03-21: We're only async when the user requests + it explicitly with the "maintenance set linux-async" command. + Someday, linux will always be async. */ + if (!linux_async_permitted) + return 0; + + /* See target.h/target_async_mask. */ + return linux_nat_async_mask_value; +} + +/* target_async_mask implementation. */ + +static int +linux_nat_async_mask (int mask) +{ + int current_state; + current_state = linux_nat_async_mask_value; + + if (current_state != mask) + { + if (mask == 0) + { + linux_nat_async (NULL, 0); + linux_nat_async_mask_value = mask; + /* We're in sync mode. Make sure SIGCHLD isn't handled by + async_sigchld_handler when we come out of sigsuspend in + linux_nat_wait. */ + sigaction (SIGCHLD, &sync_sigchld_action, NULL); + } + else + { + /* Restore the async handler. */ + sigaction (SIGCHLD, &async_sigchld_action, NULL); + linux_nat_async_mask_value = mask; + linux_nat_async (inferior_event_handler, 0); + } + } + + return current_state; +} + +/* Pop an event from the event pipe. 
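+
+   Each event travels through the pipe as one fixed-size binary record
+   (a struct waitpid_result; only the pid, status and options fields
+   are meaningful to the reader), so both ends always transfer
+   sizeof (struct waitpid_result) bytes at a time.  That size is far
+   below PIPE_BUF, so each record is written atomically and a reader
+   never sees a torn record.  linux_nat_num_queued_events tracks how
+   many whole records are currently buffered in the pipe.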
*/ + +static int +linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options) +{ + struct waitpid_result event = {0}; + int ret; + + do + { + ret = read (linux_nat_event_pipe[0], &event, sizeof (event)); + } + while (ret == -1 && errno == EINTR); + + gdb_assert (ret == sizeof (event)); + + *ptr_status = event.status; + *ptr_options = event.options; + + linux_nat_num_queued_events--; + + return event.pid; +} + +/* Push an event into the event pipe. */ + +static void +linux_nat_event_pipe_push (int pid, int status, int options) +{ + int ret; + struct waitpid_result event = {0}; + event.pid = pid; + event.status = status; + event.options = options; + + do + { + ret = write (linux_nat_event_pipe[1], &event, sizeof (event)); + gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event)); + } while (ret == -1 && errno == EINTR); + + linux_nat_num_queued_events++; +} + +static void +get_pending_events (void) +{ + int status, options, pid; + + if (!linux_nat_async_enabled || !linux_nat_async_events_enabled) + internal_error (__FILE__, __LINE__, + "get_pending_events called with async masked"); + + while (1) + { + status = 0; + options = __WCLONE | WNOHANG; + + do + { + pid = waitpid (-1, &status, options); + } + while (pid == -1 && errno == EINTR); + + if (pid <= 0) + { + options = WNOHANG; + do + { + pid = waitpid (-1, &status, options); + } + while (pid == -1 && errno == EINTR); + } + + if (pid <= 0) + /* No more children reporting events. */ + break; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "\ +get_pending_events: pid(%d), status(%x), options (%x)\n", + pid, status, options); + + linux_nat_event_pipe_push (pid, status, options); + } + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "\ +get_pending_events: linux_nat_num_queued_events(%d)\n", + linux_nat_num_queued_events); +} + +/* SIGCHLD handler for async mode. */ + +static void +async_sigchld_handler (int signo) +{ + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n"); + + get_pending_events (); +} + +/* Enable or disable async SIGCHLD handling. */ + +static int +linux_nat_async_events (int enable) +{ + int current_state = linux_nat_async_events_enabled; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, + "LNAE: enable(%d): linux_nat_async_events_enabled(%d), " + "linux_nat_num_queued_events(%d)\n", + enable, linux_nat_async_events_enabled, + linux_nat_num_queued_events); + + if (current_state != enable) + { + sigset_t mask; + sigemptyset (&mask); + sigaddset (&mask, SIGCHLD); + if (enable) + { + /* Unblock target events. */ + linux_nat_async_events_enabled = 1; + + local_event_queue_to_pipe (); + /* While in masked async, we may have not collected all the + pending events. Get them out now. */ + get_pending_events (); + sigprocmask (SIG_UNBLOCK, &mask, NULL); + } + else + { + /* Block target events. */ + sigprocmask (SIG_BLOCK, &mask, NULL); + linux_nat_async_events_enabled = 0; + /* Get events out of queue, and make them available to + queued_waitpid / my_waitpid. */ + pipe_to_local_event_queue (); + } + } + + return current_state; +} + +static int async_terminal_is_ours = 1; + +/* target_terminal_inferior implementation. */ + +static void +linux_nat_terminal_inferior (void) +{ + if (!target_is_async_p ()) + { + /* Async mode is disabled. */ + terminal_inferior (); + return; + } + + /* GDB should never give the terminal to the inferior, if the + inferior is running in the background (run&, continue&, etc.). 
+ This check can be removed when the common code is fixed. */ + if (!sync_execution) + return; + + terminal_inferior (); + + if (!async_terminal_is_ours) + return; + + delete_file_handler (input_fd); + async_terminal_is_ours = 0; + set_sigint_trap (); +} + +/* target_terminal_ours implementation. */ + void -_initialize_linux_nat (void) +linux_nat_terminal_ours (void) { - struct sigaction action; - extern void thread_db_init (struct target_ops *); + if (!target_is_async_p ()) + { + /* Async mode is disabled. */ + terminal_ours (); + return; + } + + /* GDB should never give the terminal to the inferior if the + inferior is running in the background (run&, continue&, etc.), + but claiming it sure should. */ + terminal_ours (); + + if (!sync_execution) + return; + + if (async_terminal_is_ours) + return; + + clear_sigint_trap (); + add_file_handler (input_fd, stdin_event_handler, 0); + async_terminal_is_ours = 1; +} + +static void (*async_client_callback) (enum inferior_event_type event_type, + void *context); +static void *async_client_context; + +static void +linux_nat_async_file_handler (int error, gdb_client_data client_data) +{ + async_client_callback (INF_REG_EVENT, async_client_context); +} + +/* target_async implementation. */ + +static void +linux_nat_async (void (*callback) (enum inferior_event_type event_type, + void *context), void *context) +{ + if (linux_nat_async_mask_value == 0 || !linux_nat_async_enabled) + internal_error (__FILE__, __LINE__, + "Calling target_async when async is masked"); + + if (callback != NULL) + { + async_client_callback = callback; + async_client_context = context; + add_file_handler (linux_nat_event_pipe[0], + linux_nat_async_file_handler, NULL); + + linux_nat_async_events (1); + } + else + { + async_client_callback = callback; + async_client_context = context; + + linux_nat_async_events (0); + delete_file_handler (linux_nat_event_pipe[0]); + } + return; +} + +/* Enable/Disable async mode. */ + +static void +linux_nat_set_async_mode (int on) +{ + if (linux_nat_async_enabled != on) + { + if (on) + { + gdb_assert (waitpid_queue == NULL); + sigaction (SIGCHLD, &async_sigchld_action, NULL); + + if (pipe (linux_nat_event_pipe) == -1) + internal_error (__FILE__, __LINE__, + "creating event pipe failed."); + + fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK); + fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK); + } + else + { + sigaction (SIGCHLD, &sync_sigchld_action, NULL); + + drain_queued_events (-1); + + linux_nat_num_queued_events = 0; + close (linux_nat_event_pipe[0]); + close (linux_nat_event_pipe[1]); + linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1; + + } + } + linux_nat_async_enabled = on; +} + +void +linux_nat_add_target (struct target_ops *t) +{ + /* Save the provided single-threaded target. We save this in a separate + variable because another target we've inherited from (e.g. inf-ptrace) + may have saved a pointer to T; we want to use it for the final + process stratum target. */ + linux_ops_saved = *t; + linux_ops = &linux_ops_saved; + + /* Override some methods for multithreading. 
*/ + t->to_create_inferior = linux_nat_create_inferior; + t->to_attach = linux_nat_attach; + t->to_detach = linux_nat_detach; + t->to_resume = linux_nat_resume; + t->to_wait = linux_nat_wait; + t->to_xfer_partial = linux_nat_xfer_partial; + t->to_kill = linux_nat_kill; + t->to_mourn_inferior = linux_nat_mourn_inferior; + t->to_thread_alive = linux_nat_thread_alive; + t->to_pid_to_str = linux_nat_pid_to_str; + t->to_has_thread_control = tc_schedlock; + + t->to_can_async_p = linux_nat_can_async_p; + t->to_is_async_p = linux_nat_is_async_p; + t->to_async = linux_nat_async; + t->to_async_mask = linux_nat_async_mask; + t->to_terminal_inferior = linux_nat_terminal_inferior; + t->to_terminal_ours = linux_nat_terminal_ours; + + /* We don't change the stratum; this target will sit at + process_stratum and thread_db will set at thread_stratum. This + is a little strange, since this is a multi-threaded-capable + target, but we want to be on the stack below thread_db, and we + also want to be used for single-threaded processes. */ + + add_target (t); + + /* TODO: Eliminate this and have libthread_db use + find_target_beneath. */ + thread_db_init (t); +} + +/* Register a method to call whenever a new thread is attached. */ +void +linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t)) +{ + /* Save the pointer. We only support a single registered instance + of the GNU/Linux native target, so we do not need to map this to + T. */ + linux_nat_new_thread = new_thread; +} + +/* Return the saved siginfo associated with PTID. */ +struct siginfo * +linux_nat_get_siginfo (ptid_t ptid) +{ + struct lwp_info *lp = find_lwp_pid (ptid); + + gdb_assert (lp != NULL); + + return &lp->siginfo; +} - deprecated_child_ops.to_find_memory_regions = linux_nat_find_memory_regions; - deprecated_child_ops.to_make_corefile_notes = linux_nat_make_corefile_notes; +void +_initialize_linux_nat (void) +{ + sigset_t mask; add_info ("proc", linux_nat_info_proc_cmd, _("\ Show /proc process information about any running process.\n\ @@ -3066,31 +4204,63 @@ Specify any of the following keywords for detailed info:\n\ status -- list a different bunch of random process info.\n\ all -- list all available /proc info.")); - init_linux_nat_ops (); - add_target (&linux_nat_ops); - thread_db_init (&linux_nat_ops); + add_setshow_zinteger_cmd ("lin-lwp", class_maintenance, + &debug_linux_nat, _("\ +Set debugging of GNU/Linux lwp module."), _("\ +Show debugging of GNU/Linux lwp module."), _("\ +Enables printf debugging output."), + NULL, + show_debug_linux_nat, + &setdebuglist, &showdebuglist); + + add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance, + &debug_linux_nat_async, _("\ +Set debugging of GNU/Linux async lwp module."), _("\ +Show debugging of GNU/Linux async lwp module."), _("\ +Enables printf debugging output."), + NULL, + show_debug_linux_nat_async, + &setdebuglist, &showdebuglist); - /* Save the original signal mask. */ + add_setshow_boolean_cmd ("linux-async", class_maintenance, + &linux_async_permitted_1, _("\ +Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\ +Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\ +Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."), + set_maintenance_linux_async_permitted, + show_maintenance_linux_async_permitted, + &maintenance_set_cmdlist, + &maintenance_show_cmdlist); + + /* Block SIGCHLD by default. 
Doing this early prevents it getting + unblocked if an exception is thrown due to an error while the + inferior is starting (sigsetjmp/siglongjmp). */ + sigemptyset (&mask); + sigaddset (&mask, SIGCHLD); + sigprocmask (SIG_BLOCK, &mask, NULL); + + /* Save this mask as the default. */ sigprocmask (SIG_SETMASK, NULL, &normal_mask); - action.sa_handler = sigchld_handler; - sigemptyset (&action.sa_mask); - action.sa_flags = 0; - sigaction (SIGCHLD, &action, NULL); + /* The synchronous SIGCHLD handler. */ + sync_sigchld_action.sa_handler = sigchld_handler; + sigemptyset (&sync_sigchld_action.sa_mask); + sync_sigchld_action.sa_flags = SA_RESTART; + + /* Make it the default. */ + sigaction (SIGCHLD, &sync_sigchld_action, NULL); /* Make sure we don't block SIGCHLD during a sigsuspend. */ sigprocmask (SIG_SETMASK, NULL, &suspend_mask); sigdelset (&suspend_mask, SIGCHLD); - sigemptyset (&blocked_mask); + /* SIGCHLD handler for async mode. */ + async_sigchld_action.sa_handler = async_sigchld_handler; + sigemptyset (&async_sigchld_action.sa_mask); + async_sigchld_action.sa_flags = SA_RESTART; - add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\ -Set debugging of GNU/Linux lwp module."), _("\ -Show debugging of GNU/Linux lwp module."), _("\ -Enables printf debugging output."), - NULL, - NULL, /* FIXME: i18n: */ - &setdebuglist, &showdebuglist); + /* Install the default mode. */ + linux_nat_set_async_mode (linux_async_permitted); } @@ -3112,7 +4282,7 @@ get_signo (const char *name) if (ms == NULL) return 0; - if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (char *) &signo, + if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo, sizeof (signo)) != 0) return 0; @@ -3126,16 +4296,24 @@ lin_thread_get_thread_signals (sigset_t *set) { struct sigaction action; int restart, cancel; + sigset_t blocked_mask; + sigemptyset (&blocked_mask); sigemptyset (set); restart = get_signo ("__pthread_sig_restart"); + cancel = get_signo ("__pthread_sig_cancel"); + + /* LinuxThreads normally uses the first two RT signals, but in some legacy + cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does + not provide any way for the debugger to query the signal numbers - + fortunately they don't change! */ + if (restart == 0) - return; + restart = __SIGRTMIN; - cancel = get_signo ("__pthread_sig_cancel"); if (cancel == 0) - return; + cancel = __SIGRTMIN + 1; sigaddset (set, restart); sigaddset (set, cancel); @@ -3148,7 +4326,7 @@ lin_thread_get_thread_signals (sigset_t *set) action.sa_handler = sigchld_handler; sigemptyset (&action.sa_mask); - action.sa_flags = 0; + action.sa_flags = SA_RESTART; sigaction (cancel, &action, NULL); /* We block the "cancel" signal throughout this code ... */