X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Flinux-nat.c;h=c3aa94fe3ad848cf7ac76b2f76041ee96cd2ae1a;hb=4d062f1ad566bb7f0eaed761db36788a7e981a28;hp=128e83fdc9e259d5cf936f04443850d46a8e0e7b;hpb=a9762ec78a53fbe9209fe1654db42df0cd328d50;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/linux-nat.c b/gdb/linux-nat.c index 128e83fdc9..c3aa94fe3a 100644 --- a/gdb/linux-nat.c +++ b/gdb/linux-nat.c @@ -1,6 +1,6 @@ /* GNU/Linux native-dependent code common to multiple platforms. - Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007 + Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. This file is part of GDB. @@ -46,6 +46,107 @@ #include "gdbthread.h" /* for struct thread_info etc. */ #include "gdb_stat.h" /* for struct stat */ #include /* for O_RDONLY */ +#include "inf-loop.h" +#include "event-loop.h" +#include "event-top.h" +#include +#include +#include "gdb_dirent.h" +#include "xml-support.h" + +#ifdef HAVE_PERSONALITY +# include +# if !HAVE_DECL_ADDR_NO_RANDOMIZE +# define ADDR_NO_RANDOMIZE 0x0040000 +# endif +#endif /* HAVE_PERSONALITY */ + +/* This comment documents high-level logic of this file. + +Waiting for events in sync mode +=============================== + +When waiting for an event in a specific thread, we just use waitpid, passing +the specific pid, and not passing WNOHANG. + +When waiting for an event in all threads, waitpid is not quite good. Prior to +version 2.4, Linux can either wait for event in main thread, or in secondary +threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might +miss an event. The solution is to use non-blocking waitpid, together with +sigsuspend. First, we use non-blocking waitpid to get an event in the main +process, if any. Second, we use non-blocking waitpid with the __WCLONED +flag to check for events in cloned processes. If nothing is found, we use +sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something +happened to a child process -- and SIGCHLD will be delivered both for events +in main debugged process and in cloned processes. As soon as we know there's +an event, we get back to calling nonblocking waitpid with and without __WCLONED. + +Note that SIGCHLD should be blocked between waitpid and sigsuspend calls, +so that we don't miss a signal. If SIGCHLD arrives in between, when it's +blocked, the signal becomes pending and sigsuspend immediately +notices it and returns. + +Waiting for events in async mode +================================ + +In async mode, GDB should always be ready to handle both user input +and target events, so neither blocking waitpid nor sigsuspend are +viable options. Instead, we should asynchronously notify the GDB main +event loop whenever there's an unprocessed event from the target. We +detect asynchronous target events by handling SIGCHLD signals. To +notify the event loop about target events, the self-pipe trick is used +--- a pipe is registered as waitable event source in the event loop, +the event loop select/poll's on the read end of this pipe (as well on +other event sources, e.g., stdin), and the SIGCHLD handler writes a +byte to this pipe. This is more portable than relying on +pselect/ppoll, since on kernels that lack those syscalls, libc +emulates them with select/poll+sigprocmask, and that is racy +(a.k.a. plain broken). + +Obviously, if we fail to notify the event loop if there's a target +event, it's bad. 
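A minimal, self-contained sketch of the scheme described above (illustrative only; event_pipe, handle_sigchld and poll_children are names invented for this example, not the functions added by this patch). It shows the two pieces working together: a SIGCHLD handler whose only job is to write a byte to a pipe registered with the event loop, and a reaping routine that uses non-blocking waitpid both with and without __WCLONE.

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/select.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef __WCLONE
# define __WCLONE 0x80000000	/* Wait for cloned children too.  */
#endif

static int event_pipe[2];	/* [0] is the read end, [1] the write end.  */

/* SIGCHLD handler: async-signal-safe; it only makes the pipe readable
   so that the event loop wakes up.  */
static void
handle_sigchld (int sig)
{
  int saved_errno = errno;

  (void) sig;
  while (write (event_pipe[1], "+", 1) < 0 && errno == EINTR)
    ;
  errno = saved_errno;
}

/* Reap everything currently reportable without ever blocking; clones
   have to be polled separately from ordinary children.  */
static void
poll_children (void)
{
  int status;

  while (waitpid (-1, &status, WNOHANG) > 0)
    ;
  while (waitpid (-1, &status, WNOHANG | __WCLONE) > 0)
    ;
}

int
main (void)
{
  struct sigaction sa;
  char buf[64];

  pipe (event_pipe);
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = handle_sigchld;
  sigaction (SIGCHLD, &sa, NULL);

  for (;;)
    {
      fd_set rfds;

      FD_ZERO (&rfds);
      FD_SET (0, &rfds);		/* user input (stdin) */
      FD_SET (event_pipe[0], &rfds);	/* target events */

      if (select (event_pipe[0] + 1, &rfds, NULL, NULL, NULL) < 0)
	continue;			/* EINTR etc.; just retry */

      if (FD_ISSET (event_pipe[0], &rfds))
	{
	  while (read (event_pipe[0], buf, sizeof (buf)) > 0)
	    ;				/* drain the pipe */
	  poll_children ();		/* then collect the events */
	}

      /* ... handle user input here ... */
    }
}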
OTOH, if we notify the event loop when there's no +event from the target, linux_nat_wait will detect that there's no real +event to report, and return event of type TARGET_WAITKIND_IGNORE. +This is mostly harmless, but it will waste time and is better avoided. + +The main design point is that every time GDB is outside linux-nat.c, +we have a SIGCHLD handler installed that is called when something +happens to the target and notifies the GDB event loop. Whenever GDB +core decides to handle the event, and calls into linux-nat.c, we +process things as in sync mode, except that the we never block in +sigsuspend. + +While processing an event, we may end up momentarily blocked in +waitpid calls. Those waitpid calls, while blocking, are guarantied to +return quickly. E.g., in all-stop mode, before reporting to the core +that an LWP hit a breakpoint, all LWPs are stopped by sending them +SIGSTOP, and synchronously waiting for the SIGSTOP to be reported. +Note that this is different from blocking indefinitely waiting for the +next event --- here, we're already handling an event. + +Use of signals +============== + +We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another +signal is not entirely significant; we just need for a signal to be delivered, +so that we can intercept it. SIGSTOP's advantage is that it can not be +blocked. A disadvantage is that it is not a real-time signal, so it can only +be queued once; we do not keep track of other sources of SIGSTOP. + +Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't +use them, because they have special behavior when the signal is generated - +not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL +kills the entire thread group. + +A delivered SIGSTOP would stop the entire thread group, not just the thread we +tkill'd. But we never let the SIGSTOP be delivered; we always intercept and +cancel it (by PTRACE_CONT without passing SIGSTOP). + +We could use a real-time signal instead. This would solve those problems; we +could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB. +But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH +generates it, and there are races with trying to find a signal that is not +blocked. */ #ifndef O_LARGEFILE #define O_LARGEFILE 0 @@ -84,11 +185,26 @@ #define __WALL 0x40000000 /* Wait for any child. */ #endif +#ifndef PTRACE_GETSIGINFO +# define PTRACE_GETSIGINFO 0x4202 +# define PTRACE_SETSIGINFO 0x4203 +#endif + /* The single-threaded native GNU/Linux target_ops. We save a pointer for the use of the multi-threaded target. */ static struct target_ops *linux_ops; static struct target_ops linux_ops_saved; +/* The method to call, if any, when a new thread is attached. */ +static void (*linux_nat_new_thread) (ptid_t); + +/* The method to call, if any, when the siginfo object needs to be + converted between the layout returned by ptrace, and the layout in + the architecture of the inferior. */ +static int (*linux_nat_siginfo_fixup) (struct siginfo *, + gdb_byte *, + int); + /* The saved to_xfer_partial method, inherited from inf-ptrace.c. Called by our to_xfer_partial. 
*/ static LONGEST (*super_xfer_partial) (struct target_ops *, @@ -106,6 +222,42 @@ show_debug_linux_nat (struct ui_file *file, int from_tty, value); } +static int debug_linux_nat_async = 0; +static void +show_debug_linux_nat_async (struct ui_file *file, int from_tty, + struct cmd_list_element *c, const char *value) +{ + fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"), + value); +} + +static int disable_randomization = 1; + +static void +show_disable_randomization (struct ui_file *file, int from_tty, + struct cmd_list_element *c, const char *value) +{ +#ifdef HAVE_PERSONALITY + fprintf_filtered (file, _("\ +Disabling randomization of debuggee's virtual address space is %s.\n"), + value); +#else /* !HAVE_PERSONALITY */ + fputs_filtered (_("\ +Disabling randomization of debuggee's virtual address space is unsupported on\n\ +this platform.\n"), file); +#endif /* !HAVE_PERSONALITY */ +} + +static void +set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c) +{ +#ifndef HAVE_PERSONALITY + error (_("\ +Disabling randomization of debuggee's virtual address space is unsupported on\n\ +this platform.")); +#endif /* !HAVE_PERSONALITY */ +} + static int linux_parent_pid; struct simple_pid_list @@ -126,6 +278,65 @@ static int linux_supports_tracefork_flag = -1; static int linux_supports_tracevforkdone_flag = -1; +/* Async mode support */ + +/* Zero if the async mode, although enabled, is masked, which means + linux_nat_wait should behave as if async mode was off. */ +static int linux_nat_async_mask_value = 1; + +/* The read/write ends of the pipe registered as waitable file in the + event loop. */ +static int linux_nat_event_pipe[2] = { -1, -1 }; + +/* Flush the event pipe. */ + +static void +async_file_flush (void) +{ + int ret; + char buf; + + do + { + ret = read (linux_nat_event_pipe[0], &buf, 1); + } + while (ret >= 0 || (ret == -1 && errno == EINTR)); +} + +/* Put something (anything, doesn't matter what, or how much) in event + pipe, so that the select/poll in the event-loop realizes we have + something to process. */ + +static void +async_file_mark (void) +{ + int ret; + + /* It doesn't really matter what the pipe contains, as long we end + up with something in it. Might as well flush the previous + left-overs. */ + async_file_flush (); + + do + { + ret = write (linux_nat_event_pipe[1], "+", 1); + } + while (ret == -1 && errno == EINTR); + + /* Ignore EAGAIN. If the pipe is full, the event loop will already + be awakened anyway. */ +} + +static void linux_nat_async (void (*callback) + (enum inferior_event_type event_type, void *context), + void *context); +static int linux_nat_async_mask (int mask); +static int kill_lwp (int lwpid, int signo); + +static int stop_callback (struct lwp_info *lp, void *data); + +static void block_child_signals (sigset_t *prev_mask); +static void restore_child_signals_mask (sigset_t *prev_mask); /* Trivial list manipulation functions to keep track of a list of new stopped processes. */ @@ -182,6 +393,7 @@ static int my_waitpid (int pid, int *status, int flags) { int ret; + do { ret = waitpid (pid, status, flags); @@ -209,13 +421,20 @@ linux_test_for_tracefork (int original_pid) { int child_pid, ret, status; long second_pid; + sigset_t prev_mask; + + /* We don't want those ptrace calls to be interrupted. 
*/ + block_child_signals (&prev_mask); linux_supports_tracefork_flag = 0; linux_supports_tracevforkdone_flag = 0; ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK); if (ret != 0) - return; + { + restore_child_signals_mask (&prev_mask); + return; + } child_pid = fork (); if (child_pid == -1) @@ -239,6 +458,7 @@ linux_test_for_tracefork (int original_pid) if (ret != 0) { warning (_("linux_test_for_tracefork: failed to kill child")); + restore_child_signals_mask (&prev_mask); return; } @@ -249,6 +469,7 @@ linux_test_for_tracefork (int original_pid) warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from " "killed child"), status); + restore_child_signals_mask (&prev_mask); return; } @@ -288,6 +509,8 @@ linux_test_for_tracefork (int original_pid) if (ret != 0) warning (_("linux_test_for_tracefork: failed to kill child")); my_waitpid (child_pid, &status, 0); + + restore_child_signals_mask (&prev_mask); } /* Return non-zero iff we have tracefork functionality available. @@ -350,33 +573,40 @@ linux_child_post_startup_inferior (ptid_t ptid) static int linux_child_follow_fork (struct target_ops *ops, int follow_child) { + sigset_t prev_mask; ptid_t last_ptid; struct target_waitstatus last_status; int has_vforked; int parent_pid, child_pid; + block_child_signals (&prev_mask); + get_last_target_status (&last_ptid, &last_status); has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED); parent_pid = ptid_get_lwp (last_ptid); if (parent_pid == 0) parent_pid = ptid_get_pid (last_ptid); - child_pid = last_status.value.related_pid; + child_pid = PIDGET (last_status.value.related_pid); if (! follow_child) { /* We're already attached to the parent, by default. */ /* Before detaching from the child, remove all breakpoints from - it. (This won't actually modify the breakpoint list, but will - physically remove the breakpoints from the child.) */ - /* If we vforked this will remove the breakpoints from the parent - also, but they'll be reinserted below. */ - detach_breakpoints (child_pid); + it. If we forked, then this has already been taken care of + by infrun.c. If we vforked however, any breakpoint inserted + in the parent is visible in the child, even those added while + stopped in a vfork catchpoint. This won't actually modify + the breakpoint list, but will physically remove the + breakpoints from the child. This will remove the breakpoints + from the parent also, but they'll be reinserted below. */ + if (has_vforked) + detach_breakpoints (child_pid); /* Detach new forked process? */ if (detach_fork) { - if (debug_linux_nat) + if (info_verbose || debug_linux_nat) { target_terminal_ours (); fprintf_filtered (gdb_stdlog, @@ -389,6 +619,14 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) else { struct fork_info *fp; + struct inferior *parent_inf, *child_inf; + + /* Add process to GDB's tables. */ + child_inf = add_inferior (child_pid); + + parent_inf = find_inferior_pid (GET_PID (last_ptid)); + child_inf->attach_flag = parent_inf->attach_flag; + /* Retain child fork in ptrace (stopped) state. */ fp = find_fork_pid (child_pid); if (!fp) @@ -452,16 +690,25 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) } else { + struct thread_info *last_tp = find_thread_pid (last_ptid); + struct thread_info *tp; char child_pid_spelling[40]; + struct inferior *parent_inf, *child_inf; - /* Needed to keep the breakpoint lists in sync. */ - if (! 
has_vforked) - detach_breakpoints (child_pid); + /* Copy user stepping state to the new inferior thread. */ + struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint; + CORE_ADDR step_range_start = last_tp->step_range_start; + CORE_ADDR step_range_end = last_tp->step_range_end; + struct frame_id step_frame_id = last_tp->step_frame_id; + + /* Otherwise, deleting the parent would get rid of this + breakpoint. */ + last_tp->step_resume_breakpoint = NULL; /* Before detaching from the parent, remove all breakpoints from it. */ remove_breakpoints (); - if (debug_linux_nat) + if (info_verbose || debug_linux_nat) { target_terminal_ours (); fprintf_filtered (gdb_stdlog, @@ -469,6 +716,14 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) child_pid); } + /* Add the new inferior first, so that the target_detach below + doesn't unpush the target. */ + + child_inf = add_inferior (child_pid); + + parent_inf = find_inferior_pid (GET_PID (last_ptid)); + child_inf->attach_flag = parent_inf->attach_flag; + /* If we're vforking, we may want to hold on to the parent until the child exits or execs. At exec time we can remove the old breakpoints from the parent and detach it; at exit time we @@ -488,7 +743,10 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) safely resume it. */ if (has_vforked) - linux_parent_pid = parent_pid; + { + linux_parent_pid = parent_pid; + detach_inferior (parent_pid); + } else if (!detach_fork) { struct fork_info *fp; @@ -497,23 +755,32 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) if (!fp) fp = add_fork (parent_pid); fork_save_infrun_state (fp, 0); + + /* Also add an entry for the child fork. */ + fp = find_fork_pid (child_pid); + if (!fp) + fp = add_fork (child_pid); + fork_save_infrun_state (fp, 0); } else - { - target_detach (NULL, 0); - } + target_detach (NULL, 0); - inferior_ptid = pid_to_ptid (child_pid); + inferior_ptid = ptid_build (child_pid, child_pid, 0); - /* Reinstall ourselves, since we might have been removed in - target_detach (which does other necessary cleanup). */ + linux_nat_switch_fork (inferior_ptid); + check_for_thread_db (); - push_target (ops); + tp = inferior_thread (); + tp->step_resume_breakpoint = step_resume_breakpoint; + tp->step_range_start = step_range_start; + tp->step_range_end = step_range_end; + tp->step_frame_id = step_frame_id; /* Reset breakpoints in the child as appropriate. */ follow_inferior_reset_breakpoints (); } + restore_child_signals_mask (&prev_mask); return 0; } @@ -573,33 +840,9 @@ linux_child_insert_exec_catchpoint (int pid) because the "zombies" stay around. */ /* List of known LWPs. */ -static struct lwp_info *lwp_list; - -/* Number of LWPs in the list. */ -static int num_lwps; - - -#define GET_LWP(ptid) ptid_get_lwp (ptid) -#define GET_PID(ptid) ptid_get_pid (ptid) -#define is_lwp(ptid) (GET_LWP (ptid) != 0) -#define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0) - -/* If the last reported event was a SIGTRAP, this variable is set to - the process id of the LWP/thread that got it. */ -ptid_t trap_ptid; +struct lwp_info *lwp_list; -/* Since we cannot wait (in linux_nat_wait) for the initial process and - any cloned processes with a single call to waitpid, we have to use - the WNOHANG flag and call waitpid in a loop. To optimize - things a bit we use `sigsuspend' to wake us up when a process has - something to report (it will send us a SIGCHLD if it has). To make - this work we have to juggle with the signal mask. 
We save the - original signal mask such that we can restore it before creating a - new process in order to avoid blocking certain signals in the - inferior. We then block SIGCHLD during the waitpid/sigsuspend - loop. */ - /* Original signal mask. */ static sigset_t normal_mask; @@ -609,12 +852,39 @@ static sigset_t suspend_mask; /* Signals to block to make that sigsuspend work. */ static sigset_t blocked_mask; + +/* SIGCHLD action. */ +struct sigaction sigchld_action; + +/* Block child signals (SIGCHLD and linux threads signals), and store + the previous mask in PREV_MASK. */ + +static void +block_child_signals (sigset_t *prev_mask) +{ + /* Make sure SIGCHLD is blocked. */ + if (!sigismember (&blocked_mask, SIGCHLD)) + sigaddset (&blocked_mask, SIGCHLD); + + sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask); +} + +/* Restore child signals mask, previously returned by + block_child_signals. */ + +static void +restore_child_signals_mask (sigset_t *prev_mask) +{ + sigprocmask (SIG_SETMASK, prev_mask, NULL); +} /* Prototypes for local functions. */ static int stop_wait_callback (struct lwp_info *lp, void *data); -static int linux_nat_thread_alive (ptid_t ptid); +static int linux_thread_alive (ptid_t ptid); static char *linux_child_pid_to_exec_file (int pid); +static int cancel_breakpoint (struct lwp_info *lp); + /* Convert wait status STATUS to a string. Used for printing debug messages only. */ @@ -653,11 +923,53 @@ init_lwp_list (void) } lwp_list = NULL; - num_lwps = 0; +} + +/* Remove all LWPs belong to PID from the lwp list. */ + +static void +purge_lwp_list (int pid) +{ + struct lwp_info *lp, *lpprev, *lpnext; + + lpprev = NULL; + + for (lp = lwp_list; lp; lp = lpnext) + { + lpnext = lp->next; + + if (ptid_get_pid (lp->ptid) == pid) + { + if (lp == lwp_list) + lwp_list = lp->next; + else + lpprev->next = lp->next; + + xfree (lp); + } + else + lpprev = lp; + } +} + +/* Return the number of known LWPs in the tgid given by PID. */ + +static int +num_lwps (int pid) +{ + int count = 0; + struct lwp_info *lp; + + for (lp = lwp_list; lp; lp = lp->next) + if (ptid_get_pid (lp->ptid) == pid) + count++; + + return count; } /* Add the LWP specified by PID to the list. Return a pointer to the - structure describing the new LWP. */ + structure describing the new LWP. The LWP should already be stopped + (with an exception for the very first LWP). */ static struct lwp_info * add_lwp (ptid_t ptid) @@ -676,7 +988,9 @@ add_lwp (ptid_t ptid) lp->next = lwp_list; lwp_list = lp; - ++num_lwps; + + if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL) + linux_nat_new_thread (ptid); return lp; } @@ -697,8 +1011,6 @@ delete_lwp (ptid_t ptid) if (!lp) return; - num_lwps--; - if (lpprev) lpprev->next = lp->next; else @@ -728,21 +1040,54 @@ find_lwp_pid (ptid_t ptid) return NULL; } +/* Returns true if PTID matches filter FILTER. FILTER can be the wild + card MINUS_ONE_PTID (all ptid match it); can be a ptid representing + a process (ptid_is_pid returns true), in which case, all lwps of + that give process match, lwps of other process do not; or, it can + represent a specific thread, in which case, only that thread will + match true. PTID must represent an LWP, it can never be a wild + card. */ + +static int +ptid_match (ptid_t ptid, ptid_t filter) +{ + /* Since both parameters have the same type, prevent easy mistakes + from happening. 
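A brief worked illustration of the three filter forms accepted here (values invented for this example, not taken from the patch):

   ptid_match (ptid_build (1234, 1235, 0), minus_one_ptid)              => 1  (wild card)
   ptid_match (ptid_build (1234, 1235, 0), pid_to_ptid (1234))          => 1  (same process)
   ptid_match (ptid_build (1234, 1235, 0), ptid_build (1234, 1236, 0))  => 0  (different LWP)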
*/ + gdb_assert (!ptid_equal (ptid, minus_one_ptid) + && !ptid_equal (ptid, null_ptid)); + + if (ptid_equal (filter, minus_one_ptid)) + return 1; + if (ptid_is_pid (filter) + && ptid_get_pid (ptid) == ptid_get_pid (filter)) + return 1; + else if (ptid_equal (ptid, filter)) + return 1; + + return 0; +} + /* Call CALLBACK with its second argument set to DATA for every LWP in the list. If CALLBACK returns 1 for a particular LWP, return a pointer to the structure describing that LWP immediately. Otherwise return NULL. */ struct lwp_info * -iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data) +iterate_over_lwps (ptid_t filter, + int (*callback) (struct lwp_info *, void *), + void *data) { struct lwp_info *lp, *lpnext; for (lp = lwp_list; lp; lp = lpnext) { lpnext = lp->next; - if ((*callback) (lp, data)) - return lp; + + if (ptid_match (lp->ptid, filter)) + { + if ((*callback) (lp, data)) + return lp; + } } return NULL; @@ -761,116 +1106,164 @@ linux_nat_switch_fork (ptid_t new_ptid) init_lwp_list (); lp = add_lwp (new_ptid); lp->stopped = 1; -} -/* Record a PTID for later deletion. */ + init_thread_list (); + add_thread_silent (new_ptid); +} -struct saved_ptids -{ - ptid_t ptid; - struct saved_ptids *next; -}; -static struct saved_ptids *threads_to_delete; +/* Handle the exit of a single thread LP. */ static void -record_dead_thread (ptid_t ptid) +exit_lwp (struct lwp_info *lp) { - struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids)); - p->ptid = ptid; - p->next = threads_to_delete; - threads_to_delete = p; + struct thread_info *th = find_thread_pid (lp->ptid); + + if (th) + { + if (print_thread_events) + printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid)); + + delete_thread (lp->ptid); + } + + delete_lwp (lp->ptid); } -/* Delete any dead threads which are not the current thread. */ +/* Return an lwp's tgid, found in `/proc/PID/status'. */ -static void -prune_lwps (void) +int +linux_proc_get_tgid (int lwpid) { - struct saved_ptids **p = &threads_to_delete; + FILE *status_file; + char buf[100]; + int tgid = -1; - while (*p) - if (! ptid_equal ((*p)->ptid, inferior_ptid)) - { - struct saved_ptids *tmp = *p; - delete_thread (tmp->ptid); - *p = tmp->next; - xfree (tmp); - } - else - p = &(*p)->next; + snprintf (buf, sizeof (buf), "/proc/%d/status", (int) lwpid); + status_file = fopen (buf, "r"); + if (status_file != NULL) + { + while (fgets (buf, sizeof (buf), status_file)) + { + if (strncmp (buf, "Tgid:", 5) == 0) + { + tgid = strtoul (buf + strlen ("Tgid:"), NULL, 10); + break; + } + } + + fclose (status_file); + } + + return tgid; } -/* Callback for iterate_over_threads that finds a thread corresponding - to the given LWP. */ +/* Detect `T (stopped)' in `/proc/PID/status'. + Other states including `T (tracing stop)' are reported as false. 
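For reference, both linux_proc_get_tgid above and pid_is_stopped below parse plain `Field: value' lines from /proc/PID/status. An abridged example for a job-control-stopped thread (values invented for illustration):

    Name:   a.out
    State:  T (stopped)
    Tgid:   2423
    Pid:    2425

A thread stopped by ptrace instead reports `T (tracing stop)' in its State line on kernels that make the distinction, which is exactly what the strstr check below is meant to exclude.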
*/ static int -find_thread_from_lwp (struct thread_info *thr, void *dummy) +pid_is_stopped (pid_t pid) { - ptid_t *ptid_p = dummy; + FILE *status_file; + char buf[100]; + int retval = 0; - if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p)) - return 1; - else - return 0; + snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid); + status_file = fopen (buf, "r"); + if (status_file != NULL) + { + int have_state = 0; + + while (fgets (buf, sizeof (buf), status_file)) + { + if (strncmp (buf, "State:", 6) == 0) + { + have_state = 1; + break; + } + } + if (have_state && strstr (buf, "T (stopped)") != NULL) + retval = 1; + fclose (status_file); + } + return retval; } -/* Handle the exit of a single thread LP. */ +/* Wait for the LWP specified by LP, which we have just attached to. + Returns a wait status for that LWP, to cache. */ -static void -exit_lwp (struct lwp_info *lp) +static int +linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned, + int *signalled) { - if (in_thread_list (lp->ptid)) + pid_t new_pid, pid = GET_LWP (ptid); + int status; + + if (pid_is_stopped (pid)) { - /* Core GDB cannot deal with us deleting the current thread. */ - if (!ptid_equal (lp->ptid, inferior_ptid)) - delete_thread (lp->ptid); - else - record_dead_thread (lp->ptid); - printf_unfiltered (_("[%s exited]\n"), - target_pid_to_str (lp->ptid)); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LNPAW: Attaching to a stopped process\n"); + + /* The process is definitely stopped. It is in a job control + stop, unless the kernel predates the TASK_STOPPED / + TASK_TRACED distinction, in which case it might be in a + ptrace stop. Make sure it is in a ptrace stop; from there we + can kill it, signal it, et cetera. + + First make sure there is a pending SIGSTOP. Since we are + already attached, the process can not transition from stopped + to running without a PTRACE_CONT; so we know this signal will + go into the queue. The SIGSTOP generated by PTRACE_ATTACH is + probably already in the queue (unless this kernel is old + enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP + is not an RT signal, it can only be queued once. */ + kill_lwp (pid, SIGSTOP); + + /* Finally, resume the stopped process. This will deliver the SIGSTOP + (or a higher priority signal, just like normal PTRACE_ATTACH). */ + ptrace (PTRACE_CONT, pid, 0, 0); } - else + + /* Make sure the initial process is stopped. The user-level threads + layer might want to poke around in the inferior, and that won't + work if things haven't stabilized yet. */ + new_pid = my_waitpid (pid, &status, 0); + if (new_pid == -1 && errno == ECHILD) { - /* Even if LP->PTID is not in the global GDB thread list, the - LWP may be - with an additional thread ID. We don't need - to print anything in this case; thread_db is in use and - already took care of that. But it didn't delete the thread - in order to handle zombies correctly. */ + if (first) + warning (_("%s is a cloned process"), target_pid_to_str (ptid)); - struct thread_info *thr; + /* Try again with __WCLONE to check cloned processes. 
*/ + new_pid = my_waitpid (pid, &status, __WCLONE); + *cloned = 1; + } - thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid); - if (thr) - { - if (!ptid_equal (thr->ptid, inferior_ptid)) - delete_thread (thr->ptid); - else - record_dead_thread (thr->ptid); - } + gdb_assert (pid == new_pid && WIFSTOPPED (status)); + + if (WSTOPSIG (status) != SIGSTOP) + { + *signalled = 1; + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LNPAW: Received %s after attaching\n", + status_to_str (status)); } - delete_lwp (lp->ptid); + return status; } -/* Attach to the LWP specified by PID. If VERBOSE is non-zero, print - a message telling the user that a new LWP has been added to the - process. Return 0 if successful or -1 if the new LWP could not - be attached. */ +/* Attach to the LWP specified by PID. Return 0 if successful or -1 + if the new LWP could not be attached. */ int -lin_lwp_attach_lwp (ptid_t ptid, int verbose) +lin_lwp_attach_lwp (ptid_t ptid) { struct lwp_info *lp; + sigset_t prev_mask; gdb_assert (is_lwp (ptid)); - /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events - to interrupt either the ptrace() or waitpid() calls below. */ - if (!sigismember (&blocked_mask, SIGCHLD)) - { - sigaddset (&blocked_mask, SIGCHLD); - sigprocmask (SIG_BLOCK, &blocked_mask, NULL); - } + block_child_signals (&prev_mask); lp = find_lwp_pid (ptid); @@ -882,43 +1275,38 @@ lin_lwp_attach_lwp (ptid_t ptid, int verbose) to happen. */ if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL) { - pid_t pid; - int status; + int status, cloned = 0, signalled = 0; if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0) { /* If we fail to attach to the thread, issue a warning, but continue. One way this can happen is if thread - creation is interrupted; as of Linux 2.6.19, a kernel + creation is interrupted; as of Linux kernel 2.6.19, a bug may place threads in the thread list and then fail to create them. */ warning (_("Can't attach %s: %s"), target_pid_to_str (ptid), safe_strerror (errno)); + restore_child_signals_mask (&prev_mask); return -1; } - if (lp == NULL) - lp = add_lwp (ptid); - if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n", target_pid_to_str (ptid)); - pid = my_waitpid (GET_LWP (ptid), &status, 0); - if (pid == -1 && errno == ECHILD) + status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled); + lp = add_lwp (ptid); + lp->stopped = 1; + lp->cloned = cloned; + lp->signalled = signalled; + if (WSTOPSIG (status) != SIGSTOP) { - /* Try again with __WCLONE to check cloned processes. 
*/ - pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE); - lp->cloned = 1; + lp->resumed = 1; + lp->status = status; } - gdb_assert (pid == GET_LWP (ptid) - && WIFSTOPPED (status) && WSTOPSIG (status)); - - target_post_attach (pid); - - lp->stopped = 1; + target_post_attach (GET_LWP (lp->ptid)); if (debug_linux_nat) { @@ -941,99 +1329,207 @@ lin_lwp_attach_lwp (ptid_t ptid, int verbose) lp->stopped = 1; } - if (verbose) - printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid)); - + restore_child_signals_mask (&prev_mask); return 0; } static void -linux_nat_attach (char *args, int from_tty) +linux_nat_create_inferior (struct target_ops *ops, + char *exec_file, char *allargs, char **env, + int from_tty) { - struct lwp_info *lp; - pid_t pid; - int status; + int saved_async = 0; +#ifdef HAVE_PERSONALITY + int personality_orig = 0, personality_set = 0; +#endif /* HAVE_PERSONALITY */ - /* FIXME: We should probably accept a list of process id's, and - attach all of them. */ - linux_ops->to_attach (args, from_tty); + /* The fork_child mechanism is synchronous and calls target_wait, so + we have to mask the async mode. */ - /* Add the initial process as the first LWP to the list. */ - inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid)); - lp = add_lwp (inferior_ptid); + if (target_can_async_p ()) + /* Mask async mode. Creating a child requires a loop calling + wait_for_inferior currently. */ + saved_async = linux_nat_async_mask (0); - /* Make sure the initial process is stopped. The user-level threads - layer might want to poke around in the inferior, and that won't - work if things haven't stabilized yet. */ - pid = my_waitpid (GET_PID (inferior_ptid), &status, 0); - if (pid == -1 && errno == ECHILD) +#ifdef HAVE_PERSONALITY + if (disable_randomization) { - warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid)); + errno = 0; + personality_orig = personality (0xffffffff); + if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE)) + { + personality_set = 1; + personality (personality_orig | ADDR_NO_RANDOMIZE); + } + if (errno != 0 || (personality_set + && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE))) + warning (_("Error disabling address space randomization: %s"), + safe_strerror (errno)); + } +#endif /* HAVE_PERSONALITY */ - /* Try again with __WCLONE to check cloned processes. */ - pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE); - lp->cloned = 1; + linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty); + +#ifdef HAVE_PERSONALITY + if (personality_set) + { + errno = 0; + personality (personality_orig); + if (errno != 0) + warning (_("Error restoring address space randomization: %s"), + safe_strerror (errno)); } +#endif /* HAVE_PERSONALITY */ + + if (saved_async) + linux_nat_async_mask (saved_async); +} + +static void +linux_nat_attach (struct target_ops *ops, char *args, int from_tty) +{ + struct lwp_info *lp; + int status; + ptid_t ptid; + + linux_ops->to_attach (ops, args, from_tty); + + /* The ptrace base target adds the main thread with (pid,0,0) + format. Decorate it with lwp info. */ + ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid)); + thread_change_ptid (inferior_ptid, ptid); - gdb_assert (pid == GET_PID (inferior_ptid) - && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP); + /* Add the initial process as the first LWP to the list. 
*/ + lp = add_lwp (ptid); + status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned, + &lp->signalled); lp->stopped = 1; - /* Fake the SIGSTOP that core GDB expects. */ - lp->status = W_STOPCODE (SIGSTOP); + /* Save the wait status to report later. */ lp->resumed = 1; if (debug_linux_nat) - { - fprintf_unfiltered (gdb_stdlog, - "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid); - } + fprintf_unfiltered (gdb_stdlog, + "LNA: waitpid %ld, saving status %s\n", + (long) GET_PID (lp->ptid), status_to_str (status)); + + lp->status = status; + + if (target_can_async_p ()) + target_async (inferior_event_handler, 0); } +/* Get pending status of LP. */ static int -detach_callback (struct lwp_info *lp, void *data) +get_pending_status (struct lwp_info *lp, int *status) { - gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status)); + struct target_waitstatus last; + ptid_t last_ptid; - if (debug_linux_nat && lp->status) - fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n", - strsignal (WSTOPSIG (lp->status)), - target_pid_to_str (lp->ptid)); + get_last_target_status (&last_ptid, &last); - while (lp->signalled && lp->stopped) + /* If this lwp is the ptid that GDB is processing an event from, the + signal will be in stop_signal. Otherwise, we may cache pending + events in lp->status while trying to stop all threads (see + stop_wait_callback). */ + + *status = 0; + + if (non_stop) { - errno = 0; - if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, - WSTOPSIG (lp->status)) < 0) - error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid), - safe_strerror (errno)); + enum target_signal signo = TARGET_SIGNAL_0; + + if (is_executing (lp->ptid)) + { + /* If the core thought this lwp was executing --- e.g., the + executing property hasn't been updated yet, but the + thread has been stopped with a stop_callback / + stop_wait_callback sequence (see linux_nat_detach for + example) --- we can only have pending events in the local + queue. */ + signo = target_signal_from_host (WSTOPSIG (lp->status)); + } + else + { + /* If the core knows the thread is not executing, then we + have the last signal recorded in + thread_info->stop_signal. */ + + struct thread_info *tp = find_thread_pid (lp->ptid); + signo = tp->stop_signal; + } + + if (signo != TARGET_SIGNAL_0 + && !signal_pass_state (signo)) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, "\ +GPT: lwp %s had signal %s, but it is in no pass state\n", + target_pid_to_str (lp->ptid), + target_signal_to_string (signo)); + } + else + { + if (signo != TARGET_SIGNAL_0) + *status = W_STOPCODE (target_signal_to_host (signo)); + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "GPT: lwp %s as pending signal %s\n", + target_pid_to_str (lp->ptid), + target_signal_to_string (signo)); + } + } + else + { + if (GET_LWP (lp->ptid) == GET_LWP (last_ptid)) + { + struct thread_info *tp = find_thread_pid (lp->ptid); + if (tp->stop_signal != TARGET_SIGNAL_0 + && signal_pass_state (tp->stop_signal)) + *status = W_STOPCODE (target_signal_to_host (tp->stop_signal)); + } + else + *status = lp->status; + } + + return 0; +} + +static int +detach_callback (struct lwp_info *lp, void *data) +{ + gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status)); + + if (debug_linux_nat && lp->status) + fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n", + strsignal (WSTOPSIG (lp->status)), + target_pid_to_str (lp->ptid)); + /* If there is a pending SIGSTOP, get rid of it. 
*/ + if (lp->signalled) + { if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, - "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n", - target_pid_to_str (lp->ptid), - status_to_str (lp->status)); + "DC: Sending SIGCONT to %s\n", + target_pid_to_str (lp->ptid)); - lp->stopped = 0; + kill_lwp (GET_LWP (lp->ptid), SIGCONT); lp->signalled = 0; - lp->status = 0; - /* FIXME drow/2003-08-26: There was a call to stop_wait_callback - here. But since lp->signalled was cleared above, - stop_wait_callback didn't do anything; the process was left - running. Shouldn't we be waiting for it to stop? - I've removed the call, since stop_wait_callback now does do - something when called with lp->signalled == 0. */ - - gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status)); } /* We don't actually detach from the LWP that has an id equal to the overall process id just yet. */ if (GET_LWP (lp->ptid) != GET_PID (lp->ptid)) { + int status = 0; + + /* Pass on any pending signal for this LWP. */ + get_pending_status (lp, &status); + errno = 0; if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0, - WSTOPSIG (lp->status)) < 0) + WSTOPSIG (status)) < 0) error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid), safe_strerror (errno)); @@ -1041,7 +1537,7 @@ detach_callback (struct lwp_info *lp, void *data) fprintf_unfiltered (gdb_stdlog, "PTRACE_DETACH (%s, %s, 0) (OK)\n", target_pid_to_str (lp->ptid), - strsignal (WSTOPSIG (lp->status))); + strsignal (WSTOPSIG (status))); delete_lwp (lp->ptid); } @@ -1050,24 +1546,62 @@ detach_callback (struct lwp_info *lp, void *data) } static void -linux_nat_detach (char *args, int from_tty) +linux_nat_detach (struct target_ops *ops, char *args, int from_tty) { - iterate_over_lwps (detach_callback, NULL); + int pid; + int status; + enum target_signal sig; + struct lwp_info *main_lwp; + + pid = GET_PID (inferior_ptid); + + if (target_can_async_p ()) + linux_nat_async (NULL, 0); + + /* Stop all threads before detaching. ptrace requires that the + thread is stopped to sucessfully detach. */ + iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL); + /* ... and wait until all of them have reported back that + they're no longer running. */ + iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL); + + iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL); /* Only the initial process should be left right now. */ - gdb_assert (num_lwps == 1); + gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1); - trap_ptid = null_ptid; + main_lwp = find_lwp_pid (pid_to_ptid (pid)); - /* Destroy LWP info; it's no longer valid. */ - init_lwp_list (); + /* Pass on any pending signal for the last LWP. */ + if ((args == NULL || *args == '\0') + && get_pending_status (main_lwp, &status) != -1 + && WIFSTOPPED (status)) + { + /* Put the signal number in ARGS so that inf_ptrace_detach will + pass it along with PTRACE_DETACH. */ + args = alloca (8); + sprintf (args, "%d", (int) WSTOPSIG (status)); + fprintf_unfiltered (gdb_stdlog, + "LND: Sending signal %s to %s\n", + args, + target_pid_to_str (main_lwp->ptid)); + } - /* Restore the original signal mask. */ - sigprocmask (SIG_SETMASK, &normal_mask, NULL); - sigemptyset (&blocked_mask); + delete_lwp (main_lwp->ptid); - inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid)); - linux_ops->to_detach (args, from_tty); + if (forks_exist_p ()) + { + /* Multi-fork case. The current inferior_ptid is being detached + from, but there are other viable forks to debug. Detach from + the current fork, and context-switch to the first + available. 
*/ + linux_fork_detach (args, from_tty); + + if (non_stop && target_can_async_p ()) + target_async (inferior_event_handler, 0); + } + else + linux_ops->to_detach (ops, args, from_tty); } /* Resume LP. */ @@ -1077,9 +1611,13 @@ resume_callback (struct lwp_info *lp, void *data) { if (lp->stopped && lp->status == 0) { - struct thread_info *tp; + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n", + target_pid_to_str (lp->ptid)); - linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)), + linux_ops->to_resume (linux_ops, + pid_to_ptid (GET_LWP (lp->ptid)), 0, TARGET_SIGNAL_0); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, @@ -1087,7 +1625,14 @@ resume_callback (struct lwp_info *lp, void *data) target_pid_to_str (lp->ptid)); lp->stopped = 0; lp->step = 0; + memset (&lp->siginfo, 0, sizeof (lp->siginfo)); } + else if (lp->stopped && debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n", + target_pid_to_str (lp->ptid)); + else if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n", + target_pid_to_str (lp->ptid)); return 0; } @@ -1107,10 +1652,12 @@ resume_set_callback (struct lwp_info *lp, void *data) } static void -linux_nat_resume (ptid_t ptid, int step, enum target_signal signo) +linux_nat_resume (struct target_ops *ops, + ptid_t ptid, int step, enum target_signal signo) { + sigset_t prev_mask; struct lwp_info *lp; - int resume_all; + int resume_many; if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, @@ -1120,90 +1667,113 @@ linux_nat_resume (ptid_t ptid, int step, enum target_signal signo) signo ? strsignal (signo) : "0", target_pid_to_str (inferior_ptid)); - prune_lwps (); + block_child_signals (&prev_mask); /* A specific PTID means `step only this process id'. */ - resume_all = (PIDGET (ptid) == -1); + resume_many = (ptid_equal (minus_one_ptid, ptid) + || ptid_is_pid (ptid)); - if (resume_all) - iterate_over_lwps (resume_set_callback, NULL); - else - iterate_over_lwps (resume_clear_callback, NULL); - - /* If PID is -1, it's the current inferior that should be - handled specially. */ - if (PIDGET (ptid) == -1) - ptid = inferior_ptid; - - lp = find_lwp_pid (ptid); - if (lp) + if (!non_stop) { - ptid = pid_to_ptid (GET_LWP (lp->ptid)); + /* Mark the lwps we're resuming as resumed. */ + iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL); + iterate_over_lwps (ptid, resume_set_callback, NULL); + } + else + iterate_over_lwps (minus_one_ptid, resume_set_callback, NULL); - /* Remember if we're stepping. */ - lp->step = step; + /* See if it's the current inferior that should be handled + specially. */ + if (resume_many) + lp = find_lwp_pid (inferior_ptid); + else + lp = find_lwp_pid (ptid); + gdb_assert (lp != NULL); - /* Mark this LWP as resumed. */ - lp->resumed = 1; + /* Remember if we're stepping. */ + lp->step = step; - /* If we have a pending wait status for this thread, there is no - point in resuming the process. But first make sure that - linux_nat_wait won't preemptively handle the event - we - should never take this short-circuit if we are going to - leave LP running, since we have skipped resuming all the - other threads. This bit of code needs to be synchronized - with linux_nat_wait. */ + /* If we have a pending wait status for this thread, there is no + point in resuming the process. 
But first make sure that + linux_nat_wait won't preemptively handle the event - we + should never take this short-circuit if we are going to + leave LP running, since we have skipped resuming all the + other threads. This bit of code needs to be synchronized + with linux_nat_wait. */ - if (lp->status && WIFSTOPPED (lp->status)) + if (lp->status && WIFSTOPPED (lp->status)) + { + int saved_signo; + struct inferior *inf; + + inf = find_inferior_pid (ptid_get_pid (lp->ptid)); + gdb_assert (inf); + saved_signo = target_signal_from_host (WSTOPSIG (lp->status)); + + /* Defer to common code if we're gaining control of the + inferior. */ + if (inf->stop_soon == NO_STOP_QUIETLY + && signal_stop_state (saved_signo) == 0 + && signal_print_state (saved_signo) == 0 + && signal_pass_state (saved_signo) == 1) { - int saved_signo = target_signal_from_host (WSTOPSIG (lp->status)); - - if (signal_stop_state (saved_signo) == 0 - && signal_print_state (saved_signo) == 0 - && signal_pass_state (saved_signo) == 1) - { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLR: Not short circuiting for ignored " - "status 0x%x\n", lp->status); - - /* FIXME: What should we do if we are supposed to continue - this thread with a signal? */ - gdb_assert (signo == TARGET_SIGNAL_0); - signo = saved_signo; - lp->status = 0; - } - } + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLR: Not short circuiting for ignored " + "status 0x%x\n", lp->status); - if (lp->status) - { /* FIXME: What should we do if we are supposed to continue this thread with a signal? */ gdb_assert (signo == TARGET_SIGNAL_0); + signo = saved_signo; + lp->status = 0; + } + } - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLR: Short circuiting for status 0x%x\n", - lp->status); + if (lp->status) + { + /* FIXME: What should we do if we are supposed to continue + this thread with a signal? */ + gdb_assert (signo == TARGET_SIGNAL_0); - return; - } + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLR: Short circuiting for status 0x%x\n", + lp->status); - /* Mark LWP as not stopped to prevent it from being continued by - resume_callback. */ - lp->stopped = 0; + restore_child_signals_mask (&prev_mask); + if (target_can_async_p ()) + { + target_async (inferior_event_handler, 0); + /* Tell the event loop we have something to process. */ + async_file_mark (); + } + return; } - if (resume_all) - iterate_over_lwps (resume_callback, NULL); + /* Mark LWP as not stopped to prevent it from being continued by + resume_callback. */ + lp->stopped = 0; + + if (resume_many) + iterate_over_lwps (ptid, resume_callback, NULL); + + /* Convert to something the lower layer understands. */ + ptid = pid_to_ptid (GET_LWP (lp->ptid)); + + linux_ops->to_resume (linux_ops, ptid, step, signo); + memset (&lp->siginfo, 0, sizeof (lp->siginfo)); - linux_ops->to_resume (ptid, step, signo); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "LLR: %s %s, %s (resume event thread)\n", step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT", target_pid_to_str (ptid), signo ? strsignal (signo) : "0"); + + restore_child_signals_mask (&prev_mask); + if (target_can_async_p ()) + target_async (inferior_event_handler, 0); } /* Issue kill to specified lwp. 
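The body of kill_lwp itself is not in the hunks shown here. As a sketch of one common way to implement it (an assumption for illustration, not a quote of the patch; example_kill_lwp is an invented name): send the signal with the tkill system call so that only the named thread receives it, and fall back to plain kill on kernels that predate tkill.

#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
example_kill_lwp (int lwpid, int signo)
{
#ifdef __NR_tkill
  static int tkill_failed;

  if (!tkill_failed)
    {
      int ret;

      errno = 0;
      ret = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return ret;
      /* Kernel too old for tkill; don't bother trying again.  */
      tkill_failed = 1;
    }
#endif

  /* Under old LinuxThreads every LWP was its own process, so plain
     kill still reaches just that LWP.  */
  return kill (lwpid, signo);
}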
*/ @@ -1272,7 +1842,7 @@ linux_handle_extended_wait (struct lwp_info *lp, int status, _("wait returned unexpected status 0x%x"), status); } - ourstatus->value.related_pid = new_pid; + ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0); if (event == PTRACE_EVENT_FORK) ourstatus->kind = TARGET_WAITKIND_FORKED; @@ -1280,9 +1850,12 @@ linux_handle_extended_wait (struct lwp_info *lp, int status, ourstatus->kind = TARGET_WAITKIND_VFORKED; else { + struct cleanup *old_chain; + ourstatus->kind = TARGET_WAITKIND_IGNORE; - new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid))); + new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid))); new_lp->cloned = 1; + new_lp->stopped = 1; if (WSTOPSIG (status) != SIGSTOP) { @@ -1299,12 +1872,38 @@ linux_handle_extended_wait (struct lwp_info *lp, int status, else status = 0; - if (stopping) - new_lp->stopped = 1; - else + if (non_stop) { + /* Add the new thread to GDB's lists as soon as possible + so that: + + 1) the frontend doesn't have to wait for a stop to + display them, and, + + 2) we tag it with the correct running state. */ + + /* If the thread_db layer is active, let it know about + this new thread, and add it to GDB's list. */ + if (!thread_db_attach_lwp (new_lp->ptid)) + { + /* We're not using thread_db. Add it to GDB's + list. */ + target_post_attach (GET_LWP (new_lp->ptid)); + add_thread (new_lp->ptid); + } + + if (!stopping) + { + set_running (new_lp->ptid, 1); + set_executing (new_lp->ptid, 1); + } + } + + if (!stopping) + { + new_lp->stopped = 0; new_lp->resumed = 1; - ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0, + ptrace (PTRACE_CONT, new_pid, 0, status ? WSTOPSIG (status) : 0); } @@ -1334,6 +1933,16 @@ linux_handle_extended_wait (struct lwp_info *lp, int status, linux_parent_pid = 0; } + /* At this point, all inserted breakpoints are gone. Doing this + as soon as we detect an exec prevents the badness of deleting + a breakpoint writing the current "shadow contents" to lift + the bp. That shadow is NOT valid after an exec. + + Note that we have to do this after the detach_breakpoints + call above, otherwise breakpoints wouldn't be lifted from the + parent on a vfork, because detach_breakpoints would think + that breakpoints are not inserted. */ + mark_breakpoints_out (); return 0; } @@ -1416,6 +2025,22 @@ wait_lwp (struct lwp_info *lp) return status; } +/* Save the most recent siginfo for LP. This is currently only called + for SIGTRAP; some ports use the si_addr field for + target_stopped_data_address. In the future, it may also be used to + restore the siginfo of requeued signals. */ + +static void +save_siginfo (struct lwp_info *lp) +{ + errno = 0; + ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid), + (PTRACE_TYPE_ARG3) 0, &lp->siginfo); + + if (errno != 0) + memset (&lp->siginfo, 0, sizeof (lp->siginfo)); +} + /* Send a SIGSTOP to LP. */ static int @@ -1448,14 +2073,66 @@ stop_callback (struct lwp_info *lp, void *data) return 0; } -/* Wait until LP is stopped. If DATA is non-null it is interpreted as - a pointer to a set of signals to be flushed immediately. */ +/* Return non-zero if LWP PID has a pending SIGINT. 
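The helper used just below, linux_proc_pending_signals, is not in the hunks shown; it derives the pending, blocked and ignored sets from the signal-mask lines of /proc/PID/status, which look roughly like this (example values; each field is a hexadecimal mask with bit N-1 set for signal N):

    SigPnd: 0000000000000002
    ShdPnd: 0000000000000000
    SigBlk: 0000000000010000
    SigIgn: 0000000000384004

SigPnd is the per-thread pending set and ShdPnd the shared, process-wide one. With SIGINT == 2, the SigPnd value above has bit 1 set, so the check below would report a pending SIGINT.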
*/ static int -stop_wait_callback (struct lwp_info *lp, void *data) +linux_nat_has_pending_sigint (int pid) +{ + sigset_t pending, blocked, ignored; + int i; + + linux_proc_pending_signals (pid, &pending, &blocked, &ignored); + + if (sigismember (&pending, SIGINT) + && !sigismember (&ignored, SIGINT)) + return 1; + + return 0; +} + +/* Set a flag in LP indicating that we should ignore its next SIGINT. */ + +static int +set_ignore_sigint (struct lwp_info *lp, void *data) +{ + /* If a thread has a pending SIGINT, consume it; otherwise, set a + flag to consume the next one. */ + if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status) + && WSTOPSIG (lp->status) == SIGINT) + lp->status = 0; + else + lp->ignore_sigint = 1; + + return 0; +} + +/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag. + This function is called after we know the LWP has stopped; if the LWP + stopped before the expected SIGINT was delivered, then it will never have + arrived. Also, if the signal was delivered to a shared queue and consumed + by a different thread, it will never be delivered to this LWP. */ + +static void +maybe_clear_ignore_sigint (struct lwp_info *lp) { - sigset_t *flush_mask = data; + if (!lp->ignore_sigint) + return; + + if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid))) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "MCIS: Clearing bogus flag for %s\n", + target_pid_to_str (lp->ptid)); + lp->ignore_sigint = 0; + } +} + +/* Wait until LP is stopped. */ +static int +stop_wait_callback (struct lwp_info *lp, void *data) +{ if (!lp->stopped) { int status; @@ -1464,26 +2141,24 @@ stop_wait_callback (struct lwp_info *lp, void *data) if (status == 0) return 0; - /* Ignore any signals in FLUSH_MASK. */ - if (flush_mask && sigismember (flush_mask, WSTOPSIG (status))) + if (lp->ignore_sigint && WIFSTOPPED (status) + && WSTOPSIG (status) == SIGINT) { - if (!lp->signalled) - { - lp->stopped = 1; - return 0; - } + lp->ignore_sigint = 0; errno = 0; ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, - "PTRACE_CONT %s, 0, 0 (%s)\n", + "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n", target_pid_to_str (lp->ptid), errno ? safe_strerror (errno) : "OK"); - return stop_wait_callback (lp, flush_mask); + return stop_wait_callback (lp, NULL); } + maybe_clear_ignore_sigint (lp); + if (WSTOPSIG (status) != SIGSTOP) { if (WSTOPSIG (status) == SIGTRAP) @@ -1501,6 +2176,9 @@ stop_wait_callback (struct lwp_info *lp, void *data) user will delete or disable the breakpoint, but the thread will have already tripped on it. */ + /* Save the trap's siginfo in case we need it later. */ + save_siginfo (lp); + /* Now resume this LWP and get the SIGSTOP event. */ errno = 0; ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); @@ -1515,20 +2193,23 @@ stop_wait_callback (struct lwp_info *lp, void *data) "SWC: Candidate SIGTRAP event in %s\n", target_pid_to_str (lp->ptid)); } - /* Hold the SIGTRAP for handling by linux_nat_wait. */ - stop_wait_callback (lp, data); - /* If there's another event, throw it back into the queue. */ + /* Hold this event/waitstatus while we check to see if + there are any more (we still want to get that SIGSTOP). */ + stop_wait_callback (lp, NULL); + + /* Hold the SIGTRAP for handling by linux_nat_wait. If + there's another event, throw it back into the + queue. 
*/ if (lp->status) { if (debug_linux_nat) - { - fprintf_unfiltered (gdb_stdlog, - "SWC: kill %s, %s\n", - target_pid_to_str (lp->ptid), - status_to_str ((int) status)); - } + fprintf_unfiltered (gdb_stdlog, + "SWC: kill %s, %s\n", + target_pid_to_str (lp->ptid), + status_to_str ((int) status)); kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status)); } + /* Save the sigtrap event. */ lp->status = status; return 0; @@ -1556,13 +2237,12 @@ stop_wait_callback (struct lwp_info *lp, void *data) /* Hold this event/waitstatus while we check to see if there are any more (we still want to get that SIGSTOP). */ - stop_wait_callback (lp, data); - /* If the lp->status field is still empty, use it to hold - this event. If not, then this event must be returned - to the event queue of the LWP. */ - if (lp->status == 0) - lp->status = status; - else + stop_wait_callback (lp, NULL); + + /* If the lp->status field is still empty, use it to + hold this event. If not, then this event must be + returned to the event queue of the LWP. */ + if (lp->status) { if (debug_linux_nat) { @@ -1573,6 +2253,8 @@ stop_wait_callback (struct lwp_info *lp, void *data) } kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status)); } + else + lp->status = status; return 0; } } @@ -1588,96 +2270,6 @@ stop_wait_callback (struct lwp_info *lp, void *data) return 0; } -/* Check whether PID has any pending signals in FLUSH_MASK. If so set - the appropriate bits in PENDING, and return 1 - otherwise return 0. */ - -static int -linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask) -{ - sigset_t blocked, ignored; - int i; - - linux_proc_pending_signals (pid, pending, &blocked, &ignored); - - if (!flush_mask) - return 0; - - for (i = 1; i < NSIG; i++) - if (sigismember (pending, i)) - if (!sigismember (flush_mask, i) - || sigismember (&blocked, i) - || sigismember (&ignored, i)) - sigdelset (pending, i); - - if (sigisemptyset (pending)) - return 0; - - return 1; -} - -/* DATA is interpreted as a mask of signals to flush. If LP has - signals pending, and they are all in the flush mask, then arrange - to flush them. LP should be stopped, as should all other threads - it might share a signal queue with. */ - -static int -flush_callback (struct lwp_info *lp, void *data) -{ - sigset_t *flush_mask = data; - sigset_t pending, intersection, blocked, ignored; - int pid, status; - - /* Normally, when an LWP exits, it is removed from the LWP list. The - last LWP isn't removed till later, however. So if there is only - one LWP on the list, make sure it's alive. */ - if (lwp_list == lp && lp->next == NULL) - if (!linux_nat_thread_alive (lp->ptid)) - return 0; - - /* Just because the LWP is stopped doesn't mean that new signals - can't arrive from outside, so this function must be careful of - race conditions. However, because all threads are stopped, we - can assume that the pending mask will not shrink unless we resume - the LWP, and that it will then get another signal. We can't - control which one, however. */ - - if (lp->status) - { - if (debug_linux_nat) - printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status); - if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status))) - lp->status = 0; - } - - /* While there is a pending signal we would like to flush, continue - the inferior and collect another signal. But if there's already - a saved status that we don't want to flush, we can't resume the - inferior - if it stopped for some other reason we wouldn't have - anywhere to save the new status. 
In that case, we must leave the - signal unflushed (and possibly generate an extra SIGINT stop). - That's much less bad than losing a signal. */ - while (lp->status == 0 - && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask)) - { - int ret; - - errno = 0; - ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); - if (debug_linux_nat) - fprintf_unfiltered (gdb_stderr, - "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno); - - lp->stopped = 0; - stop_wait_callback (lp, flush_mask); - if (debug_linux_nat) - fprintf_unfiltered (gdb_stderr, - "FC: Wait finished; saved status is %d\n", - lp->status); - } - - return 0; -} - /* Return non-zero if LP has a wait status pending. */ static int @@ -1685,7 +2277,12 @@ status_callback (struct lwp_info *lp, void *data) { /* Only report a pending wait status if we pretend that this has indeed been resumed. */ - return (lp->status != 0 && lp->resumed); + /* We check for lp->waitstatus in addition to lp->status, because we + can have pending process exits recorded in lp->waitstatus, and + W_EXITCODE(0,0) == 0. */ + return ((lp->status != 0 + || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE) + && lp->resumed); } /* Return non-zero if LP isn't stopped. */ @@ -1705,8 +2302,8 @@ count_events_callback (struct lwp_info *lp, void *data) gdb_assert (count != NULL); - /* Count only LWPs that have a SIGTRAP event pending. */ - if (lp->status != 0 + /* Count only resumed LWPs that have a SIGTRAP event pending. */ + if (lp->status != 0 && lp->resumed && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP) (*count)++; @@ -1733,8 +2330,8 @@ select_event_lwp_callback (struct lwp_info *lp, void *data) gdb_assert (selector != NULL); - /* Select only LWPs that have a SIGTRAP event pending. */ - if (lp->status != 0 + /* Select only resumed LWPs that have a SIGTRAP event pending. */ + if (lp->status != 0 && lp->resumed && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP) if ((*selector)-- == 0) return 1; @@ -1742,6 +2339,39 @@ select_event_lwp_callback (struct lwp_info *lp, void *data) return 0; } +static int +cancel_breakpoint (struct lwp_info *lp) +{ + /* Arrange for a breakpoint to be hit again later. We don't keep + the SIGTRAP status and don't forward the SIGTRAP signal to the + LWP. We will handle the current event, eventually we will resume + this LWP, and this breakpoint will trap again. + + If we do not do this, then we run the risk that the user will + delete or disable the breakpoint, but the LWP will have already + tripped on it. */ + + struct regcache *regcache = get_thread_regcache (lp->ptid); + struct gdbarch *gdbarch = get_regcache_arch (regcache); + CORE_ADDR pc; + + pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch); + if (breakpoint_inserted_here_p (pc)) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "CB: Push back breakpoint for %s\n", + target_pid_to_str (lp->ptid)); + + /* Back up the PC if necessary. 
*/ + if (gdbarch_decr_pc_after_break (gdbarch)) + regcache_write_pc (regcache, pc); + + return 1; + } + return 0; +} + static int cancel_breakpoints_callback (struct lwp_info *lp, void *data) { @@ -1764,24 +2394,9 @@ cancel_breakpoints_callback (struct lwp_info *lp, void *data) if (lp->status != 0 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP - && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) - - gdbarch_decr_pc_after_break - (current_gdbarch))) - { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "CBC: Push back breakpoint for %s\n", - target_pid_to_str (lp->ptid)); - - /* Back up the PC if necessary. */ - if (gdbarch_decr_pc_after_break (current_gdbarch)) - write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break - (current_gdbarch), - lp->ptid); - - /* Throw away the SIGTRAP. */ - lp->status = 0; - } + && cancel_breakpoint (lp)) + /* Throw away the SIGTRAP. */ + lp->status = 0; return 0; } @@ -1789,7 +2404,7 @@ cancel_breakpoints_callback (struct lwp_info *lp, void *data) /* Select one LWP out of those that have events pending. */ static void -select_event_lwp (struct lwp_info **orig_lp, int *status) +select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status) { int num_events = 0; int random_selector; @@ -1799,7 +2414,8 @@ select_event_lwp (struct lwp_info **orig_lp, int *status) (*orig_lp)->status = *status; /* Give preference to any LWP that is being single-stepped. */ - event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL); + event_lp = iterate_over_lwps (filter, + select_singlestep_lwp_callback, NULL); if (event_lp != NULL) { if (debug_linux_nat) @@ -1813,7 +2429,7 @@ select_event_lwp (struct lwp_info **orig_lp, int *status) which have had SIGTRAP events. */ /* First see how many SIGTRAP events we have. */ - iterate_over_lwps (count_events_callback, &num_events); + iterate_over_lwps (filter, count_events_callback, &num_events); /* Now randomly pick a LWP out of those that have had a SIGTRAP. */ random_selector = (int) @@ -1824,7 +2440,8 @@ select_event_lwp (struct lwp_info **orig_lp, int *status) "SEL: Found %d SIGTRAP events, selecting #%d\n", num_events, random_selector); - event_lp = iterate_over_lwps (select_event_lwp_callback, + event_lp = iterate_over_lwps (filter, + select_event_lwp_callback, &random_selector); } @@ -1869,47 +2486,255 @@ stop_and_resume_callback (struct lwp_info *lp, void *data) return 0; } +/* Check if we should go on and pass this event to common code. + Return the affected lwp if we are, or NULL otherwise. */ +static struct lwp_info * +linux_nat_filter_event (int lwpid, int status, int options) +{ + struct lwp_info *lp; + + lp = find_lwp_pid (pid_to_ptid (lwpid)); + + /* Check for stop events reported by a process we didn't already + know about - anything not already in our LWP list. + + If we're expecting to receive stopped processes after + fork, vfork, and clone events, then we'll just add the + new one to our list and go back to waiting for the event + to be reported - the stopped process might be returned + from waitpid before or after the event is. */ + if (WIFSTOPPED (status) && !lp) + { + linux_record_stopped_pid (lwpid, status); + return NULL; + } + + /* Make sure we don't report an event for the exit of an LWP not in + our list, i.e. not part of the current process. This can happen + if we detach from a program we original forked and then it + exits. 
*/ + if (!WIFSTOPPED (status) && !lp) + return NULL; + + /* NOTE drow/2003-06-17: This code seems to be meant for debugging + CLONE_PTRACE processes which do not use the thread library - + otherwise we wouldn't find the new LWP this way. That doesn't + currently work, and the following code is currently unreachable + due to the two blocks above. If it's fixed some day, this code + should be broken out into a function so that we can also pick up + LWPs from the new interface. */ + if (!lp) + { + lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid))); + if (options & __WCLONE) + lp->cloned = 1; + + gdb_assert (WIFSTOPPED (status) + && WSTOPSIG (status) == SIGSTOP); + lp->signalled = 1; + + if (!in_thread_list (inferior_ptid)) + { + inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), + GET_PID (inferior_ptid)); + add_thread (inferior_ptid); + } + + add_thread (lp->ptid); + } + + /* Save the trap's siginfo in case we need it later. */ + if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP) + save_siginfo (lp); + + /* Handle GNU/Linux's extended waitstatus for trace events. */ + if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: Handling extended status 0x%06x\n", + status); + if (linux_handle_extended_wait (lp, status, 0)) + return NULL; + } + + /* Check if the thread has exited. */ + if ((WIFEXITED (status) || WIFSIGNALED (status)) + && num_lwps (GET_PID (lp->ptid)) > 1) + { + /* If this is the main thread, we must stop all threads and verify + if they are still alive. This is because in the nptl thread model + on Linux 2.4, there is no signal issued for exiting LWPs + other than the main thread. We only get the main thread exit + signal once all child threads have already exited. If we + stop all the threads and use the stop_wait_callback to check + if they have exited we can determine whether this signal + should be ignored or whether it means the end of the debugged + application, regardless of which threading model is being + used. */ + if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)) + { + lp->stopped = 1; + iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)), + stop_and_resume_callback, NULL); + } + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: %s exited.\n", + target_pid_to_str (lp->ptid)); + + if (num_lwps (GET_PID (lp->ptid)) > 1) + { + /* If there is at least one more LWP, then the exit signal + was not the end of the debugged application and should be + ignored. */ + exit_lwp (lp); + return NULL; + } + } + + /* Check if the current LWP has previously exited. In the nptl + thread model, LWPs other than the main thread do not issue + signals when they exit so we must check whenever the thread has + stopped. A similar check is made in stop_wait_callback(). */ + if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid)) + { + ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid)); + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: %s exited.\n", + target_pid_to_str (lp->ptid)); + + exit_lwp (lp); + + /* Make sure there is at least one thread running. */ + gdb_assert (iterate_over_lwps (ptid, running_callback, NULL)); + + /* Discard the event. */ + return NULL; + } + + /* Make sure we don't report a SIGSTOP that we sent ourselves in + an attempt to stop an LWP. 
*/ + if (lp->signalled + && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: Delayed SIGSTOP caught for %s.\n", + target_pid_to_str (lp->ptid)); + + /* This is a delayed SIGSTOP. */ + lp->signalled = 0; + + registers_changed (); + + linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)), + lp->step, TARGET_SIGNAL_0); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: %s %s, 0, 0 (discard SIGSTOP)\n", + lp->step ? + "PTRACE_SINGLESTEP" : "PTRACE_CONT", + target_pid_to_str (lp->ptid)); + + lp->stopped = 0; + gdb_assert (lp->resumed); + + /* Discard the event. */ + return NULL; + } + + /* Make sure we don't report a SIGINT that we have already displayed + for another thread. */ + if (lp->ignore_sigint + && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: Delayed SIGINT caught for %s.\n", + target_pid_to_str (lp->ptid)); + + /* This is a delayed SIGINT. */ + lp->ignore_sigint = 0; + + registers_changed (); + linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)), + lp->step, TARGET_SIGNAL_0); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: %s %s, 0, 0 (discard SIGINT)\n", + lp->step ? + "PTRACE_SINGLESTEP" : "PTRACE_CONT", + target_pid_to_str (lp->ptid)); + + lp->stopped = 0; + gdb_assert (lp->resumed); + + /* Discard the event. */ + return NULL; + } + + /* An interesting event. */ + gdb_assert (lp); + return lp; +} + static ptid_t -linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus) +linux_nat_wait_1 (struct target_ops *ops, + ptid_t ptid, struct target_waitstatus *ourstatus) { + static sigset_t prev_mask; struct lwp_info *lp = NULL; int options = 0; int status = 0; - pid_t pid = PIDGET (ptid); - sigset_t flush_mask; + pid_t pid; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: enter\n"); /* The first time we get here after starting a new inferior, we may not have added it to the LWP list yet - this is the earliest moment at which we know its PID. */ - if (num_lwps == 0) + if (ptid_is_pid (inferior_ptid)) { - gdb_assert (!is_lwp (inferior_ptid)); + /* Upgrade the main thread's ptid. */ + thread_change_ptid (inferior_ptid, + BUILD_LWP (GET_PID (inferior_ptid), + GET_PID (inferior_ptid))); - inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), - GET_PID (inferior_ptid)); lp = add_lwp (inferior_ptid); lp->resumed = 1; } - sigemptyset (&flush_mask); - /* Make sure SIGCHLD is blocked. */ - if (!sigismember (&blocked_mask, SIGCHLD)) - { - sigaddset (&blocked_mask, SIGCHLD); - sigprocmask (SIG_BLOCK, &blocked_mask, NULL); - } + block_child_signals (&prev_mask); + + if (ptid_equal (ptid, minus_one_ptid)) + pid = -1; + else if (ptid_is_pid (ptid)) + /* A request to wait for a specific tgid. This is not possible + with waitpid, so instead, we wait for any child, and leave + children we're not interested in right now with a pending + status to report later. */ + pid = -1; + else + pid = GET_LWP (ptid); retry: + lp = NULL; + status = 0; /* Make sure there is at least one LWP that has been resumed. */ - gdb_assert (iterate_over_lwps (resumed_callback, NULL)); + gdb_assert (iterate_over_lwps (ptid, resumed_callback, NULL)); /* First check if there is a LWP with a wait status pending. */ if (pid == -1) { /* Any LWP that's been resumed will do. 
*/ - lp = iterate_over_lwps (status_callback, NULL); + lp = iterate_over_lwps (ptid, status_callback, NULL); if (lp) { status = lp->status; @@ -1922,9 +2747,9 @@ retry: target_pid_to_str (lp->ptid)); } - /* But if we don't fine one, we'll have to wait, and check both - cloned and uncloned processes. We start with the cloned - processes. */ + /* But if we don't find one, we'll have to wait, and check both + cloned and uncloned processes. We start with the cloned + processes. */ options = __WCLONE | WNOHANG; } else if (is_lwp (ptid)) @@ -1951,9 +2776,16 @@ retry: the layer beneath us can understand. */ options = lp->cloned ? __WCLONE : 0; pid = GET_LWP (ptid); + + /* We check for lp->waitstatus in addition to lp->status, + because we can have pending process exits recorded in + lp->status and W_EXITCODE(0,0) == 0. We should probably have + an additional lp->status_p flag. */ + if (status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE) + lp = NULL; } - if (status && lp->signalled) + if (lp && lp->signalled) { /* A pending SIGSTOP may interfere with the normal stream of events. In a typical case where interference is a problem, @@ -1967,7 +2799,7 @@ retry: /* Resume the thread. It should halt immediately returning the pending SIGSTOP. */ registers_changed (); - linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)), + linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)), lp->step, TARGET_SIGNAL_0); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, @@ -1981,15 +2813,21 @@ retry: stop_wait_callback (lp, NULL); } - set_sigint_trap (); /* Causes SIGINT to be passed on to the - attached process. */ - set_sigio_trap (); + if (!target_can_async_p ()) + { + /* Causes SIGINT to be passed on to the attached process. */ + set_sigint_trap (); + } + + if (target_can_async_p ()) + options |= WNOHANG; /* In async mode, don't block. */ - while (status == 0) + while (lp == NULL) { pid_t lwpid; lwpid = my_waitpid (pid, &status, options); + if (lwpid > 0) { gdb_assert (pid == -1 || lwpid == pid); @@ -2001,170 +2839,80 @@ retry: (long) lwpid, status_to_str (status)); } - lp = find_lwp_pid (pid_to_ptid (lwpid)); - - /* Check for stop events reported by a process we didn't - already know about - anything not already in our LWP - list. - - If we're expecting to receive stopped processes after - fork, vfork, and clone events, then we'll just add the - new one to our list and go back to waiting for the event - to be reported - the stopped process might be returned - from waitpid before or after the event is. */ - if (WIFSTOPPED (status) && !lp) - { - linux_record_stopped_pid (lwpid, status); - status = 0; - continue; - } - - /* Make sure we don't report an event for the exit of an LWP not in - our list, i.e. not part of the current process. This can happen - if we detach from a program we original forked and then it - exits. */ - if (!WIFSTOPPED (status) && !lp) - { - status = 0; - continue; - } + lp = linux_nat_filter_event (lwpid, status, options); - /* NOTE drow/2003-06-17: This code seems to be meant for debugging - CLONE_PTRACE processes which do not use the thread library - - otherwise we wouldn't find the new LWP this way. That doesn't - currently work, and the following code is currently unreachable - due to the two blocks above. If it's fixed some day, this code - should be broken out into a function so that we can also pick up - LWPs from the new interface. 
*/ - if (!lp) + if (lp + && ptid_is_pid (ptid) + && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid)) { - lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid))); - if (options & __WCLONE) - lp->cloned = 1; - - gdb_assert (WIFSTOPPED (status) - && WSTOPSIG (status) == SIGSTOP); - lp->signalled = 1; + if (debug_linux_nat) + fprintf (stderr, "LWP %ld got an event %06x, leaving pending.\n", + ptid_get_lwp (lp->ptid), status); - if (!in_thread_list (inferior_ptid)) + if (WIFSTOPPED (status)) { - inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), - GET_PID (inferior_ptid)); - add_thread (inferior_ptid); - } + if (WSTOPSIG (status) != SIGSTOP) + { + lp->status = status; - add_thread (lp->ptid); - printf_unfiltered (_("[New %s]\n"), - target_pid_to_str (lp->ptid)); - } + stop_callback (lp, NULL); - /* Handle GNU/Linux's extended waitstatus for trace events. */ - if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0) - { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: Handling extended status 0x%06x\n", - status); - if (linux_handle_extended_wait (lp, status, 0)) - { - status = 0; - continue; - } - } + /* Resume in order to collect the sigstop. */ + ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); - /* Check if the thread has exited. */ - if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1) - { - /* If this is the main thread, we must stop all threads and - verify if they are still alive. This is because in the nptl - thread model, there is no signal issued for exiting LWPs - other than the main thread. We only get the main thread - exit signal once all child threads have already exited. - If we stop all the threads and use the stop_wait_callback - to check if they have exited we can determine whether this - signal should be ignored or whether it means the end of the - debugged application, regardless of which threading model - is being used. */ - if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)) + stop_wait_callback (lp, NULL); + } + else + { + lp->stopped = 1; + lp->signalled = 0; + } + } + else if (WIFEXITED (status) || WIFSIGNALED (status)) { + if (debug_linux_nat) + fprintf (stderr, "Process %ld exited while stopping LWPs\n", + ptid_get_lwp (lp->ptid)); + + /* This was the last lwp in the process. Since + events are serialized to GDB core, and we can't + report this one right now, but GDB core and the + other target layers will want to be notified + about the exit code/signal, leave the status + pending for the next time we're able to report + it. */ + lp->status = status; + + /* Prevent trying to stop this thread again. We'll + never try to resume it because it has a pending + status. */ lp->stopped = 1; - iterate_over_lwps (stop_and_resume_callback, NULL); - } - - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: %s exited.\n", - target_pid_to_str (lp->ptid)); - exit_lwp (lp); + /* Dead LWP's aren't expected to reported a pending + sigstop. */ + lp->signalled = 0; - /* If there is at least one more LWP, then the exit signal - was not the end of the debugged application and should be - ignored. */ - if (num_lwps > 0) - { - /* Make sure there is at least one thread running. */ - gdb_assert (iterate_over_lwps (running_callback, NULL)); - - /* Discard the event. */ - status = 0; - continue; + /* Store the pending event in the waitstatus as + well, because W_EXITCODE(0,0) == 0. */ + store_waitstatus (&lp->waitstatus, status); } - } - /* Check if the current LWP has previously exited. 
In the nptl - thread model, LWPs other than the main thread do not issue - signals when they exit so we must check whenever the thread - has stopped. A similar check is made in stop_wait_callback(). */ - if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid)) - { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: %s exited.\n", - target_pid_to_str (lp->ptid)); - - exit_lwp (lp); - - /* Make sure there is at least one thread running. */ - gdb_assert (iterate_over_lwps (running_callback, NULL)); - - /* Discard the event. */ - status = 0; + /* Keep looking. */ + lp = NULL; continue; } - /* Make sure we don't report a SIGSTOP that we sent - ourselves in an attempt to stop an LWP. */ - if (lp->signalled - && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP) + if (lp) + break; + else { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: Delayed SIGSTOP caught for %s.\n", - target_pid_to_str (lp->ptid)); - - /* This is a delayed SIGSTOP. */ - lp->signalled = 0; - - registers_changed (); - linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)), - lp->step, TARGET_SIGNAL_0); - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "LLW: %s %s, 0, 0 (discard SIGSTOP)\n", - lp->step ? - "PTRACE_SINGLESTEP" : "PTRACE_CONT", - target_pid_to_str (lp->ptid)); - - lp->stopped = 0; - gdb_assert (lp->resumed); - - /* Discard the event. */ - status = 0; + if (pid == -1) + { + /* waitpid did return something. Restart over. */ + options |= __WCLONE; + } continue; } - - break; } if (pid == -1) @@ -2172,17 +2920,33 @@ retry: /* Alternate between checking cloned and uncloned processes. */ options ^= __WCLONE; - /* And suspend every time we have checked both. */ + /* And every time we have checked both: + In async mode, return to event loop; + In sync mode, suspend waiting for a SIGCHLD signal. */ if (options & __WCLONE) - sigsuspend (&suspend_mask); + { + if (target_can_async_p ()) + { + /* No interesting event. */ + ourstatus->kind = TARGET_WAITKIND_IGNORE; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n"); + + restore_child_signals_mask (&prev_mask); + return minus_one_ptid; + } + + sigsuspend (&suspend_mask); + } } /* We shouldn't end up here unless we want to try again. */ - gdb_assert (status == 0); + gdb_assert (lp == NULL); } - clear_sigio_trap (); - clear_sigint_trap (); + if (!target_can_async_p ()) + clear_sigint_trap (); gdb_assert (lp); @@ -2196,10 +2960,17 @@ retry: if (WIFSTOPPED (status)) { int signo = target_signal_from_host (WSTOPSIG (status)); + struct inferior *inf; - /* If we get a signal while single-stepping, we may need special - care, e.g. to skip the signal handler. Defer to common code. */ + inf = find_inferior_pid (ptid_get_pid (lp->ptid)); + gdb_assert (inf); + + /* Defer to common code if we get a signal while + single-stepping, since that may need special care, e.g. to + skip the signal handler, or, if we're gaining control of the + inferior. */ if (!lp->step + && inf->stop_soon == NO_STOP_QUIETLY && signal_stop_state (signo) == 0 && signal_print_state (signo) == 0 && signal_pass_state (signo) == 1) @@ -2210,7 +2981,7 @@ retry: newly attached threads may cause an unwanted delay in getting them running. */ registers_changed (); - linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)), + linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)), lp->step, signo); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, @@ -2220,17 +2991,27 @@ retry: target_pid_to_str (lp->ptid), signo ? 
strsignal (signo) : "0"); lp->stopped = 0; - status = 0; goto retry; } - if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0) + if (!non_stop) { - /* If ^C/BREAK is typed at the tty/console, SIGINT gets - forwarded to the entire process group, that is, all LWP's - will receive it. Since we only want to report it once, - we try to flush it from all LWPs except this one. */ - sigaddset (&flush_mask, SIGINT); + /* Only do the below in all-stop, as we currently use SIGINT + to implement target_stop (see linux_nat_stop) in + non-stop. */ + if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0) + { + /* If ^C/BREAK is typed at the tty/console, SIGINT gets + forwarded to the entire process group, that is, all LWPs + will receive it - unless they're using CLONE_THREAD to + share signals. Since we only want to report it once, we + mark it as ignored for all LWPs except this one. */ + iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)), + set_ignore_sigint, NULL); + lp->ignore_sigint = 0; + } + else + maybe_clear_ignore_sigint (lp); } } @@ -2241,35 +3022,35 @@ retry: fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n", status_to_str (status), target_pid_to_str (lp->ptid)); - /* Now stop all other LWP's ... */ - iterate_over_lwps (stop_callback, NULL); + if (!non_stop) + { + /* Now stop all other LWP's ... */ + iterate_over_lwps (minus_one_ptid, stop_callback, NULL); - /* ... and wait until all of them have reported back that they're no - longer running. */ - iterate_over_lwps (stop_wait_callback, &flush_mask); - iterate_over_lwps (flush_callback, &flush_mask); + /* ... and wait until all of them have reported back that + they're no longer running. */ + iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL); - /* If we're not waiting for a specific LWP, choose an event LWP from - among those that have had events. Giving equal priority to all - LWPs that have had events helps prevent starvation. */ - if (pid == -1) - select_event_lwp (&lp, &status); + /* If we're not waiting for a specific LWP, choose an event LWP + from among those that have had events. Giving equal priority + to all LWPs that have had events helps prevent + starvation. */ + if (pid == -1) + select_event_lwp (ptid, &lp, &status); + } /* Now that we've selected our final event LWP, cancel any breakpoints in other LWPs that have hit a GDB breakpoint. See the comment in cancel_breakpoints_callback to find out why. */ - iterate_over_lwps (cancel_breakpoints_callback, lp); + iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp); if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP) { - trap_ptid = lp->ptid; if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, - "LLW: trap_ptid is %s.\n", - target_pid_to_str (trap_ptid)); + "LLW: trap ptid is %s.\n", + target_pid_to_str (lp->ptid)); } - else - trap_ptid = null_ptid; if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE) { @@ -2279,9 +3060,43 @@ retry: else store_waitstatus (ourstatus, status); + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: exit\n"); + + restore_child_signals_mask (&prev_mask); return lp->ptid; } +static ptid_t +linux_nat_wait (struct target_ops *ops, + ptid_t ptid, struct target_waitstatus *ourstatus) +{ + ptid_t event_ptid; + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, "linux_nat_wait: [%s]\n", target_pid_to_str (ptid)); + + /* Flush the async file first. 
*/ + if (target_can_async_p ()) + async_file_flush (); + + event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus); + + /* If we requested any event, and something came out, assume there + may be more. If we requested a specific lwp or process, also + assume there may be more. */ + if (target_can_async_p () + && (ourstatus->kind != TARGET_WAITKIND_IGNORE + || !ptid_equal (ptid, minus_one_ptid))) + async_file_mark (); + + /* Get ready for the next event. */ + if (target_can_async_p ()) + target_async (inferior_event_handler, 0); + + return event_ptid; +} + static int kill_callback (struct lwp_info *lp, void *data) { @@ -2313,11 +3128,18 @@ kill_wait_callback (struct lwp_info *lp, void *data) do { pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE); - if (pid != (pid_t) -1 && debug_linux_nat) + if (pid != (pid_t) -1) { - fprintf_unfiltered (gdb_stdlog, - "KWC: wait %s received unknown.\n", - target_pid_to_str (lp->ptid)); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "KWC: wait %s received unknown.\n", + target_pid_to_str (lp->ptid)); + /* The Linux kernel sometimes fails to kill a thread + completely after PTRACE_KILL; that goes from the stop + point in do_fork out to the one in + get_signal_to_deliever and waits again. So kill it + again. */ + kill_callback (lp, NULL); } } while (pid == GET_LWP (lp->ptid)); @@ -2328,11 +3150,14 @@ kill_wait_callback (struct lwp_info *lp, void *data) do { pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0); - if (pid != (pid_t) -1 && debug_linux_nat) + if (pid != (pid_t) -1) { - fprintf_unfiltered (gdb_stdlog, - "KWC: wait %s received unk.\n", - target_pid_to_str (lp->ptid)); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "KWC: wait %s received unk.\n", + target_pid_to_str (lp->ptid)); + /* See the call to kill_callback above. */ + kill_callback (lp, NULL); } } while (pid == GET_LWP (lp->ptid)); @@ -2342,7 +3167,7 @@ kill_wait_callback (struct lwp_info *lp, void *data) } static void -linux_nat_kill (void) +linux_nat_kill (struct target_ops *ops) { struct target_waitstatus last; ptid_t last_ptid; @@ -2357,7 +3182,7 @@ linux_nat_kill (void) if (last.kind == TARGET_WAITKIND_FORKED || last.kind == TARGET_WAITKIND_VFORKED) { - ptrace (PT_KILL, last.value.related_pid, 0, 0); + ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0); wait (&status); } @@ -2365,31 +3190,32 @@ linux_nat_kill (void) linux_fork_killall (); else { + ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid)); + /* Stop all threads before killing them, since ptrace requires + that the thread is stopped to sucessfully PTRACE_KILL. */ + iterate_over_lwps (ptid, stop_callback, NULL); + /* ... and wait until all of them have reported back that + they're no longer running. */ + iterate_over_lwps (ptid, stop_wait_callback, NULL); + /* Kill all LWP's ... */ - iterate_over_lwps (kill_callback, NULL); + iterate_over_lwps (ptid, kill_callback, NULL); /* ... and wait until we've flushed all events. */ - iterate_over_lwps (kill_wait_callback, NULL); + iterate_over_lwps (ptid, kill_wait_callback, NULL); } target_mourn_inferior (); } static void -linux_nat_mourn_inferior (void) +linux_nat_mourn_inferior (struct target_ops *ops) { - trap_ptid = null_ptid; - - /* Destroy LWP info; it's no longer valid. */ - init_lwp_list (); - - /* Restore the original signal mask. */ - sigprocmask (SIG_SETMASK, &normal_mask, NULL); - sigemptyset (&blocked_mask); + purge_lwp_list (ptid_get_pid (inferior_ptid)); if (! forks_exist_p ()) /* Normal case, no other forks available. 
*/ - linux_ops->to_mourn_inferior (); + linux_ops->to_mourn_inferior (ops); else /* Multi-fork case. The current inferior_ptid has exited, but there are other viable forks to debug. Delete the exiting @@ -2397,15 +3223,96 @@ linux_nat_mourn_inferior (void) linux_fork_mourn_inferior (); } +/* Convert a native/host siginfo object, into/from the siginfo in the + layout of the inferiors' architecture. */ + +static void +siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction) +{ + int done = 0; + + if (linux_nat_siginfo_fixup != NULL) + done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction); + + /* If there was no callback, or the callback didn't do anything, + then just do a straight memcpy. */ + if (!done) + { + if (direction == 1) + memcpy (siginfo, inf_siginfo, sizeof (struct siginfo)); + else + memcpy (inf_siginfo, siginfo, sizeof (struct siginfo)); + } +} + +static LONGEST +linux_xfer_siginfo (struct target_ops *ops, enum target_object object, + const char *annex, gdb_byte *readbuf, + const gdb_byte *writebuf, ULONGEST offset, LONGEST len) +{ + int pid; + struct siginfo siginfo; + gdb_byte inf_siginfo[sizeof (struct siginfo)]; + + gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO); + gdb_assert (readbuf || writebuf); + + pid = GET_LWP (inferior_ptid); + if (pid == 0) + pid = GET_PID (inferior_ptid); + + if (offset > sizeof (siginfo)) + return -1; + + errno = 0; + ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo); + if (errno != 0) + return -1; + + /* When GDB is built as a 64-bit application, ptrace writes into + SIGINFO an object with 64-bit layout. Since debugging a 32-bit + inferior with a 64-bit GDB should look the same as debugging it + with a 32-bit GDB, we need to convert it. GDB core always sees + the converted layout, so any read/write will have to be done + post-conversion. */ + siginfo_fixup (&siginfo, inf_siginfo, 0); + + if (offset + len > sizeof (siginfo)) + len = sizeof (siginfo) - offset; + + if (readbuf != NULL) + memcpy (readbuf, inf_siginfo + offset, len); + else + { + memcpy (inf_siginfo + offset, writebuf, len); + + /* Convert back to ptrace layout before flushing it out. */ + siginfo_fixup (&siginfo, inf_siginfo, 1); + + errno = 0; + ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo); + if (errno != 0) + return -1; + } + + return len; +} + static LONGEST linux_nat_xfer_partial (struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, LONGEST len) { - struct cleanup *old_chain = save_inferior_ptid (); + struct cleanup *old_chain; LONGEST xfer; + if (object == TARGET_OBJECT_SIGNAL_INFO) + return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf, + offset, len); + + old_chain = save_inferior_ptid (); + if (is_lwp (inferior_ptid)) inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid)); @@ -2417,35 +3324,43 @@ linux_nat_xfer_partial (struct target_ops *ops, enum target_object object, } static int -linux_nat_thread_alive (ptid_t ptid) +linux_thread_alive (ptid_t ptid) { + int err; + gdb_assert (is_lwp (ptid)); - errno = 0; - ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0); + /* Send signal 0 instead of anything ptrace, because ptracing a + running thread errors out claiming that the thread doesn't + exist. */ + err = kill_lwp (GET_LWP (ptid), 0); + if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, - "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n", + "LLTA: KILL(SIG0) %s (%s)\n", target_pid_to_str (ptid), - errno ? 
safe_strerror (errno) : "OK"); + err ? safe_strerror (err) : "OK"); - /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can - handle that case gracefully since ptrace will first do a lookup - for the process based upon the passed-in pid. If that fails we - will get either -ESRCH or -EPERM, otherwise the child exists and - is alive. */ - if (errno == ESRCH || errno == EPERM) + if (err != 0) return 0; return 1; } +static int +linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid) +{ + return linux_thread_alive (ptid); +} + static char * -linux_nat_pid_to_str (ptid_t ptid) +linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid) { static char buf[64]; - if (lwp_list && lwp_list->next && is_lwp (ptid)) + if (is_lwp (ptid) + && (GET_PID (ptid) != GET_LWP (ptid) + || num_lwps (GET_PID (ptid)) > 1)) { snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid)); return buf; @@ -2454,14 +3369,6 @@ linux_nat_pid_to_str (ptid_t ptid) return normal_pid_to_str (ptid); } -static void -sigchld_handler (int signo) -{ - /* Do nothing. The only reason for this handler is that it allows - us to use sigsuspend in linux_nat_wait above to wait for the - arrival of a SIGCHLD. */ -} - /* Accepts an integer PID; Returns a string representing a file that can be opened to get the symbols for the child process. */ @@ -2520,18 +3427,20 @@ linux_nat_find_memory_regions (int (*func) (CORE_ADDR, unsigned long, int, int, int, void *), void *obfd) { - long long pid = PIDGET (inferior_ptid); + int pid = PIDGET (inferior_ptid); char mapsfilename[MAXPATHLEN]; FILE *mapsfile; long long addr, endaddr, size, offset, inode; char permissions[8], device[8], filename[MAXPATHLEN]; int read, write, exec; int ret; + struct cleanup *cleanup; /* Compose the filename for the /proc memory map, and open it. */ - sprintf (mapsfilename, "/proc/%lld/maps", pid); + sprintf (mapsfilename, "/proc/%d/maps", pid); if ((mapsfile = fopen (mapsfilename, "r")) == NULL) error (_("Could not open %s."), mapsfilename); + cleanup = make_cleanup_fclose (mapsfile); if (info_verbose) fprintf_filtered (gdb_stdout, @@ -2560,32 +3469,54 @@ linux_nat_find_memory_regions (int (*func) (CORE_ADDR, fprintf_filtered (gdb_stdout, "\n"); } - /* Invoke the callback function to create the corefile - segment. */ - func (addr, size, read, write, exec, obfd); - } - fclose (mapsfile); + /* Invoke the callback function to create the corefile + segment. */ + func (addr, size, read, write, exec, obfd); + } + do_cleanups (cleanup); + return 0; +} + +static int +find_signalled_thread (struct thread_info *info, void *data) +{ + if (info->stop_signal != TARGET_SIGNAL_0 + && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid)) + return 1; + return 0; } +static enum target_signal +find_stop_signal (void) +{ + struct thread_info *info = + iterate_over_threads (find_signalled_thread, NULL); + + if (info) + return info->stop_signal; + else + return TARGET_SIGNAL_0; +} + /* Records the thread's register state for the corefile note section. 
*/ static char * linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, - char *note_data, int *note_size) + char *note_data, int *note_size, + enum target_signal stop_signal) { gdb_gregset_t gregs; gdb_fpregset_t fpregs; -#ifdef FILL_FPXREGSET - gdb_fpxregset_t fpxregs; -#endif unsigned long lwp = ptid_get_lwp (ptid); struct regcache *regcache = get_thread_regcache (ptid); struct gdbarch *gdbarch = get_regcache_arch (regcache); const struct regset *regset; int core_regset_p; struct cleanup *old_chain; + struct core_regset_section *sect_list; + char *gdb_regset; old_chain = save_inferior_ptid (); inferior_ptid = ptid; @@ -2593,6 +3524,8 @@ linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, do_cleanups (old_chain); core_regset_p = gdbarch_regset_from_core_section_p (gdbarch); + sect_list = gdbarch_core_regset_sections (gdbarch); + if (core_regset_p && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg", sizeof (gregs))) != NULL @@ -2608,35 +3541,56 @@ linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, lwp, stop_signal, &gregs); - if (core_regset_p - && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2", - sizeof (fpregs))) != NULL - && regset->collect_regset != NULL) - regset->collect_regset (regset, regcache, -1, - &fpregs, sizeof (fpregs)); - else - fill_fpregset (regcache, &fpregs, -1); - - note_data = (char *) elfcore_write_prfpreg (obfd, - note_data, - note_size, - &fpregs, sizeof (fpregs)); + /* The loop below uses the new struct core_regset_section, which stores + the supported section names and sizes for the core file. Note that + note PRSTATUS needs to be treated specially. But the other notes are + structurally the same, so they can benefit from the new struct. */ + if (core_regset_p && sect_list != NULL) + while (sect_list->sect_name != NULL) + { + /* .reg was already handled above. */ + if (strcmp (sect_list->sect_name, ".reg") == 0) + { + sect_list++; + continue; + } + regset = gdbarch_regset_from_core_section (gdbarch, + sect_list->sect_name, + sect_list->size); + gdb_assert (regset && regset->collect_regset); + gdb_regset = xmalloc (sect_list->size); + regset->collect_regset (regset, regcache, -1, + gdb_regset, sect_list->size); + note_data = (char *) elfcore_write_register_note (obfd, + note_data, + note_size, + sect_list->sect_name, + gdb_regset, + sect_list->size); + xfree (gdb_regset); + sect_list++; + } -#ifdef FILL_FPXREGSET - if (core_regset_p - && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp", - sizeof (fpxregs))) != NULL - && regset->collect_regset != NULL) - regset->collect_regset (regset, regcache, -1, - &fpxregs, sizeof (fpxregs)); + /* For architectures that does not have the struct core_regset_section + implemented, we use the old method. When all the architectures have + the new support, the code below should be deleted. 
*/ else - fill_fpxregset (regcache, &fpxregs, -1); + { + if (core_regset_p + && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2", + sizeof (fpregs))) != NULL + && regset->collect_regset != NULL) + regset->collect_regset (regset, regcache, -1, + &fpregs, sizeof (fpregs)); + else + fill_fpregset (regcache, &fpregs, -1); + + note_data = (char *) elfcore_write_prfpreg (obfd, + note_data, + note_size, + &fpregs, sizeof (fpregs)); + } - note_data = (char *) elfcore_write_prxfpreg (obfd, - note_data, - note_size, - &fpxregs, sizeof (fpxregs)); -#endif return note_data; } @@ -2646,6 +3600,7 @@ struct linux_nat_corefile_thread_data char *note_data; int *note_size; int num_notes; + enum target_signal stop_signal; }; /* Called by gdbthread.c once per thread. Records the thread's @@ -2659,25 +3614,13 @@ linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data) args->note_data = linux_nat_do_thread_registers (args->obfd, ti->ptid, args->note_data, - args->note_size); + args->note_size, + args->stop_signal); args->num_notes++; return 0; } -/* Records the register state for the corefile note section. */ - -static char * -linux_nat_do_registers (bfd *obfd, ptid_t ptid, - char *note_data, int *note_size) -{ - return linux_nat_do_thread_registers (obfd, - ptid_build (ptid_get_pid (inferior_ptid), - ptid_get_pid (inferior_ptid), - 0), - note_data, note_size); -} - /* Fills the "to_make_corefile_note" target vector. Builds the note section for a corefile, and returns it in a malloc buffer. */ @@ -2686,10 +3629,13 @@ linux_nat_make_corefile_notes (bfd *obfd, int *note_size) { struct linux_nat_corefile_thread_data thread_args; struct cleanup *old_chain; + /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */ char fname[16] = { '\0' }; + /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */ char psargs[80] = { '\0' }; char *note_data = NULL; ptid_t current_ptid = inferior_ptid; + ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid)); gdb_byte *auxv; int auxv_len; @@ -2699,9 +3645,18 @@ linux_nat_make_corefile_notes (bfd *obfd, int *note_size) strncpy (psargs, get_exec_file (0), sizeof (psargs)); if (get_inferior_args ()) { - strncat (psargs, " ", sizeof (psargs) - strlen (psargs)); - strncat (psargs, get_inferior_args (), - sizeof (psargs) - strlen (psargs)); + char *string_end; + char *psargs_end = psargs + sizeof (psargs); + + /* linux_elfcore_write_prpsinfo () handles zero unterminated + strings fine. */ + string_end = memchr (psargs, 0, sizeof (psargs)); + if (string_end != NULL) + { + *string_end++ = ' '; + strncpy (string_end, get_inferior_args (), + psargs_end - string_end); + } } note_data = (char *) elfcore_write_prpsinfo (obfd, note_data, @@ -2713,18 +3668,10 @@ linux_nat_make_corefile_notes (bfd *obfd, int *note_size) thread_args.note_data = note_data; thread_args.note_size = note_size; thread_args.num_notes = 0; - iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args); - if (thread_args.num_notes == 0) - { - /* iterate_over_threads didn't come up with any threads; just - use inferior_ptid. 
*/ - note_data = linux_nat_do_registers (obfd, inferior_ptid, - note_data, note_size); - } - else - { - note_data = thread_args.note_data; - } + thread_args.stop_signal = find_stop_signal (); + iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args); + gdb_assert (thread_args.num_notes != 0); + note_data = thread_args.note_data; auxv_len = target_read_alloc (¤t_target, TARGET_OBJECT_AUXV, NULL, &auxv); @@ -2744,7 +3691,9 @@ linux_nat_make_corefile_notes (bfd *obfd, int *note_size) static void linux_nat_info_proc_cmd (char *args, int from_tty) { - long long pid = PIDGET (inferior_ptid); + /* A long is used for pid instead of an int to avoid a loss of precision + compiler warning from the output of strtoul. */ + long pid = PIDGET (inferior_ptid); FILE *procfile; char **argv = NULL; char buffer[MAXPATHLEN]; @@ -2762,10 +3711,8 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if (args) { /* Break up 'args' into an argv array. */ - if ((argv = buildargv (args)) == NULL) - nomem (0); - else - make_cleanup_freeargv (argv); + argv = gdb_buildargv (args); + make_cleanup_freeargv (argv); } while (argv != NULL && *argv != NULL) { @@ -2810,26 +3757,29 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if (pid == 0) error (_("No current process: you must name one.")); - sprintf (fname1, "/proc/%lld", pid); + sprintf (fname1, "/proc/%ld", pid); if (stat (fname1, &dummy) != 0) error (_("No /proc directory: '%s'"), fname1); - printf_filtered (_("process %lld\n"), pid); + printf_filtered (_("process %ld\n"), pid); if (cmdline_f || all) { - sprintf (fname1, "/proc/%lld/cmdline", pid); + sprintf (fname1, "/proc/%ld/cmdline", pid); if ((procfile = fopen (fname1, "r")) != NULL) { - fgets (buffer, sizeof (buffer), procfile); - printf_filtered ("cmdline = '%s'\n", buffer); - fclose (procfile); + struct cleanup *cleanup = make_cleanup_fclose (procfile); + if (fgets (buffer, sizeof (buffer), procfile)) + printf_filtered ("cmdline = '%s'\n", buffer); + else + warning (_("unable to read '%s'"), fname1); + do_cleanups (cleanup); } else warning (_("unable to open /proc file '%s'"), fname1); } if (cwd_f || all) { - sprintf (fname1, "/proc/%lld/cwd", pid); + sprintf (fname1, "/proc/%ld/cwd", pid); memset (fname2, 0, sizeof (fname2)); if (readlink (fname1, fname2, sizeof (fname2)) > 0) printf_filtered ("cwd = '%s'\n", fname2); @@ -2838,7 +3788,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) } if (exe_f || all) { - sprintf (fname1, "/proc/%lld/exe", pid); + sprintf (fname1, "/proc/%ld/exe", pid); memset (fname2, 0, sizeof (fname2)); if (readlink (fname1, fname2, sizeof (fname2)) > 0) printf_filtered ("exe = '%s'\n", fname2); @@ -2847,12 +3797,14 @@ linux_nat_info_proc_cmd (char *args, int from_tty) } if (mappings_f || all) { - sprintf (fname1, "/proc/%lld/maps", pid); + sprintf (fname1, "/proc/%ld/maps", pid); if ((procfile = fopen (fname1, "r")) != NULL) { long long addr, endaddr, size, offset, inode; char permissions[8], device[8], filename[MAXPATHLEN]; + struct cleanup *cleanup; + cleanup = make_cleanup_fclose (procfile); printf_filtered (_("Mapped address spaces:\n\n")); if (gdbarch_addr_bit (current_gdbarch) == 32) { @@ -2900,31 +3852,33 @@ linux_nat_info_proc_cmd (char *args, int from_tty) } } - fclose (procfile); + do_cleanups (cleanup); } else warning (_("unable to open /proc file '%s'"), fname1); } if (status_f || all) { - sprintf (fname1, "/proc/%lld/status", pid); + sprintf (fname1, "/proc/%ld/status", pid); if ((procfile = fopen (fname1, "r")) != NULL) { + struct cleanup 
*cleanup = make_cleanup_fclose (procfile); while (fgets (buffer, sizeof (buffer), procfile) != NULL) puts_filtered (buffer); - fclose (procfile); + do_cleanups (cleanup); } else warning (_("unable to open /proc file '%s'"), fname1); } if (stat_f || all) { - sprintf (fname1, "/proc/%lld/stat", pid); + sprintf (fname1, "/proc/%ld/stat", pid); if ((procfile = fopen (fname1, "r")) != NULL) { int itmp; char ctmp; long ltmp; + struct cleanup *cleanup = make_cleanup_fclose (procfile); if (fscanf (procfile, "%d ", &itmp) > 0) printf_filtered (_("Process: %d\n"), itmp); @@ -3008,7 +3962,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp); #endif - fclose (procfile); + do_cleanups (cleanup); } else warning (_("unable to open /proc file '%s'"), fname1); @@ -3110,6 +4064,7 @@ linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigse FILE *procfile; char buffer[MAXPATHLEN], fname[MAXPATHLEN]; int signum; + struct cleanup *cleanup; sigemptyset (pending); sigemptyset (blocked); @@ -3118,6 +4073,7 @@ linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigse procfile = fopen (fname, "r"); if (procfile == NULL) error (_("Could not open %s"), fname); + cleanup = make_cleanup_fclose (procfile); while (fgets (buffer, MAXPATHLEN, procfile) != NULL) { @@ -3139,7 +4095,114 @@ linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigse add_line_to_sigset (buffer + 8, ignored); } - fclose (procfile); + do_cleanups (cleanup); +} + +static LONGEST +linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object, + const char *annex, gdb_byte *readbuf, + const gdb_byte *writebuf, ULONGEST offset, LONGEST len) +{ + /* We make the process list snapshot when the object starts to be + read. */ + static const char *buf; + static LONGEST len_avail = -1; + static struct obstack obstack; + + DIR *dirp; + + gdb_assert (object == TARGET_OBJECT_OSDATA); + + if (strcmp (annex, "processes") != 0) + return 0; + + gdb_assert (readbuf && !writebuf); + + if (offset == 0) + { + if (len_avail != -1 && len_avail != 0) + obstack_free (&obstack, NULL); + len_avail = 0; + buf = NULL; + obstack_init (&obstack); + obstack_grow_str (&obstack, "\n"); + + dirp = opendir ("/proc"); + if (dirp) + { + struct dirent *dp; + while ((dp = readdir (dirp)) != NULL) + { + struct stat statbuf; + char procentry[sizeof ("/proc/4294967295")]; + + if (!isdigit (dp->d_name[0]) + || NAMELEN (dp) > sizeof ("4294967295") - 1) + continue; + + sprintf (procentry, "/proc/%s", dp->d_name); + if (stat (procentry, &statbuf) == 0 + && S_ISDIR (statbuf.st_mode)) + { + char *pathname; + FILE *f; + char cmd[MAXPATHLEN + 1]; + struct passwd *entry; + + pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name); + entry = getpwuid (statbuf.st_uid); + + if ((f = fopen (pathname, "r")) != NULL) + { + size_t len = fread (cmd, 1, sizeof (cmd) - 1, f); + if (len > 0) + { + int i; + for (i = 0; i < len; i++) + if (cmd[i] == '\0') + cmd[i] = ' '; + cmd[len] = '\0'; + + obstack_xml_printf ( + &obstack, + "" + "%s" + "%s" + "%s" + "", + dp->d_name, + entry ? entry->pw_name : "?", + cmd); + } + fclose (f); + } + + xfree (pathname); + } + } + + closedir (dirp); + } + + obstack_grow_str0 (&obstack, "\n"); + buf = obstack_finish (&obstack); + len_avail = strlen (buf); + } + + if (offset >= len_avail) + { + /* Done. Get rid of the obstack. 
*/ + obstack_free (&obstack, NULL); + buf = NULL; + len_avail = 0; + return 0; + } + + if (len > len_avail - offset) + len = len_avail - offset; + memcpy (readbuf, buf + offset, len); + + return len; } static LONGEST @@ -3153,6 +4216,10 @@ linux_xfer_partial (struct target_ops *ops, enum target_object object, return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf, offset, len); + if (object == TARGET_OBJECT_OSDATA) + return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf, + offset, len); + xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf, offset, len); if (xfer != 0) @@ -3162,7 +4229,7 @@ linux_xfer_partial (struct target_ops *ops, enum target_object object, offset, len); } -/* Create a prototype generic Linux target. The client can override +/* Create a prototype generic GNU/Linux target. The client can override it with local methods. */ static void @@ -3204,6 +4271,320 @@ linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int)) return t; } +/* target_is_async_p implementation. */ + +static int +linux_nat_is_async_p (void) +{ + /* NOTE: palves 2008-03-21: We're only async when the user requests + it explicitly with the "set target-async" command. + Someday, linux will always be async. */ + if (!target_async_permitted) + return 0; + + /* See target.h/target_async_mask. */ + return linux_nat_async_mask_value; +} + +/* target_can_async_p implementation. */ + +static int +linux_nat_can_async_p (void) +{ + /* NOTE: palves 2008-03-21: We're only async when the user requests + it explicitly with the "set target-async" command. + Someday, linux will always be async. */ + if (!target_async_permitted) + return 0; + + /* See target.h/target_async_mask. */ + return linux_nat_async_mask_value; +} + +static int +linux_nat_supports_non_stop (void) +{ + return 1; +} + +/* True if we want to support multi-process. To be removed when GDB + supports multi-exec. */ + +int linux_multi_process = 0; + +static int +linux_nat_supports_multi_process (void) +{ + return linux_multi_process; +} + +/* target_async_mask implementation. */ + +static int +linux_nat_async_mask (int new_mask) +{ + int curr_mask = linux_nat_async_mask_value; + + if (curr_mask != new_mask) + { + if (new_mask == 0) + { + linux_nat_async (NULL, 0); + linux_nat_async_mask_value = new_mask; + } + else + { + linux_nat_async_mask_value = new_mask; + + /* If we're going out of async-mask in all-stop, then the + inferior is stopped. The next resume will call + target_async. In non-stop, the target event source + should be always registered in the event loop. Do so + now. */ + if (non_stop) + linux_nat_async (inferior_event_handler, 0); + } + } + + return curr_mask; +} + +static int async_terminal_is_ours = 1; + +/* target_terminal_inferior implementation. */ + +static void +linux_nat_terminal_inferior (void) +{ + if (!target_is_async_p ()) + { + /* Async mode is disabled. */ + terminal_inferior (); + return; + } + + /* GDB should never give the terminal to the inferior, if the + inferior is running in the background (run&, continue&, etc.). + This check can be removed when the common code is fixed. */ + if (!sync_execution) + return; + + terminal_inferior (); + + if (!async_terminal_is_ours) + return; + + delete_file_handler (input_fd); + async_terminal_is_ours = 0; + set_sigint_trap (); +} + +/* target_terminal_ours implementation. */ + +static void +linux_nat_terminal_ours (void) +{ + if (!target_is_async_p ()) + { + /* Async mode is disabled. 
*/ + terminal_ours (); + return; + } + + /* GDB should never give the terminal to the inferior if the + inferior is running in the background (run&, continue&, etc.), + but claiming it sure should. */ + terminal_ours (); + + if (!sync_execution) + return; + + if (async_terminal_is_ours) + return; + + clear_sigint_trap (); + add_file_handler (input_fd, stdin_event_handler, 0); + async_terminal_is_ours = 1; +} + +static void (*async_client_callback) (enum inferior_event_type event_type, + void *context); +static void *async_client_context; + +/* SIGCHLD handler that serves two purposes: In non-stop/async mode, + so we notice when any child changes state, and notify the + event-loop; it allows us to use sigsuspend in linux_nat_wait_1 + above to wait for the arrival of a SIGCHLD. */ + +static void +sigchld_handler (int signo) +{ + int old_errno = errno; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "sigchld\n"); + + if (signo == SIGCHLD + && linux_nat_event_pipe[0] != -1) + async_file_mark (); /* Let the event loop know that there are + events to handle. */ + + errno = old_errno; +} + +/* Callback registered with the target events file descriptor. */ + +static void +handle_target_event (int error, gdb_client_data client_data) +{ + (*async_client_callback) (INF_REG_EVENT, async_client_context); +} + +/* Create/destroy the target events pipe. Returns previous state. */ + +static int +linux_async_pipe (int enable) +{ + int previous = (linux_nat_event_pipe[0] != -1); + + if (previous != enable) + { + sigset_t prev_mask; + + block_child_signals (&prev_mask); + + if (enable) + { + if (pipe (linux_nat_event_pipe) == -1) + internal_error (__FILE__, __LINE__, + "creating event pipe failed."); + + fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK); + fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK); + } + else + { + close (linux_nat_event_pipe[0]); + close (linux_nat_event_pipe[1]); + linux_nat_event_pipe[0] = -1; + linux_nat_event_pipe[1] = -1; + } + + restore_child_signals_mask (&prev_mask); + } + + return previous; +} + +/* target_async implementation. */ + +static void +linux_nat_async (void (*callback) (enum inferior_event_type event_type, + void *context), void *context) +{ + if (linux_nat_async_mask_value == 0 || !target_async_permitted) + internal_error (__FILE__, __LINE__, + "Calling target_async when async is masked"); + + if (callback != NULL) + { + async_client_callback = callback; + async_client_context = context; + if (!linux_async_pipe (1)) + { + add_file_handler (linux_nat_event_pipe[0], + handle_target_event, NULL); + /* There may be pending events to handle. Tell the event loop + to poll them. */ + async_file_mark (); + } + } + else + { + async_client_callback = callback; + async_client_context = context; + delete_file_handler (linux_nat_event_pipe[0]); + linux_async_pipe (0); + } + return; +} + +/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other + event came out. */ + +static int +linux_nat_stop_lwp (struct lwp_info *lwp, void *data) +{ + if (!lwp->stopped) + { + int pid, status; + ptid_t ptid = lwp->ptid; + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LNSL: running -> suspending %s\n", + target_pid_to_str (lwp->ptid)); + + + stop_callback (lwp, NULL); + stop_wait_callback (lwp, NULL); + + /* If the lwp exits while we try to stop it, there's nothing + else to do. */ + lwp = find_lwp_pid (ptid); + if (lwp == NULL) + return 0; + + /* If we didn't collect any signal other than SIGSTOP while + stopping the LWP, push a SIGNAL_0 event. 
In either case, the + event-loop will end up calling target_wait which will collect + these. */ + if (lwp->status == 0) + lwp->status = W_STOPCODE (0); + async_file_mark (); + } + else + { + /* Already known to be stopped; do nothing. */ + + if (debug_linux_nat) + { + if (find_thread_pid (lwp->ptid)->stop_requested) + fprintf_unfiltered (gdb_stdlog, "\ +LNSL: already stopped/stop_requested %s\n", + target_pid_to_str (lwp->ptid)); + else + fprintf_unfiltered (gdb_stdlog, "\ +LNSL: already stopped/no stop_requested yet %s\n", + target_pid_to_str (lwp->ptid)); + } + } + return 0; +} + +static void +linux_nat_stop (ptid_t ptid) +{ + if (non_stop) + iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL); + else + linux_ops->to_stop (ptid); +} + +static void +linux_nat_close (int quitting) +{ + /* Unregister from the event loop. */ + if (target_is_async_p ()) + target_async (NULL, 0); + + /* Reset the async_masking. */ + linux_nat_async_mask_value = 1; + + if (linux_ops->to_close) + linux_ops->to_close (quitting); +} + void linux_nat_add_target (struct target_ops *t) { @@ -3215,6 +4596,7 @@ linux_nat_add_target (struct target_ops *t) linux_ops = &linux_ops_saved; /* Override some methods for multithreading. */ + t->to_create_inferior = linux_nat_create_inferior; t->to_attach = linux_nat_attach; t->to_detach = linux_nat_detach; t->to_resume = linux_nat_resume; @@ -3226,6 +4608,20 @@ linux_nat_add_target (struct target_ops *t) t->to_pid_to_str = linux_nat_pid_to_str; t->to_has_thread_control = tc_schedlock; + t->to_can_async_p = linux_nat_can_async_p; + t->to_is_async_p = linux_nat_is_async_p; + t->to_supports_non_stop = linux_nat_supports_non_stop; + t->to_async = linux_nat_async; + t->to_async_mask = linux_nat_async_mask; + t->to_terminal_inferior = linux_nat_terminal_inferior; + t->to_terminal_ours = linux_nat_terminal_ours; + t->to_close = linux_nat_close; + + /* Methods for non-stop support. */ + t->to_stop = linux_nat_stop; + + t->to_supports_multi_process = linux_nat_supports_multi_process; + /* We don't change the stratum; this target will sit at process_stratum and thread_db will set at thread_stratum. This is a little strange, since this is a multi-threaded-capable @@ -3233,16 +4629,49 @@ linux_nat_add_target (struct target_ops *t) also want to be used for single-threaded processes. */ add_target (t); +} + +/* Register a method to call whenever a new thread is attached. */ +void +linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t)) +{ + /* Save the pointer. We only support a single registered instance + of the GNU/Linux native target, so we do not need to map this to + T. */ + linux_nat_new_thread = new_thread; +} - /* TODO: Eliminate this and have libthread_db use - find_target_beneath. */ - thread_db_init (t); +/* Register a method that converts a siginfo object between the layout + that ptrace returns, and the layout in the architecture of the + inferior. */ +void +linux_nat_set_siginfo_fixup (struct target_ops *t, + int (*siginfo_fixup) (struct siginfo *, + gdb_byte *, + int)) +{ + /* Save the pointer. */ + linux_nat_siginfo_fixup = siginfo_fixup; +} + +/* Return the saved siginfo associated with PTID. */ +struct siginfo * +linux_nat_get_siginfo (ptid_t ptid) +{ + struct lwp_info *lp = find_lwp_pid (ptid); + + gdb_assert (lp != NULL); + + return &lp->siginfo; } +/* Provide a prototype to silence -Wmissing-prototypes. 
*/ +extern initialize_file_ftype _initialize_linux_nat; + void _initialize_linux_nat (void) { - struct sigaction action; + sigset_t mask; add_info ("proc", linux_nat_info_proc_cmd, _("\ Show /proc process information about any running process.\n\ @@ -3253,13 +4682,34 @@ Specify any of the following keywords for detailed info:\n\ status -- list a different bunch of random process info.\n\ all -- list all available /proc info.")); - /* Save the original signal mask. */ + add_setshow_zinteger_cmd ("lin-lwp", class_maintenance, + &debug_linux_nat, _("\ +Set debugging of GNU/Linux lwp module."), _("\ +Show debugging of GNU/Linux lwp module."), _("\ +Enables printf debugging output."), + NULL, + show_debug_linux_nat, + &setdebuglist, &showdebuglist); + + add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance, + &debug_linux_nat_async, _("\ +Set debugging of GNU/Linux async lwp module."), _("\ +Show debugging of GNU/Linux async lwp module."), _("\ +Enables printf debugging output."), + NULL, + show_debug_linux_nat_async, + &setdebuglist, &showdebuglist); + + /* Save this mask as the default. */ sigprocmask (SIG_SETMASK, NULL, &normal_mask); - action.sa_handler = sigchld_handler; - sigemptyset (&action.sa_mask); - action.sa_flags = SA_RESTART; - sigaction (SIGCHLD, &action, NULL); + /* Install a SIGCHLD handler. */ + sigchld_action.sa_handler = sigchld_handler; + sigemptyset (&sigchld_action.sa_mask); + sigchld_action.sa_flags = SA_RESTART; + + /* Make it the default. */ + sigaction (SIGCHLD, &sigchld_action, NULL); /* Make sure we don't block SIGCHLD during a sigsuspend. */ sigprocmask (SIG_SETMASK, NULL, &suspend_mask); @@ -3267,13 +4717,16 @@ Specify any of the following keywords for detailed info:\n\ sigemptyset (&blocked_mask); - add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\ -Set debugging of GNU/Linux lwp module."), _("\ -Show debugging of GNU/Linux lwp module."), _("\ -Enables printf debugging output."), - NULL, - show_debug_linux_nat, - &setdebuglist, &showdebuglist); + add_setshow_boolean_cmd ("disable-randomization", class_support, + &disable_randomization, _("\ +Set disabling of debuggee's virtual address space randomization."), _("\ +Show disabling of debuggee's virtual address space randomization."), _("\ +When this mode is on (which is the default), randomization of the virtual\n\ +address space is disabled. Standalone programs run with the randomization\n\ +enabled by default on some platforms."), + &set_disable_randomization, + &show_disable_randomization, + &setlist, &showlist); } @@ -3310,6 +4763,7 @@ lin_thread_get_thread_signals (sigset_t *set) struct sigaction action; int restart, cancel; + sigemptyset (&blocked_mask); sigemptyset (set); restart = get_signo ("__pthread_sig_restart"); @@ -3347,4 +4801,3 @@ lin_thread_get_thread_signals (sigset_t *set) /* ... except during a sigsuspend. */ sigdelset (&suspend_mask, cancel); } -
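
The cancel_breakpoint code introduced by this patch rewinds an LWP's PC by gdbarch_decr_pc_after_break when it discards a SIGTRAP for a breakpoint that will be reported from another thread. Below is a minimal, hedged sketch of that rewind done with raw ptrace, for x86-64 only; it is not GDB's implementation, and cancel_breakpoint_sketch, bp_addr, and the assumption that the tracer planted a one-byte int3 at bp_addr are purely illustrative.

/* Sketch of the "push the breakpoint back" step from
   cancel_breakpoint: if the stopped thread's PC sits just past one of
   our software breakpoints, rewind it so the breakpoint traps again
   once the thread is resumed.  x86-64 only: int3 is one byte, so the
   equivalent of gdbarch_decr_pc_after_break is 1.  BP_ADDR is assumed
   to be an address where the tracer previously planted 0xCC.  */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static int
cancel_breakpoint_sketch (pid_t lwp, unsigned long long bp_addr)
{
  struct user_regs_struct regs;
  const unsigned long long decr_pc_after_break = 1;

  if (ptrace (PTRACE_GETREGS, lwp, 0, &regs) != 0)
    return 0;

  if (regs.rip - decr_pc_after_break == bp_addr)
    {
      /* Back up the PC; the breakpoint will be hit again later.  */
      regs.rip = bp_addr;
      ptrace (PTRACE_SETREGS, lwp, 0, &regs);
      return 1;		/* Caller may now throw the SIGTRAP away.  */
    }

  return 0;
}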
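
linux_xfer_siginfo above serves TARGET_OBJECT_SIGNAL_INFO requests by fetching the whole siginfo with PTRACE_GETSIGINFO and copying out the requested byte range; the siginfo_fixup hook only matters when a 64-bit GDB debugs a 32-bit inferior. The following is a minimal sketch of just the fetch-and-slice part, assuming the LWP is already ptrace-stopped and skipping the layout conversion; read_siginfo_range is an illustrative name, not a GDB function.

/* Fetch the stopped tracee's siginfo once, then copy the requested
   [OFFSET, OFFSET + LEN) slice into BUF, clamping LEN to what is
   actually available -- the same shape as linux_xfer_siginfo.
   Returns the number of bytes copied, or -1 on error.  */
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long
read_siginfo_range (pid_t lwp, unsigned char *buf,
		    size_t offset, size_t len)
{
  siginfo_t si;

  if (offset > sizeof (si))
    return -1;

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, lwp, 0, &si);
  if (errno != 0)
    return -1;

  if (offset + len > sizeof (si))
    len = sizeof (si) - offset;

  memcpy (buf, (unsigned char *) &si + offset, len);
  return (long) len;
}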
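
The async support added here (linux_async_pipe, sigchld_handler, handle_target_event) is the classic self-pipe pattern: the SIGCHLD handler writes a byte to a non-blocking pipe and the event loop selects on its read end. The standalone sketch below shows that pattern outside GDB with a trivial select loop; event_pipe, the single forked child, and the bare-bones error handling are illustrative only.

/* Self-pipe sketch: SIGCHLD wakes a select-based loop through a
   non-blocking pipe, so the loop calls waitpid with WNOHANG only when
   there is really something to collect.  */
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int event_pipe[2];

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe; a full pipe (EAGAIN) is harmless,
     the loop will wake anyway.  */
  (void) write (event_pipe[1], "+", 1);
  errno = old_errno;
}

int
main (void)
{
  struct sigaction sa;
  char buf[64];
  fd_set rfds;
  int status;

  if (pipe (event_pipe) != 0)
    exit (1);
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);

  if (fork () == 0)
    _exit (0);			/* Child: exit immediately.  */

  for (;;)
    {
      FD_ZERO (&rfds);
      FD_SET (event_pipe[0], &rfds);
      if (select (event_pipe[0] + 1, &rfds, NULL, NULL, NULL) < 0)
	continue;		/* EINTR from SIGCHLD: just retry.  */

      /* Drain the pipe, then collect the child without blocking.  */
      while (read (event_pipe[0], buf, sizeof buf) > 0)
	;
      if (waitpid (-1, &status, WNOHANG) > 0)
	{
	  printf ("child event: %#x\n", (unsigned) status);
	  break;
	}
    }

  return 0;
}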