X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Flinux-nat.c;h=62a453879bbb63bfc4b3ff8bd1e7c058fc514610;hb=77b06cd7198f20bb48b3a148ca8b9efbaa06aa19;hp=553d676ae5233bb94f112a39377129ce7b299bde;hpb=d90e17a74d28db7b3632eced357327ce2fb14f01;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/linux-nat.c b/gdb/linux-nat.c index 553d676ae5..62a453879b 100644 --- a/gdb/linux-nat.c +++ b/gdb/linux-nat.c @@ -1,7 +1,7 @@ /* GNU/Linux native-dependent code common to multiple platforms. - Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 - Free Software Foundation, Inc. + Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, + 2011 Free Software Foundation, Inc. This file is part of GDB. @@ -38,12 +38,12 @@ #include "inf-ptrace.h" #include "auxv.h" #include /* for MAXPATHLEN */ -#include /* for elf_gregset etc. */ +#include /* for elf_gregset etc. */ #include "elf-bfd.h" /* for elfcore_write_* */ #include "gregset.h" /* for gregset */ #include "gdbcore.h" /* for get_exec_file */ #include /* for isdigit */ -#include "gdbthread.h" /* for struct thread_info etc. */ +#include "gdbthread.h" /* for struct thread_info etc. */ #include "gdb_stat.h" /* for struct stat */ #include /* for O_RDONLY */ #include "inf-loop.h" @@ -53,6 +53,13 @@ #include #include "gdb_dirent.h" #include "xml-support.h" +#include "terminal.h" +#include +#include "solib.h" + +#ifndef SPUFS_MAGIC +#define SPUFS_MAGIC 0x23c9b64e +#endif #ifdef HAVE_PERSONALITY # include @@ -61,7 +68,7 @@ # endif #endif /* HAVE_PERSONALITY */ -/* This comment documents high-level logic of this file. +/* This comment documents high-level logic of this file. Waiting for events in sync mode =============================== @@ -69,20 +76,21 @@ Waiting for events in sync mode When waiting for an event in a specific thread, we just use waitpid, passing the specific pid, and not passing WNOHANG. -When waiting for an event in all threads, waitpid is not quite good. Prior to +When waiting for an event in all threads, waitpid is not quite good. Prior to version 2.4, Linux can either wait for event in main thread, or in secondary -threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might +threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might miss an event. The solution is to use non-blocking waitpid, together with sigsuspend. First, we use non-blocking waitpid to get an event in the main -process, if any. Second, we use non-blocking waitpid with the __WCLONED +process, if any. Second, we use non-blocking waitpid with the __WCLONED flag to check for events in cloned processes. If nothing is found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something happened to a child process -- and SIGCHLD will be delivered both for events in main debugged process and in cloned processes. As soon as we know there's -an event, we get back to calling nonblocking waitpid with and without __WCLONED. +an event, we get back to calling nonblocking waitpid with and without +__WCLONED. Note that SIGCHLD should be blocked between waitpid and sigsuspend calls, -so that we don't miss a signal. If SIGCHLD arrives in between, when it's +so that we don't miss a signal. If SIGCHLD arrives in between, when it's blocked, the signal becomes pending and sigsuspend immediately notices it and returns. @@ -159,7 +167,7 @@ blocked. */ #define PTRACE_SETOPTIONS 0x4200 #define PTRACE_GETEVENTMSG 0x4201 -/* options set using PTRACE_SETOPTIONS */ +/* Options set using PTRACE_SETOPTIONS. 
*/ #define PTRACE_O_TRACESYSGOOD 0x00000001 #define PTRACE_O_TRACEFORK 0x00000002 #define PTRACE_O_TRACEVFORK 0x00000004 @@ -178,6 +186,11 @@ blocked. */ #endif /* PTRACE_EVENT_FORK */ +/* Unlike other extended result codes, WSTOPSIG (status) on + PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but + instead SIGTRAP with bit 7 set. */ +#define SYSCALL_SIGTRAP (SIGTRAP | 0x80) + /* We can't always assume that this flag is available, but all systems with the ptrace event handlers also have __WALL, so it's safe to use here. */ @@ -227,7 +240,8 @@ static void show_debug_linux_nat_async (struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value) { - fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"), + fprintf_filtered (file, + _("Debugging of GNU/Linux async lwp module is %s.\n"), value); } @@ -238,28 +252,28 @@ show_disable_randomization (struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value) { #ifdef HAVE_PERSONALITY - fprintf_filtered (file, _("\ -Disabling randomization of debuggee's virtual address space is %s.\n"), + fprintf_filtered (file, + _("Disabling randomization of debuggee's " + "virtual address space is %s.\n"), value); #else /* !HAVE_PERSONALITY */ - fputs_filtered (_("\ -Disabling randomization of debuggee's virtual address space is unsupported on\n\ -this platform.\n"), file); + fputs_filtered (_("Disabling randomization of debuggee's " + "virtual address space is unsupported on\n" + "this platform.\n"), file); #endif /* !HAVE_PERSONALITY */ } static void -set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c) +set_disable_randomization (char *args, int from_tty, + struct cmd_list_element *c) { #ifndef HAVE_PERSONALITY - error (_("\ -Disabling randomization of debuggee's virtual address space is unsupported on\n\ -this platform.")); + error (_("Disabling randomization of debuggee's " + "virtual address space is unsupported on\n" + "this platform.")); #endif /* !HAVE_PERSONALITY */ } -static int linux_parent_pid; - struct simple_pid_list { int pid; @@ -273,17 +287,25 @@ struct simple_pid_list *stopped_pids; static int linux_supports_tracefork_flag = -1; +/* This variable is a tri-state flag: -1 for unknown, 0 if + PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */ + +static int linux_supports_tracesysgood_flag = -1; + /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have PTRACE_O_TRACEVFORKDONE. */ static int linux_supports_tracevforkdone_flag = -1; -/* Async mode support */ +/* Async mode support. */ /* Zero if the async mode, although enabled, is masked, which means linux_nat_wait should behave as if async mode was off. */ static int linux_nat_async_mask_value = 1; +/* Stores the current used ptrace() options. */ +static int current_ptrace_options = 0; + /* The read/write ends of the pipe registered as waitable file in the event loop. 
*/ static int linux_nat_event_pipe[2] = { -1, -1 }; @@ -328,7 +350,8 @@ async_file_mark (void) } static void linux_nat_async (void (*callback) - (enum inferior_event_type event_type, void *context), + (enum inferior_event_type event_type, + void *context), void *context); static int linux_nat_async_mask (int mask); static int kill_lwp (int lwpid, int signo); @@ -337,6 +360,12 @@ static int stop_callback (struct lwp_info *lp, void *data); static void block_child_signals (sigset_t *prev_mask); static void restore_child_signals_mask (sigset_t *prev_mask); + +struct lwp_info; +static struct lwp_info *add_lwp (ptid_t ptid); +static void purge_lwp_list (int pid); +static struct lwp_info *find_lwp_pid (ptid_t ptid); + /* Trivial list manipulation functions to keep track of a list of new stopped processes. */ @@ -344,6 +373,7 @@ static void add_to_pid_list (struct simple_pid_list **listp, int pid, int status) { struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list)); + new_pid->pid = pid; new_pid->status = status; new_pid->next = *listp; @@ -351,7 +381,7 @@ add_to_pid_list (struct simple_pid_list **listp, int pid, int status) } static int -pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status) +pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp) { struct simple_pid_list **p; @@ -359,7 +389,8 @@ pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status) if ((*p)->pid == pid) { struct simple_pid_list *next = (*p)->next; - *status = (*p)->status; + + *statusp = (*p)->status; xfree (*p); *p = next; return 1; @@ -379,8 +410,6 @@ linux_record_stopped_pid (int pid, int status) static void linux_tracefork_child (void) { - int ret; - ptrace (PTRACE_TRACEME, 0, 0, 0); kill (getpid (), SIGSTOP); fork (); @@ -390,13 +419,13 @@ linux_tracefork_child (void) /* Wrapper function for waitpid which handles EINTR. */ static int -my_waitpid (int pid, int *status, int flags) +my_waitpid (int pid, int *statusp, int flags) { int ret; do { - ret = waitpid (pid, status, flags); + ret = waitpid (pid, statusp, flags); } while (ret == -1 && errno == EINTR); @@ -449,7 +478,8 @@ linux_test_for_tracefork (int original_pid) else if (ret != child_pid) error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret); if (! 
WIFSTOPPED (status)) - error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status); + error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), + status); ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK); if (ret != 0) @@ -464,10 +494,11 @@ linux_test_for_tracefork (int original_pid) ret = my_waitpid (child_pid, &status, 0); if (ret != child_pid) - warning (_("linux_test_for_tracefork: failed to wait for killed child")); + warning (_("linux_test_for_tracefork: failed " + "to wait for killed child")); else if (!WIFSIGNALED (status)) - warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from " - "killed child"), status); + warning (_("linux_test_for_tracefork: unexpected " + "wait status 0x%x from killed child"), status); restore_child_signals_mask (&prev_mask); return; @@ -497,7 +528,8 @@ linux_test_for_tracefork (int original_pid) my_waitpid (second_pid, &second_status, 0); ret = ptrace (PTRACE_KILL, second_pid, 0, 0); if (ret != 0) - warning (_("linux_test_for_tracefork: failed to kill second child")); + warning (_("linux_test_for_tracefork: " + "failed to kill second child")); my_waitpid (second_pid, &status, 0); } } @@ -513,6 +545,43 @@ linux_test_for_tracefork (int original_pid) restore_child_signals_mask (&prev_mask); } +/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls. + + We try to enable syscall tracing on ORIGINAL_PID. If this fails, + we know that the feature is not available. This may change the tracing + options for ORIGINAL_PID, but we'll be setting them shortly anyway. */ + +static void +linux_test_for_tracesysgood (int original_pid) +{ + int ret; + sigset_t prev_mask; + + /* We don't want those ptrace calls to be interrupted. */ + block_child_signals (&prev_mask); + + linux_supports_tracesysgood_flag = 0; + + ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD); + if (ret != 0) + goto out; + + linux_supports_tracesysgood_flag = 1; +out: + restore_child_signals_mask (&prev_mask); +} + +/* Determine wether we support PTRACE_O_TRACESYSGOOD option available. + This function also sets linux_supports_tracesysgood_flag. */ + +static int +linux_supports_tracesysgood (int pid) +{ + if (linux_supports_tracesysgood_flag == -1) + linux_test_for_tracesysgood (pid); + return linux_supports_tracesysgood_flag; +} + /* Return non-zero iff we have tracefork functionality available. This function also sets linux_supports_tracefork_flag. */ @@ -532,12 +601,27 @@ linux_supports_tracevforkdone (int pid) return linux_supports_tracevforkdone_flag; } +static void +linux_enable_tracesysgood (ptid_t ptid) +{ + int pid = ptid_get_lwp (ptid); + + if (pid == 0) + pid = ptid_get_pid (ptid); + + if (linux_supports_tracesysgood (pid) == 0) + return; + + current_ptrace_options |= PTRACE_O_TRACESYSGOOD; + + ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options); +} + void linux_enable_event_reporting (ptid_t ptid) { int pid = ptid_get_lwp (ptid); - int options; if (pid == 0) pid = ptid_get_pid (ptid); @@ -545,15 +629,16 @@ linux_enable_event_reporting (ptid_t ptid) if (! 
linux_supports_tracefork (pid)) return; - options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC - | PTRACE_O_TRACECLONE; + current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK + | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE; + if (linux_supports_tracevforkdone (pid)) - options |= PTRACE_O_TRACEVFORKDONE; + current_ptrace_options |= PTRACE_O_TRACEVFORKDONE; /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support read-only process state. */ - ptrace (PTRACE_SETOPTIONS, pid, 0, options); + ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options); } static void @@ -561,6 +646,7 @@ linux_child_post_attach (int pid) { linux_enable_event_reporting (pid_to_ptid (pid)); check_for_thread_db (); + linux_enable_tracesysgood (pid_to_ptid (pid)); } static void @@ -568,49 +654,74 @@ linux_child_post_startup_inferior (ptid_t ptid) { linux_enable_event_reporting (ptid); check_for_thread_db (); + linux_enable_tracesysgood (ptid); } static int linux_child_follow_fork (struct target_ops *ops, int follow_child) { sigset_t prev_mask; - ptid_t last_ptid; - struct target_waitstatus last_status; int has_vforked; int parent_pid, child_pid; block_child_signals (&prev_mask); - get_last_target_status (&last_ptid, &last_status); - has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED); - parent_pid = ptid_get_lwp (last_ptid); + has_vforked = (inferior_thread ()->pending_follow.kind + == TARGET_WAITKIND_VFORKED); + parent_pid = ptid_get_lwp (inferior_ptid); if (parent_pid == 0) - parent_pid = ptid_get_pid (last_ptid); - child_pid = PIDGET (last_status.value.related_pid); + parent_pid = ptid_get_pid (inferior_ptid); + child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid); + + if (!detach_fork) + linux_enable_event_reporting (pid_to_ptid (child_pid)); + + if (has_vforked + && !non_stop /* Non-stop always resumes both branches. */ + && (!target_is_async_p () || sync_execution) + && !(follow_child || detach_fork || sched_multi)) + { + /* The parent stays blocked inside the vfork syscall until the + child execs or exits. If we don't let the child run, then + the parent stays blocked. If we're telling the parent to run + in the foreground, the user will not be able to ctrl-c to get + back the terminal, effectively hanging the debug session. */ + fprintf_filtered (gdb_stderr, _("\ +Can not resume the parent process over vfork in the foreground while\n\ +holding the child stopped. Try \"set detach-on-fork\" or \ +\"set schedule-multiple\".\n")); + /* FIXME output string > 80 columns. */ + return 1; + } if (! follow_child) { - /* We're already attached to the parent, by default. */ - - /* Before detaching from the child, remove all breakpoints from - it. If we forked, then this has already been taken care of - by infrun.c. If we vforked however, any breakpoint inserted - in the parent is visible in the child, even those added while - stopped in a vfork catchpoint. This won't actually modify - the breakpoint list, but will physically remove the - breakpoints from the child. This will remove the breakpoints - from the parent also, but they'll be reinserted below. */ - if (has_vforked) - detach_breakpoints (child_pid); + struct lwp_info *child_lp = NULL; + + /* We're already attached to the parent, by default. */ /* Detach new forked process? */ if (detach_fork) { + /* Before detaching from the child, remove all breakpoints + from it. If we forked, then this has already been taken + care of by infrun.c. 
If we vforked however, any + breakpoint inserted in the parent is visible in the + child, even those added while stopped in a vfork + catchpoint. This will remove the breakpoints from the + parent also, but they'll be reinserted below. */ + if (has_vforked) + { + /* keep breakpoints list in sync. */ + remove_breakpoints_pid (GET_PID (inferior_ptid)); + } + if (info_verbose || debug_linux_nat) { target_terminal_ours (); fprintf_filtered (gdb_stdlog, - "Detaching after fork from child process %d.\n", + "Detaching after fork from " + "child process %d.\n", child_pid); } @@ -618,34 +729,94 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) } else { - struct fork_info *fp; struct inferior *parent_inf, *child_inf; + struct cleanup *old_chain; /* Add process to GDB's tables. */ child_inf = add_inferior (child_pid); - parent_inf = find_inferior_pid (GET_PID (last_ptid)); + parent_inf = current_inferior (); child_inf->attach_flag = parent_inf->attach_flag; + copy_terminal_info (child_inf, parent_inf); - /* Retain child fork in ptrace (stopped) state. */ - fp = find_fork_pid (child_pid); - if (!fp) - fp = add_fork (child_pid); - fork_save_infrun_state (fp, 0); + old_chain = save_inferior_ptid (); + save_current_program_space (); + + inferior_ptid = ptid_build (child_pid, child_pid, 0); + add_thread (inferior_ptid); + child_lp = add_lwp (inferior_ptid); + child_lp->stopped = 1; + child_lp->resumed = 1; + + /* If this is a vfork child, then the address-space is + shared with the parent. */ + if (has_vforked) + { + child_inf->pspace = parent_inf->pspace; + child_inf->aspace = parent_inf->aspace; + + /* The parent will be frozen until the child is done + with the shared region. Keep track of the + parent. */ + child_inf->vfork_parent = parent_inf; + child_inf->pending_detach = 0; + parent_inf->vfork_child = child_inf; + parent_inf->pending_detach = 0; + } + else + { + child_inf->aspace = new_address_space (); + child_inf->pspace = add_program_space (child_inf->aspace); + child_inf->removable = 1; + set_current_program_space (child_inf->pspace); + clone_program_space (child_inf->pspace, parent_inf->pspace); + + /* Let the shared library layer (solib-svr4) learn about + this new process, relocate the cloned exec, pull in + shared libraries, and install the solib event + breakpoint. If a "cloned-VM" event was propagated + better throughout the core, this wouldn't be + required. */ + solib_create_inferior_hook (0); + } + + /* Let the thread_db layer learn about this new process. */ + check_for_thread_db (); + + do_cleanups (old_chain); } if (has_vforked) { + struct lwp_info *lp; + struct inferior *parent_inf; + + parent_inf = current_inferior (); + + /* If we detached from the child, then we have to be careful + to not insert breakpoints in the parent until the child + is done with the shared memory region. However, if we're + staying attached to the child, then we can and should + insert breakpoints, so that we can debug it. A + subsequent child exec or exit is enough to know when does + the child stops using the parent's address space. 
*/ + parent_inf->waiting_for_vfork_done = detach_fork; + parent_inf->pspace->breakpoints_not_allowed = detach_fork; + + lp = find_lwp_pid (pid_to_ptid (parent_pid)); gdb_assert (linux_supports_tracefork_flag >= 0); if (linux_supports_tracevforkdone (0)) { - int status; + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LCFF: waiting for VFORK_DONE on %d\n", + parent_pid); - ptrace (PTRACE_CONT, parent_pid, 0, 0); - my_waitpid (parent_pid, &status, __WALL); - if ((status >> 16) != PTRACE_EVENT_VFORK_DONE) - warning (_("Unexpected waitpid result %06x when waiting for " - "vfork-done"), status); + lp->stopped = 1; + lp->resumed = 1; + + /* We'll handle the VFORK_DONE event like any other + event, in target_wait. */ } else { @@ -680,40 +851,48 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) is only the single-step breakpoint at vfork's return point. */ + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LCFF: no VFORK_DONE " + "support, sleeping a bit\n"); + usleep (10000); - } - /* Since we vforked, breakpoints were removed in the parent - too. Put them back. */ - reattach_breakpoints (parent_pid); + /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event, + and leave it pending. The next linux_nat_resume call + will notice a pending event, and bypasses actually + resuming the inferior. */ + lp->status = 0; + lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE; + lp->stopped = 0; + lp->resumed = 1; + + /* If we're in async mode, need to tell the event loop + there's something here to process. */ + if (target_can_async_p ()) + async_file_mark (); + } } } else { - struct thread_info *last_tp = find_thread_pid (last_ptid); - struct thread_info *tp; - char child_pid_spelling[40]; struct inferior *parent_inf, *child_inf; - - /* Copy user stepping state to the new inferior thread. */ - struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint; - CORE_ADDR step_range_start = last_tp->step_range_start; - CORE_ADDR step_range_end = last_tp->step_range_end; - struct frame_id step_frame_id = last_tp->step_frame_id; - - /* Otherwise, deleting the parent would get rid of this - breakpoint. */ - last_tp->step_resume_breakpoint = NULL; - - /* Before detaching from the parent, remove all breakpoints from it. */ - remove_breakpoints (); + struct lwp_info *lp; + struct program_space *parent_pspace; if (info_verbose || debug_linux_nat) { target_terminal_ours (); - fprintf_filtered (gdb_stdlog, - "Attaching after fork to child process %d.\n", - child_pid); + if (has_vforked) + fprintf_filtered (gdb_stdlog, + _("Attaching after process %d " + "vfork to child process %d.\n"), + parent_pid, child_pid); + else + fprintf_filtered (gdb_stdlog, + _("Attaching after process %d " + "fork to child process %d.\n"), + parent_pid, child_pid); } /* Add the new inferior first, so that the target_detach below @@ -721,63 +900,73 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) child_inf = add_inferior (child_pid); - parent_inf = find_inferior_pid (GET_PID (last_ptid)); + parent_inf = current_inferior (); child_inf->attach_flag = parent_inf->attach_flag; + copy_terminal_info (child_inf, parent_inf); - /* If we're vforking, we may want to hold on to the parent until - the child exits or execs. At exec time we can remove the old - breakpoints from the parent and detach it; at exit time we - could do the same (or even, sneakily, resume debugging it - the - child's exec has failed, or something similar). 
- - This doesn't clean up "properly", because we can't call - target_detach, but that's OK; if the current target is "child", - then it doesn't need any further cleanups, and lin_lwp will - generally not encounter vfork (vfork is defined to fork - in libpthread.so). + parent_pspace = parent_inf->pspace; - The holding part is very easy if we have VFORKDONE events; - but keeping track of both processes is beyond GDB at the - moment. So we don't expose the parent to the rest of GDB. - Instead we quietly hold onto it until such time as we can - safely resume it. */ + /* If we're vforking, we want to hold on to the parent until the + child exits or execs. At child exec or exit time we can + remove the old breakpoints from the parent and detach or + resume debugging it. Otherwise, detach the parent now; we'll + want to reuse it's program/address spaces, but we can't set + them to the child before removing breakpoints from the + parent, otherwise, the breakpoints module could decide to + remove breakpoints from the wrong process (since they'd be + assigned to the same address space). */ if (has_vforked) { - linux_parent_pid = parent_pid; - detach_inferior (parent_pid); - } - else if (!detach_fork) - { - struct fork_info *fp; - /* Retain parent fork in ptrace (stopped) state. */ - fp = find_fork_pid (parent_pid); - if (!fp) - fp = add_fork (parent_pid); - fork_save_infrun_state (fp, 0); - - /* Also add an entry for the child fork. */ - fp = find_fork_pid (child_pid); - if (!fp) - fp = add_fork (child_pid); - fork_save_infrun_state (fp, 0); + gdb_assert (child_inf->vfork_parent == NULL); + gdb_assert (parent_inf->vfork_child == NULL); + child_inf->vfork_parent = parent_inf; + child_inf->pending_detach = 0; + parent_inf->vfork_child = child_inf; + parent_inf->pending_detach = detach_fork; + parent_inf->waiting_for_vfork_done = 0; } - else + else if (detach_fork) target_detach (NULL, 0); - inferior_ptid = ptid_build (child_pid, child_pid, 0); + /* Note that the detach above makes PARENT_INF dangling. */ - linux_nat_switch_fork (inferior_ptid); - check_for_thread_db (); + /* Add the child thread to the appropriate lists, and switch to + this new thread, before cloning the program space, and + informing the solib layer about this new process. */ + + inferior_ptid = ptid_build (child_pid, child_pid, 0); + add_thread (inferior_ptid); + lp = add_lwp (inferior_ptid); + lp->stopped = 1; + lp->resumed = 1; - tp = inferior_thread (); - tp->step_resume_breakpoint = step_resume_breakpoint; - tp->step_range_start = step_range_start; - tp->step_range_end = step_range_end; - tp->step_frame_id = step_frame_id; + /* If this is a vfork child, then the address-space is shared + with the parent. If we detached from the parent, then we can + reuse the parent's program/address spaces. */ + if (has_vforked || detach_fork) + { + child_inf->pspace = parent_pspace; + child_inf->aspace = child_inf->pspace->aspace; + } + else + { + child_inf->aspace = new_address_space (); + child_inf->pspace = add_program_space (child_inf->aspace); + child_inf->removable = 1; + set_current_program_space (child_inf->pspace); + clone_program_space (child_inf->pspace, parent_pspace); + + /* Let the shared library layer (solib-svr4) learn about + this new process, relocate the cloned exec, pull in + shared libraries, and install the solib event breakpoint. + If a "cloned-VM" event was propagated better throughout + the core, this wouldn't be required. */ + solib_create_inferior_hook (0); + } - /* Reset breakpoints in the child as appropriate. 
*/ - follow_inferior_reset_breakpoints (); + /* Let the thread_db layer learn about this new process. */ + check_for_thread_db (); } restore_child_signals_mask (&prev_mask); @@ -785,25 +974,37 @@ linux_child_follow_fork (struct target_ops *ops, int follow_child) } -static void +static int linux_child_insert_fork_catchpoint (int pid) { - if (! linux_supports_tracefork (pid)) - error (_("Your system does not support fork catchpoints.")); + return !linux_supports_tracefork (pid); } -static void +static int linux_child_insert_vfork_catchpoint (int pid) { - if (!linux_supports_tracefork (pid)) - error (_("Your system does not support vfork catchpoints.")); + return !linux_supports_tracefork (pid); } -static void +static int linux_child_insert_exec_catchpoint (int pid) { - if (!linux_supports_tracefork (pid)) - error (_("Your system does not support exec catchpoints.")); + return !linux_supports_tracefork (pid); +} + +static int +linux_child_set_syscall_catchpoint (int pid, int needed, int any_count, + int table_size, int *table) +{ + if (!linux_supports_tracesysgood (pid)) + return 1; + + /* On GNU/Linux, we ignore the arguments. It means that we only + enable the syscall catchpoints, but do not disable them. + + Also, we do not use the `table' information because we do not + filter system calls here. We let GDB do the logic for us. */ + return 0; } /* On GNU/Linux there are no real LWP's. The closest thing to LWP's @@ -883,7 +1084,6 @@ restore_child_signals_mask (sigset_t *prev_mask) static int stop_wait_callback (struct lwp_info *lp, void *data); static int linux_thread_alive (ptid_t ptid); static char *linux_child_pid_to_exec_file (int pid); -static int cancel_breakpoint (struct lwp_info *lp); /* Convert wait status STATUS to a string. Used for printing debug @@ -895,36 +1095,23 @@ status_to_str (int status) static char buf[64]; if (WIFSTOPPED (status)) - snprintf (buf, sizeof (buf), "%s (stopped)", - strsignal (WSTOPSIG (status))); + { + if (WSTOPSIG (status) == SYSCALL_SIGTRAP) + snprintf (buf, sizeof (buf), "%s (stopped at syscall)", + strsignal (SIGTRAP)); + else + snprintf (buf, sizeof (buf), "%s (stopped)", + strsignal (WSTOPSIG (status))); + } else if (WIFSIGNALED (status)) snprintf (buf, sizeof (buf), "%s (terminated)", - strsignal (WSTOPSIG (status))); + strsignal (WTERMSIG (status))); else snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status)); return buf; } -/* Initialize the list of LWPs. Note that this module, contrary to - what GDB's generic threads layer does for its thread list, - re-initializes the LWP lists whenever we mourn or detach (which - doesn't involve mourning) the inferior. */ - -static void -init_lwp_list (void) -{ - struct lwp_info *lp, *lpnext; - - for (lp = lwp_list; lp; lp = lpnext) - { - lpnext = lp->next; - xfree (lp); - } - - lwp_list = NULL; -} - /* Remove all LWPs belong to PID from the lwp list. */ static void @@ -985,6 +1172,7 @@ add_lwp (ptid_t ptid) lp->waitstatus.kind = TARGET_WAITKIND_IGNORE; lp->ptid = ptid; + lp->core = -1; lp->next = lwp_list; lwp_list = lp; @@ -1040,33 +1228,6 @@ find_lwp_pid (ptid_t ptid) return NULL; } -/* Returns true if PTID matches filter FILTER. FILTER can be the wild - card MINUS_ONE_PTID (all ptid match it); can be a ptid representing - a process (ptid_is_pid returns true), in which case, all lwps of - that give process match, lwps of other process do not; or, it can - represent a specific thread, in which case, only that thread will - match true. PTID must represent an LWP, it can never be a wild - card. 
*/ - -static int -ptid_match (ptid_t ptid, ptid_t filter) -{ - /* Since both parameters have the same type, prevent easy mistakes - from happening. */ - gdb_assert (!ptid_equal (ptid, minus_one_ptid) - && !ptid_equal (ptid, null_ptid)); - - if (ptid_equal (filter, minus_one_ptid)) - return 1; - if (ptid_is_pid (filter) - && ptid_get_pid (ptid) == ptid_get_pid (filter)) - return 1; - else if (ptid_equal (ptid, filter)) - return 1; - - return 0; -} - /* Call CALLBACK with its second argument set to DATA for every LWP in the list. If CALLBACK returns 1 for a particular LWP, return a pointer to the structure describing that LWP immediately. @@ -1093,22 +1254,30 @@ iterate_over_lwps (ptid_t filter, return NULL; } -/* Update our internal state when changing from one fork (checkpoint, - et cetera) to another indicated by NEW_PTID. We can only switch - single-threaded applications, so we only create one new LWP, and - the previous list is discarded. */ +/* Update our internal state when changing from one checkpoint to + another indicated by NEW_PTID. We can only switch single-threaded + applications, so we only create one new LWP, and the previous list + is discarded. */ void linux_nat_switch_fork (ptid_t new_ptid) { struct lwp_info *lp; - init_lwp_list (); + purge_lwp_list (GET_PID (inferior_ptid)); + lp = add_lwp (new_ptid); lp->stopped = 1; - init_thread_list (); - add_thread_silent (new_ptid); + /* This changes the thread's ptid while preserving the gdb thread + num. Also changes the inferior pid, while preserving the + inferior num. */ + thread_change_ptid (inferior_ptid, new_ptid); + + /* We've just told GDB core that the thread changed target id, but, + in fact, it really is a different thread, with different register + contents. */ + registers_changed (); } /* Handle the exit of a single thread LP. */ @@ -1116,7 +1285,7 @@ linux_nat_switch_fork (ptid_t new_ptid) static void exit_lwp (struct lwp_info *lp) { - struct thread_info *th = find_thread_pid (lp->ptid); + struct thread_info *th = find_thread_ptid (lp->ptid); if (th) { @@ -1129,6 +1298,34 @@ exit_lwp (struct lwp_info *lp) delete_lwp (lp->ptid); } +/* Return an lwp's tgid, found in `/proc/PID/status'. */ + +int +linux_proc_get_tgid (int lwpid) +{ + FILE *status_file; + char buf[100]; + int tgid = -1; + + snprintf (buf, sizeof (buf), "/proc/%d/status", (int) lwpid); + status_file = fopen (buf, "r"); + if (status_file != NULL) + { + while (fgets (buf, sizeof (buf), status_file)) + { + if (strncmp (buf, "Tgid:", 5) == 0) + { + tgid = strtoul (buf + strlen ("Tgid:"), NULL, 10); + break; + } + } + + fclose (status_file); + } + + return tgid; +} + /* Detect `T (stopped)' in `/proc/PID/status'. Other states including `T (tracing stop)' are reported as false. */ @@ -1210,7 +1407,16 @@ linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned, *cloned = 1; } - gdb_assert (pid == new_pid && WIFSTOPPED (status)); + gdb_assert (pid == new_pid); + + if (!WIFSTOPPED (status)) + { + /* The pid we tried to attach has apparently just exited. 
*/ + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s", + pid, status_to_str (status)); + return status; + } if (WSTOPSIG (status) != SIGSTOP) { @@ -1268,6 +1474,9 @@ lin_lwp_attach_lwp (ptid_t ptid) target_pid_to_str (ptid)); status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled); + if (!WIFSTOPPED (status)) + return -1; + lp = add_lwp (ptid); lp->stopped = 1; lp->cloned = cloned; @@ -1310,7 +1519,6 @@ linux_nat_create_inferior (struct target_ops *ops, char *exec_file, char *allargs, char **env, int from_tty) { - int saved_async = 0; #ifdef HAVE_PERSONALITY int personality_orig = 0, personality_set = 0; #endif /* HAVE_PERSONALITY */ @@ -1318,11 +1526,6 @@ linux_nat_create_inferior (struct target_ops *ops, /* The fork_child mechanism is synchronous and calls target_wait, so we have to mask the async mode. */ - if (target_can_async_p ()) - /* Mask async mode. Creating a child requires a loop calling - wait_for_inferior currently. */ - saved_async = linux_nat_async_mask (0); - #ifdef HAVE_PERSONALITY if (disable_randomization) { @@ -1352,9 +1555,6 @@ linux_nat_create_inferior (struct target_ops *ops, safe_strerror (errno)); } #endif /* HAVE_PERSONALITY */ - - if (saved_async) - linux_nat_async_mask (saved_async); } static void @@ -1376,6 +1576,39 @@ linux_nat_attach (struct target_ops *ops, char *args, int from_tty) status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned, &lp->signalled); + if (!WIFSTOPPED (status)) + { + if (WIFEXITED (status)) + { + int exit_code = WEXITSTATUS (status); + + target_terminal_ours (); + target_mourn_inferior (); + if (exit_code == 0) + error (_("Unable to attach: program exited normally.")); + else + error (_("Unable to attach: program exited with code %d."), + exit_code); + } + else if (WIFSIGNALED (status)) + { + enum target_signal signo; + + target_terminal_ours (); + target_mourn_inferior (); + + signo = target_signal_from_host (WTERMSIG (status)); + error (_("Unable to attach: program terminated with signal " + "%s, %s."), + target_signal_to_name (signo), + target_signal_to_string (signo)); + } + + internal_error (__FILE__, __LINE__, + _("unexpected status %d for PID %ld"), + status, (long) GET_LWP (ptid)); + } + lp->stopped = 1; /* Save the wait status to report later. */ @@ -1395,74 +1628,81 @@ linux_nat_attach (struct target_ops *ops, char *args, int from_tty) static int get_pending_status (struct lwp_info *lp, int *status) { - struct target_waitstatus last; - ptid_t last_ptid; - - get_last_target_status (&last_ptid, &last); - - /* If this lwp is the ptid that GDB is processing an event from, the - signal will be in stop_signal. Otherwise, we may cache pending - events in lp->status while trying to stop all threads (see - stop_wait_callback). */ + enum target_signal signo = TARGET_SIGNAL_0; + + /* If we paused threads momentarily, we may have stored pending + events in lp->status or lp->waitstatus (see stop_wait_callback), + and GDB core hasn't seen any signal for those threads. + Otherwise, the last signal reported to the core is found in the + thread object's stop_signal. + + There's a corner case that isn't handled here at present. Only + if the thread stopped with a TARGET_WAITKIND_STOPPED does + stop_signal make sense as a real signal to pass to the inferior. + Some catchpoint related events, like + TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set + to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. 
But, + those traps are debug API (ptrace in our case) related and + induced; the inferior wouldn't see them if it wasn't being + traced. Hence, we should never pass them to the inferior, even + when set to pass state. Since this corner case isn't handled by + infrun.c when proceeding with a signal, for consistency, neither + do we handle it here (or elsewhere in the file we check for + signal pass state). Normally SIGTRAP isn't set to pass state, so + this is really a corner case. */ - *status = 0; + if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE) + signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */ + else if (lp->status) + signo = target_signal_from_host (WSTOPSIG (lp->status)); + else if (non_stop && !is_executing (lp->ptid)) + { + struct thread_info *tp = find_thread_ptid (lp->ptid); - if (non_stop) + signo = tp->suspend.stop_signal; + } + else if (!non_stop) { - enum target_signal signo = TARGET_SIGNAL_0; + struct target_waitstatus last; + ptid_t last_ptid; - if (is_executing (lp->ptid)) - { - /* If the core thought this lwp was executing --- e.g., the - executing property hasn't been updated yet, but the - thread has been stopped with a stop_callback / - stop_wait_callback sequence (see linux_nat_detach for - example) --- we can only have pending events in the local - queue. */ - signo = target_signal_from_host (WSTOPSIG (lp->status)); - } - else + get_last_target_status (&last_ptid, &last); + + if (GET_LWP (lp->ptid) == GET_LWP (last_ptid)) { - /* If the core knows the thread is not executing, then we - have the last signal recorded in - thread_info->stop_signal. */ + struct thread_info *tp = find_thread_ptid (lp->ptid); - struct thread_info *tp = find_thread_pid (lp->ptid); - signo = tp->stop_signal; + signo = tp->suspend.stop_signal; } + } - if (signo != TARGET_SIGNAL_0 - && !signal_pass_state (signo)) - { - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, "\ -GPT: lwp %s had signal %s, but it is in no pass state\n", - target_pid_to_str (lp->ptid), - target_signal_to_string (signo)); - } - else - { - if (signo != TARGET_SIGNAL_0) - *status = W_STOPCODE (target_signal_to_host (signo)); + *status = 0; - if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, - "GPT: lwp %s as pending signal %s\n", - target_pid_to_str (lp->ptid), - target_signal_to_string (signo)); - } + if (signo == TARGET_SIGNAL_0) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "GPT: lwp %s has no pending signal\n", + target_pid_to_str (lp->ptid)); + } + else if (!signal_pass_state (signo)) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "GPT: lwp %s had signal %s, " + "but it is in no pass state\n", + target_pid_to_str (lp->ptid), + target_signal_to_string (signo)); } else { - if (GET_LWP (lp->ptid) == GET_LWP (last_ptid)) - { - struct thread_info *tp = find_thread_pid (lp->ptid); - if (tp->stop_signal != TARGET_SIGNAL_0 - && signal_pass_state (tp->stop_signal)) - *status = W_STOPCODE (target_signal_to_host (tp->stop_signal)); - } - else - *status = lp->status; + *status = W_STOPCODE (target_signal_to_host (signo)); + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "GPT: lwp %s has pending signal %s\n", + target_pid_to_str (lp->ptid), + target_signal_to_string (signo)); } return 0; @@ -1522,7 +1762,6 @@ linux_nat_detach (struct target_ops *ops, char *args, int from_tty) { int pid; int status; - enum target_signal sig; struct lwp_info *main_lwp; pid = GET_PID (inferior_ptid); @@ -1553,10 +1792,11 @@ linux_nat_detach (struct target_ops *ops, 
char *args, int from_tty) pass it along with PTRACE_DETACH. */ args = alloca (8); sprintf (args, "%d", (int) WSTOPSIG (status)); - fprintf_unfiltered (gdb_stdlog, - "LND: Sending signal %s to %s\n", - args, - target_pid_to_str (main_lwp->ptid)); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LND: Sending signal %s to %s\n", + args, + target_pid_to_str (main_lwp->ptid)); } delete_lwp (main_lwp->ptid); @@ -1581,7 +1821,16 @@ linux_nat_detach (struct target_ops *ops, char *args, int from_tty) static int resume_callback (struct lwp_info *lp, void *data) { - if (lp->stopped && lp->status == 0) + struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid)); + + if (lp->stopped && inf->vfork_child != NULL) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "RC: Not resuming %s (vfork parent)\n", + target_pid_to_str (lp->ptid)); + } + else if (lp->stopped && lp->status == 0) { if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, @@ -1598,12 +1847,15 @@ resume_callback (struct lwp_info *lp, void *data) lp->stopped = 0; lp->step = 0; memset (&lp->siginfo, 0, sizeof (lp->siginfo)); + lp->stopped_by_watchpoint = 0; } else if (lp->stopped && debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n", + fprintf_unfiltered (gdb_stdlog, + "RC: Not resuming sibling %s (has pending)\n", target_pid_to_str (lp->ptid)); else if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n", + fprintf_unfiltered (gdb_stdlog, + "RC: Not resuming sibling %s (not stopped)\n", target_pid_to_str (lp->ptid)); return 0; @@ -1636,7 +1888,8 @@ linux_nat_resume (struct target_ops *ops, "LLR: Preparing to %s %s, %s, inferior_ptid %s\n", step ? "step" : "resume", target_pid_to_str (ptid), - signo ? strsignal (signo) : "0", + (signo != TARGET_SIGNAL_0 + ? strsignal (target_signal_to_host (signo)) : "0"), target_pid_to_str (inferior_ptid)); block_child_signals (&prev_mask); @@ -1645,14 +1898,8 @@ linux_nat_resume (struct target_ops *ops, resume_many = (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid)); - if (!non_stop) - { - /* Mark the lwps we're resuming as resumed. */ - iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL); - iterate_over_lwps (ptid, resume_set_callback, NULL); - } - else - iterate_over_lwps (minus_one_ptid, resume_set_callback, NULL); + /* Mark the lwps we're resuming as resumed. */ + iterate_over_lwps (ptid, resume_set_callback, NULL); /* See if it's the current inferior that should be handled specially. */ @@ -1675,7 +1922,7 @@ linux_nat_resume (struct target_ops *ops, if (lp->status && WIFSTOPPED (lp->status)) { - int saved_signo; + enum target_signal saved_signo; struct inferior *inf; inf = find_inferior_pid (ptid_get_pid (lp->ptid)); @@ -1684,7 +1931,7 @@ linux_nat_resume (struct target_ops *ops, /* Defer to common code if we're gaining control of the inferior. */ - if (inf->stop_soon == NO_STOP_QUIETLY + if (inf->control.stop_soon == NO_STOP_QUIETLY && signal_stop_state (saved_signo) == 0 && signal_print_state (saved_signo) == 0 && signal_pass_state (saved_signo) == 1) @@ -1702,7 +1949,7 @@ linux_nat_resume (struct target_ops *ops, } } - if (lp->status) + if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE) { /* FIXME: What should we do if we are supposed to continue this thread with a signal? 
*/ @@ -1735,45 +1982,178 @@ linux_nat_resume (struct target_ops *ops, linux_ops->to_resume (linux_ops, ptid, step, signo); memset (&lp->siginfo, 0, sizeof (lp->siginfo)); + lp->stopped_by_watchpoint = 0; if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "LLR: %s %s, %s (resume event thread)\n", step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT", target_pid_to_str (ptid), - signo ? strsignal (signo) : "0"); + (signo != TARGET_SIGNAL_0 + ? strsignal (target_signal_to_host (signo)) : "0")); restore_child_signals_mask (&prev_mask); if (target_can_async_p ()) target_async (inferior_event_handler, 0); } -/* Issue kill to specified lwp. */ - -static int tkill_failed; +/* Send a signal to an LWP. */ static int kill_lwp (int lwpid, int signo) { - errno = 0; - -/* Use tkill, if possible, in case we are using nptl threads. If tkill - fails, then we are not using nptl threads and we should be using kill. */ + /* Use tkill, if possible, in case we are using nptl threads. If tkill + fails, then we are not using nptl threads and we should be using kill. */ #ifdef HAVE_TKILL_SYSCALL - if (!tkill_failed) - { - int ret = syscall (__NR_tkill, lwpid, signo); - if (errno != ENOSYS) - return ret; - errno = 0; - tkill_failed = 1; - } + { + static int tkill_failed; + + if (!tkill_failed) + { + int ret; + + errno = 0; + ret = syscall (__NR_tkill, lwpid, signo); + if (errno != ENOSYS) + return ret; + tkill_failed = 1; + } + } #endif return kill (lwpid, signo); } +/* Handle a GNU/Linux syscall trap wait response. If we see a syscall + event, check if the core is interested in it: if not, ignore the + event, and keep waiting; otherwise, we need to toggle the LWP's + syscall entry/exit status, since the ptrace event itself doesn't + indicate it, and report the trap to higher layers. */ + +static int +linux_handle_syscall_trap (struct lwp_info *lp, int stopping) +{ + struct target_waitstatus *ourstatus = &lp->waitstatus; + struct gdbarch *gdbarch = target_thread_architecture (lp->ptid); + int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid); + + if (stopping) + { + /* If we're stopping threads, there's a SIGSTOP pending, which + makes it so that the LWP reports an immediate syscall return, + followed by the SIGSTOP. Skip seeing that "return" using + PTRACE_CONT directly, and let stop_wait_callback collect the + SIGSTOP. Later when the thread is resumed, a new syscall + entry event. If we didn't do this (and returned 0), we'd + leave a syscall entry pending, and our caller, by using + PTRACE_CONT to collect the SIGSTOP, skips the syscall return + itself. Later, when the user re-resumes this LWP, we'd see + another syscall entry event and we'd mistake it for a return. + + If stop_wait_callback didn't force the SIGSTOP out of the LWP + (leaving immediately with LWP->signalled set, without issuing + a PTRACE_CONT), it would still be problematic to leave this + syscall enter pending, as later when the thread is resumed, + it would then see the same syscall exit mentioned above, + followed by the delayed SIGSTOP, while the syscall didn't + actually get to execute. It seems it would be even more + confusing to the user. 
*/ + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHST: ignoring syscall %d " + "for LWP %ld (stopping threads), " + "resuming with PTRACE_CONT for SIGSTOP\n", + syscall_number, + GET_LWP (lp->ptid)); + + lp->syscall_state = TARGET_WAITKIND_IGNORE; + ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); + return 1; + } + + if (catch_syscall_enabled ()) + { + /* Always update the entry/return state, even if this particular + syscall isn't interesting to the core now. In async mode, + the user could install a new catchpoint for this syscall + between syscall enter/return, and we'll need to know to + report a syscall return if that happens. */ + lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY + ? TARGET_WAITKIND_SYSCALL_RETURN + : TARGET_WAITKIND_SYSCALL_ENTRY); + + if (catching_syscall_number (syscall_number)) + { + /* Alright, an event to report. */ + ourstatus->kind = lp->syscall_state; + ourstatus->value.syscall_number = syscall_number; + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHST: stopping for %s of syscall %d" + " for LWP %ld\n", + lp->syscall_state + == TARGET_WAITKIND_SYSCALL_ENTRY + ? "entry" : "return", + syscall_number, + GET_LWP (lp->ptid)); + return 0; + } + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHST: ignoring %s of syscall %d " + "for LWP %ld\n", + lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY + ? "entry" : "return", + syscall_number, + GET_LWP (lp->ptid)); + } + else + { + /* If we had been syscall tracing, and hence used PT_SYSCALL + before on this LWP, it could happen that the user removes all + syscall catchpoints before we get to process this event. + There are two noteworthy issues here: + + - When stopped at a syscall entry event, resuming with + PT_STEP still resumes executing the syscall and reports a + syscall return. + + - Only PT_SYSCALL catches syscall enters. If we last + single-stepped this thread, then this event can't be a + syscall enter. If we last single-stepped this thread, this + has to be a syscall exit. + + The points above mean that the next resume, be it PT_STEP or + PT_CONTINUE, can not trigger a syscall trace event. */ + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHST: caught syscall event " + "with no syscall catchpoints." + " %d for LWP %ld, ignoring\n", + syscall_number, + GET_LWP (lp->ptid)); + lp->syscall_state = TARGET_WAITKIND_IGNORE; + } + + /* The core isn't interested in this event. For efficiency, avoid + stopping all threads only to have the core resume them all again. + Since we're not stopping threads, if we're still syscall tracing + and not stepping, we can't use PTRACE_CONT here, as we'd miss any + subsequent syscall. Simply resume using the inf-ptrace layer, + which knows when to use PT_SYSCALL or PT_CONTINUE. */ + + /* Note that gdbarch_get_syscall_number may access registers, hence + fill a regcache. */ + registers_changed (); + linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)), + lp->step, TARGET_SIGNAL_0); + return 1; +} + /* Handle a GNU/Linux extended wait response. If we see a clone event, we need to add the new LWP to our list (and not report the trap to higher layers). 
This function returns non-zero if the @@ -1786,7 +2166,6 @@ linux_handle_extended_wait (struct lwp_info *lp, int status, { int pid = GET_LWP (lp->ptid); struct target_waitstatus *ourstatus = &lp->waitstatus; - struct lwp_info *new_lp = NULL; int event = status >> 16; if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK @@ -1816,15 +2195,44 @@ linux_handle_extended_wait (struct lwp_info *lp, int status, ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0); + if (event == PTRACE_EVENT_FORK + && linux_fork_checkpointing_p (GET_PID (lp->ptid))) + { + struct fork_info *fp; + + /* Handle checkpointing by linux-fork.c here as a special + case. We don't want the follow-fork-mode or 'catch fork' + to interfere with this. */ + + /* This won't actually modify the breakpoint list, but will + physically remove the breakpoints from the child. */ + detach_breakpoints (new_pid); + + /* Retain child fork in ptrace (stopped) state. */ + fp = find_fork_pid (new_pid); + if (!fp) + fp = add_fork (new_pid); + + /* Report as spurious, so that infrun doesn't want to follow + this fork. We're actually doing an infcall in + linux-fork.c. */ + ourstatus->kind = TARGET_WAITKIND_SPURIOUS; + linux_enable_event_reporting (pid_to_ptid (new_pid)); + + /* Report the stop to the core. */ + return 0; + } + if (event == PTRACE_EVENT_FORK) ourstatus->kind = TARGET_WAITKIND_FORKED; else if (event == PTRACE_EVENT_VFORK) ourstatus->kind = TARGET_WAITKIND_VFORKED; else { - struct cleanup *old_chain; + struct lwp_info *new_lp; ourstatus->kind = TARGET_WAITKIND_IGNORE; + new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid))); new_lp->cloned = 1; new_lp->stopped = 1; @@ -1871,19 +2279,48 @@ linux_handle_extended_wait (struct lwp_info *lp, int status, } } + /* Note the need to use the low target ops to resume, to + handle resuming with PT_SYSCALL if we have syscall + catchpoints. */ if (!stopping) { + enum target_signal signo; + new_lp->stopped = 0; new_lp->resumed = 1; - ptrace (PTRACE_CONT, new_pid, 0, - status ? WSTOPSIG (status) : 0); + + signo = (status + ? target_signal_from_host (WSTOPSIG (status)) + : TARGET_SIGNAL_0); + + linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid), + 0, signo); + } + else + { + if (status != 0) + { + /* We created NEW_LP so it cannot yet contain STATUS. */ + gdb_assert (new_lp->status == 0); + + /* Save the wait status to report later. 
*/ + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHEW: waitpid of new LWP %ld, " + "saving status %s\n", + (long) GET_LWP (new_lp->ptid), + status_to_str (status)); + new_lp->status = status; + } } if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, - "LHEW: Got clone event from LWP %ld, resuming\n", + "LHEW: Got clone event " + "from LWP %ld, resuming\n", GET_LWP (lp->ptid)); - ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); + linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)), + 0, TARGET_SIGNAL_0); return 1; } @@ -1893,29 +2330,39 @@ linux_handle_extended_wait (struct lwp_info *lp, int status, if (event == PTRACE_EVENT_EXEC) { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHEW: Got exec event from LWP %ld\n", + GET_LWP (lp->ptid)); + ourstatus->kind = TARGET_WAITKIND_EXECD; ourstatus->value.execd_pathname = xstrdup (linux_child_pid_to_exec_file (pid)); - if (linux_parent_pid) + return 0; + } + + if (event == PTRACE_EVENT_VFORK_DONE) + { + if (current_inferior ()->waiting_for_vfork_done) { - detach_breakpoints (linux_parent_pid); - ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0); + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHEW: Got expected PTRACE_EVENT_" + "VFORK_DONE from LWP %ld: stopping\n", + GET_LWP (lp->ptid)); - linux_parent_pid = 0; + ourstatus->kind = TARGET_WAITKIND_VFORK_DONE; + return 0; } - /* At this point, all inserted breakpoints are gone. Doing this - as soon as we detect an exec prevents the badness of deleting - a breakpoint writing the current "shadow contents" to lift - the bp. That shadow is NOT valid after an exec. - - Note that we have to do this after the detach_breakpoints - call above, otherwise breakpoints wouldn't be lifted from the - parent on a vfork, because detach_breakpoints would think - that breakpoints are not inserted. */ - mark_breakpoints_out (); - return 0; + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LHEW: Got PTRACE_EVENT_VFORK_DONE " + "from LWP %ld: resuming\n", + GET_LWP (lp->ptid)); + ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); + return 1; } internal_error (__FILE__, __LINE__, @@ -1983,6 +2430,18 @@ wait_lwp (struct lwp_info *lp) gdb_assert (WIFSTOPPED (status)); + /* Handle GNU/Linux's syscall SIGTRAPs. */ + if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP) + { + /* No longer need the sysgood bit. The ptrace event ends up + recorded in lp->waitstatus if we care for it. We can carry + on handling the event like a regular SIGTRAP from here + on. */ + status = W_STOPCODE (SIGTRAP); + if (linux_handle_syscall_trap (lp, 1)) + return wait_lwp (lp); + } + /* Handle GNU/Linux's extended waitstatus for trace events. */ if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0) { @@ -2051,7 +2510,6 @@ static int linux_nat_has_pending_sigint (int pid) { sigset_t pending, blocked, ignored; - int i; linux_proc_pending_signals (pid, &pending, &blocked, &ignored); @@ -2100,11 +2558,123 @@ maybe_clear_ignore_sigint (struct lwp_info *lp) } } +/* Fetch the possible triggered data watchpoint info and store it in + LP. + + On some archs, like x86, that use debug registers to set + watchpoints, it's possible that the way to know which watched + address trapped, is to check the register that is used to select + which address to watch. Problem is, between setting the watchpoint + and reading back which data address trapped, the user may change + the set of watchpoints, and, as a consequence, GDB changes the + debug registers in the inferior. 
To avoid reading back a stale + stopped-data-address when that happens, we cache in LP the fact + that a watchpoint trapped, and the corresponding data address, as + soon as we see LP stop with a SIGTRAP. If GDB changes the debug + registers meanwhile, we have the cached data we can rely on. */ + +static void +save_sigtrap (struct lwp_info *lp) +{ + struct cleanup *old_chain; + + if (linux_ops->to_stopped_by_watchpoint == NULL) + { + lp->stopped_by_watchpoint = 0; + return; + } + + old_chain = save_inferior_ptid (); + inferior_ptid = lp->ptid; + + lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint (); + + if (lp->stopped_by_watchpoint) + { + if (linux_ops->to_stopped_data_address != NULL) + lp->stopped_data_address_p = + linux_ops->to_stopped_data_address (¤t_target, + &lp->stopped_data_address); + else + lp->stopped_data_address_p = 0; + } + + do_cleanups (old_chain); +} + +/* See save_sigtrap. */ + +static int +linux_nat_stopped_by_watchpoint (void) +{ + struct lwp_info *lp = find_lwp_pid (inferior_ptid); + + gdb_assert (lp != NULL); + + return lp->stopped_by_watchpoint; +} + +static int +linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p) +{ + struct lwp_info *lp = find_lwp_pid (inferior_ptid); + + gdb_assert (lp != NULL); + + *addr_p = lp->stopped_data_address; + + return lp->stopped_data_address_p; +} + +/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */ + +static int +sigtrap_is_event (int status) +{ + return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP; +} + +/* SIGTRAP-like events recognizer. */ + +static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event; + +/* Check for SIGTRAP-like events in LP. */ + +static int +linux_nat_lp_status_is_event (struct lwp_info *lp) +{ + /* We check for lp->waitstatus in addition to lp->status, because we can + have pending process exits recorded in lp->status + and W_EXITCODE(0,0) == 0. We should probably have an additional + lp->status_p flag. */ + + return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE + && linux_nat_status_is_event (lp->status)); +} + +/* Set alternative SIGTRAP-like events recognizer. If + breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be + applied. */ + +void +linux_nat_set_status_is_event (struct target_ops *t, + int (*status_is_event) (int status)) +{ + linux_nat_status_is_event = status_is_event; +} + /* Wait until LP is stopped. */ static int stop_wait_callback (struct lwp_info *lp, void *data) { + struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid)); + + /* If this is a vfork parent, bail out, it is not going to report + any SIGSTOP until the vfork is done with. */ + if (inf->vfork_child != NULL) + return 0; + if (!lp->stopped) { int status; @@ -2122,7 +2692,8 @@ stop_wait_callback (struct lwp_info *lp, void *data) ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, - "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n", + "PTRACE_CONT %s, 0, 0 (%s) " + "(discarding SIGINT)\n", target_pid_to_str (lp->ptid), errno ? safe_strerror (errno) : "OK"); @@ -2133,7 +2704,7 @@ stop_wait_callback (struct lwp_info *lp, void *data) if (WSTOPSIG (status) != SIGSTOP) { - if (WSTOPSIG (status) == SIGTRAP) + if (linux_nat_status_is_event (status)) { /* If a LWP other than the LWP that we're reporting an event for has hit a GDB breakpoint (as opposed to @@ -2151,7 +2722,9 @@ stop_wait_callback (struct lwp_info *lp, void *data) /* Save the trap's siginfo in case we need it later. 
*/ save_siginfo (lp); - /* Now resume this LWP and get the SIGSTOP event. */ + save_sigtrap (lp); + + /* Now resume this LWP and get the SIGSTOP event. */ errno = 0; ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); if (debug_linux_nat) @@ -2166,12 +2739,12 @@ stop_wait_callback (struct lwp_info *lp, void *data) target_pid_to_str (lp->ptid)); } /* Hold this event/waitstatus while we check to see if - there are any more (we still want to get that SIGSTOP). */ + there are any more (we still want to get that SIGSTOP). */ stop_wait_callback (lp, NULL); /* Hold the SIGTRAP for handling by linux_nat_wait. If there's another event, throw it back into the - queue. */ + queue. */ if (lp->status) { if (debug_linux_nat) @@ -2182,14 +2755,14 @@ stop_wait_callback (struct lwp_info *lp, void *data) kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status)); } - /* Save the sigtrap event. */ + /* Save the sigtrap event. */ lp->status = status; return 0; } else { /* The thread was stopped with a signal other than - SIGSTOP, and didn't accidentally trip a breakpoint. */ + SIGSTOP, and didn't accidentally trip a breakpoint. */ if (debug_linux_nat) { @@ -2198,7 +2771,7 @@ stop_wait_callback (struct lwp_info *lp, void *data) status_to_str ((int) status), target_pid_to_str (lp->ptid)); } - /* Now resume this LWP and get the SIGSTOP event. */ + /* Now resume this LWP and get the SIGSTOP event. */ errno = 0; ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); if (debug_linux_nat) @@ -2208,7 +2781,7 @@ stop_wait_callback (struct lwp_info *lp, void *data) errno ? safe_strerror (errno) : "OK"); /* Hold this event/waitstatus while we check to see if - there are any more (we still want to get that SIGSTOP). */ + there are any more (we still want to get that SIGSTOP). */ stop_wait_callback (lp, NULL); /* If the lp->status field is still empty, use it to @@ -2249,12 +2822,23 @@ status_callback (struct lwp_info *lp, void *data) { /* Only report a pending wait status if we pretend that this has indeed been resumed. */ - /* We check for lp->waitstatus in addition to lp->status, because we - can have pending process exits recorded in lp->waitstatus, and - W_EXITCODE(0,0) == 0. */ - return ((lp->status != 0 - || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE) - && lp->resumed); + if (!lp->resumed) + return 0; + + if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE) + { + /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event, + or a a pending process exit. Note that `W_EXITCODE(0,0) == + 0', so a clean process exit can not be stored pending in + lp->status, it is indistinguishable from + no-pending-status. */ + return 1; + } + + if (lp->status != 0) + return 1; + + return 0; } /* Return non-zero if LP isn't stopped. */ @@ -2275,8 +2859,7 @@ count_events_callback (struct lwp_info *lp, void *data) gdb_assert (count != NULL); /* Count only resumed LWPs that have a SIGTRAP event pending. */ - if (lp->status != 0 && lp->resumed - && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP) + if (lp->resumed && linux_nat_lp_status_is_event (lp)) (*count)++; return 0; @@ -2302,9 +2885,8 @@ select_event_lwp_callback (struct lwp_info *lp, void *data) gdb_assert (selector != NULL); - /* Select only resumed LWPs that have a SIGTRAP event pending. */ - if (lp->status != 0 && lp->resumed - && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP) + /* Select only resumed LWPs that have a SIGTRAP event pending. 
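To see why status_callback needs the separate waitstatus check, here is a small standalone demonstration (not GDB code; W_EXITCODE/W_STOPCODE fallbacks are provided since not every libc exposes them): a clean exit encodes to 0, indistinguishable from "nothing pending", while a SIGTRAP stop encodes to a non-zero status.

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>

#ifndef W_EXITCODE
# define W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#endif
#ifndef W_STOPCODE
# define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

int
main (void)
{
  int clean_exit = W_EXITCODE (0, 0);
  int trap_stop = W_STOPCODE (SIGTRAP);

  /* Prints 0: identical to "no status pending".  */
  printf ("W_EXITCODE (0, 0)    = %#x, WIFEXITED = %d\n",
	  clean_exit, WIFEXITED (clean_exit));
  /* Prints a non-zero status: safe to park in lp->status.  */
  printf ("W_STOPCODE (SIGTRAP) = %#x, WSTOPSIG = %d\n",
	  trap_stop, WSTOPSIG (trap_stop));
  return 0;
}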
*/ + if (lp->resumed && linux_nat_lp_status_is_event (lp)) if ((*selector)-- == 0) return 1; @@ -2328,7 +2910,7 @@ cancel_breakpoint (struct lwp_info *lp) CORE_ADDR pc; pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch); - if (breakpoint_inserted_here_p (pc)) + if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc)) { if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, @@ -2364,8 +2946,7 @@ cancel_breakpoints_callback (struct lwp_info *lp, void *data) delete or disable the breakpoint, but the LWP will have already tripped on it. */ - if (lp->status != 0 - && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP + if (linux_nat_lp_status_is_event (lp) && cancel_breakpoint (lp)) /* Throw away the SIGTRAP. */ lp->status = 0; @@ -2482,7 +3063,7 @@ linux_nat_filter_event (int lwpid, int status, int options) } /* Make sure we don't report an event for the exit of an LWP not in - our list, i.e. not part of the current process. This can happen + our list, i.e. not part of the current process. This can happen if we detach from a program we original forked and then it exits. */ if (!WIFSTOPPED (status) && !lp) @@ -2515,9 +3096,17 @@ linux_nat_filter_event (int lwpid, int status, int options) add_thread (lp->ptid); } - /* Save the trap's siginfo in case we need it later. */ - if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP) - save_siginfo (lp); + /* Handle GNU/Linux's syscall SIGTRAPs. */ + if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP) + { + /* No longer need the sysgood bit. The ptrace event ends up + recorded in lp->waitstatus if we care for it. We can carry + on handling the event like a regular SIGTRAP from here + on. */ + status = W_STOPCODE (SIGTRAP); + if (linux_handle_syscall_trap (lp, 0)) + return NULL; + } /* Handle GNU/Linux's extended waitstatus for trace events. */ if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0) @@ -2530,6 +3119,14 @@ linux_nat_filter_event (int lwpid, int status, int options) return NULL; } + if (linux_nat_status_is_event (status)) + { + /* Save the trap's siginfo in case we need it later. */ + save_siginfo (lp); + + save_sigtrap (lp); + } + /* Check if the thread has exited. */ if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps (GET_PID (lp->ptid)) > 1) @@ -2651,12 +3248,14 @@ linux_nat_filter_event (int lwpid, int status, int options) /* An interesting event. */ gdb_assert (lp); + lp->status = status; return lp; } static ptid_t linux_nat_wait_1 (struct target_ops *ops, - ptid_t ptid, struct target_waitstatus *ourstatus) + ptid_t ptid, struct target_waitstatus *ourstatus, + int target_options) { static sigset_t prev_mask; struct lwp_info *lp = NULL; @@ -2699,8 +3298,20 @@ retry: lp = NULL; status = 0; - /* Make sure there is at least one LWP that has been resumed. */ - gdb_assert (iterate_over_lwps (ptid, resumed_callback, NULL)); + /* Make sure that of those LWPs we want to get an event from, there + is at least one LWP that has been resumed. If there's none, just + bail out. The core may just be flushing asynchronously all + events. */ + if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL) + { + ourstatus->kind = TARGET_WAITKIND_IGNORE; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n"); + + restore_child_signals_mask (&prev_mask); + return minus_one_ptid; + } /* First check if there is a LWP with a wait status pending. 
*/ if (pid == -1) @@ -2709,13 +3320,10 @@ retry: lp = iterate_over_lwps (ptid, status_callback, NULL); if (lp) { - status = lp->status; - lp->status = 0; - - if (debug_linux_nat && status) + if (debug_linux_nat && lp->status) fprintf_unfiltered (gdb_stdlog, "LLW: Using pending wait status %s for %s.\n", - status_to_str (status), + status_to_str (lp->status), target_pid_to_str (lp->ptid)); } @@ -2734,13 +3342,11 @@ retry: /* We have a specific LWP to check. */ lp = find_lwp_pid (ptid); gdb_assert (lp); - status = lp->status; - lp->status = 0; - if (debug_linux_nat && status) + if (debug_linux_nat && lp->status) fprintf_unfiltered (gdb_stdlog, "LLW: Using pending wait status %s for %s.\n", - status_to_str (status), + status_to_str (lp->status), target_pid_to_str (lp->ptid)); /* If we have to wait, take into account whether PID is a cloned @@ -2753,7 +3359,7 @@ retry: because we can have pending process exits recorded in lp->status and W_EXITCODE(0,0) == 0. We should probably have an additional lp->status_p flag. */ - if (status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE) + if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE) lp = NULL; } @@ -2781,8 +3387,26 @@ retry: lp->stopped = 0; gdb_assert (lp->resumed); - /* This should catch the pending SIGSTOP. */ + /* Catch the pending SIGSTOP. */ + status = lp->status; + lp->status = 0; + stop_wait_callback (lp, NULL); + + /* If the lp->status field isn't empty, we caught another signal + while flushing the SIGSTOP. Return it back to the event + queue of the LWP, as we already have an event to handle. */ + if (lp->status) + { + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "LLW: kill %s, %s\n", + target_pid_to_str (lp->ptid), + status_to_str (lp->status)); + kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status)); + } + + lp->status = status; } if (!target_can_async_p ()) @@ -2791,8 +3415,9 @@ retry: set_sigint_trap (); } - if (target_can_async_p ()) - options |= WNOHANG; /* In async mode, don't block. */ + /* Translate generic target_wait options into waitpid options. */ + if (target_options & TARGET_WNOHANG) + options |= WNOHANG; while (lp == NULL) { @@ -2813,26 +3438,48 @@ retry: lp = linux_nat_filter_event (lwpid, status, options); + /* STATUS is now no longer valid, use LP->STATUS instead. */ + status = 0; + if (lp && ptid_is_pid (ptid) && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid)) { + gdb_assert (lp->resumed); + if (debug_linux_nat) - fprintf (stderr, "LWP %ld got an event %06x, leaving pending.\n", - ptid_get_lwp (lp->ptid), status); + fprintf (stderr, + "LWP %ld got an event %06x, leaving pending.\n", + ptid_get_lwp (lp->ptid), lp->status); - if (WIFSTOPPED (status)) + if (WIFSTOPPED (lp->status)) { - if (WSTOPSIG (status) != SIGSTOP) + if (WSTOPSIG (lp->status) != SIGSTOP) { - lp->status = status; - - stop_callback (lp, NULL); - - /* Resume in order to collect the sigstop. */ - ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0); - - stop_wait_callback (lp, NULL); + /* Cancel breakpoint hits. The breakpoint may + be removed before we fetch events from this + process to report to the core. It is best + not to assume the moribund breakpoints + heuristic always handles these cases --- it + could be too many events go through to the + core before this one is handled. All-stop + always cancels breakpoint hits in all + threads. */ + if (non_stop + && linux_nat_lp_status_is_event (lp) + && cancel_breakpoint (lp)) + { + /* Throw away the SIGTRAP. 
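The TARGET_WNOHANG translation above relies on waitpid's polling behaviour; a tiny standalone illustration (not GDB code) of the difference between the polling and blocking calls:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();
  int status;

  if (child == 0)
    {
      sleep (1);		/* No event for a little while.  */
      _exit (0);
    }

  /* Poll: returns 0 immediately because nothing is pending yet.  */
  printf ("WNOHANG poll returned %d\n",
	  (int) waitpid (child, &status, WNOHANG));

  /* Block: returns the pid once the child has exited.  */
  printf ("blocking wait returned %d, exited = %d\n",
	  (int) waitpid (child, &status, 0), WIFEXITED (status));
  return 0;
}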
*/ + lp->status = 0; + + if (debug_linux_nat) + fprintf (stderr, + "LLW: LWP %ld hit a breakpoint while" + " waiting for another process;" + " cancelled it\n", + ptid_get_lwp (lp->ptid)); + } + lp->stopped = 1; } else { @@ -2840,10 +3487,11 @@ retry: lp->signalled = 0; } } - else if (WIFEXITED (status) || WIFSIGNALED (status)) + else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status)) { if (debug_linux_nat) - fprintf (stderr, "Process %ld exited while stopping LWPs\n", + fprintf (stderr, + "Process %ld exited while stopping LWPs\n", ptid_get_lwp (lp->ptid)); /* This was the last lwp in the process. Since @@ -2853,7 +3501,6 @@ retry: about the exit code/signal, leave the status pending for the next time we're able to report it. */ - lp->status = status; /* Prevent trying to stop this thread again. We'll never try to resume it because it has a pending @@ -2866,7 +3513,7 @@ retry: /* Store the pending event in the waitstatus as well, because W_EXITCODE(0,0) == 0. */ - store_waitstatus (&lp->waitstatus, status); + store_waitstatus (&lp->waitstatus, lp->status); } /* Keep looking. */ @@ -2897,7 +3544,7 @@ retry: In sync mode, suspend waiting for a SIGCHLD signal. */ if (options & __WCLONE) { - if (target_can_async_p ()) + if (target_options & TARGET_WNOHANG) { /* No interesting event. */ ourstatus->kind = TARGET_WAITKIND_IGNORE; @@ -2912,6 +3559,17 @@ retry: sigsuspend (&suspend_mask); } } + else if (target_options & TARGET_WNOHANG) + { + /* No interesting event for PID yet. */ + ourstatus->kind = TARGET_WAITKIND_IGNORE; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n"); + + restore_child_signals_mask (&prev_mask); + return minus_one_ptid; + } /* We shouldn't end up here unless we want to try again. */ gdb_assert (lp == NULL); @@ -2922,6 +3580,9 @@ retry: gdb_assert (lp); + status = lp->status; + lp->status = 0; + /* Don't report signals that GDB isn't interested in, such as signals that are neither printed nor stopped upon. Stopping all threads can be a bit time-consuming so if we want decent @@ -2931,7 +3592,7 @@ retry: if (WIFSTOPPED (status)) { - int signo = target_signal_from_host (WSTOPSIG (status)); + enum target_signal signo = target_signal_from_host (WSTOPSIG (status)); struct inferior *inf; inf = find_inferior_pid (ptid_get_pid (lp->ptid)); @@ -2942,7 +3603,7 @@ retry: skip the signal handler, or, if we're gaining control of the inferior. */ if (!lp->step - && inf->stop_soon == NO_STOP_QUIETLY + && inf->control.stop_soon == NO_STOP_QUIETLY && signal_stop_state (signo) == 0 && signal_print_state (signo) == 0 && signal_pass_state (signo) == 1) @@ -2961,7 +3622,9 @@ retry: lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT", target_pid_to_str (lp->ptid), - signo ? strsignal (signo) : "0"); + (signo != TARGET_SIGNAL_0 + ? strsignal (target_signal_to_host (signo)) + : "0")); lp->stopped = 0; goto retry; } @@ -3009,14 +3672,21 @@ retry: starvation. */ if (pid == -1) select_event_lwp (ptid, &lp, &status); - } - /* Now that we've selected our final event LWP, cancel any - breakpoints in other LWPs that have hit a GDB breakpoint. See - the comment in cancel_breakpoints_callback to find out why. */ - iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp); + /* Now that we've selected our final event LWP, cancel any + breakpoints in other LWPs that have hit a GDB breakpoint. + See the comment in cancel_breakpoints_callback to find out + why. 
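The resume-with-signal path above forwards signals the user is not interested in straight back to the inferior. A standalone sketch (not GDB code) of the underlying ptrace mechanism: the tracer sees the signal first, and re-delivers it by passing the signal number as the data argument of PTRACE_CONT.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();
  int status;

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      raise (SIGSTOP);		/* Rendezvous with the tracer.  */
      sleep (30);		/* Wait to be killed.  */
      _exit (0);
    }

  waitpid (child, &status, 0);			/* Initial SIGSTOP.  */
  ptrace (PTRACE_CONT, child, NULL, NULL);

  kill (child, SIGTERM);
  waitpid (child, &status, 0);			/* Tracer sees it first.  */
  printf ("intercepted signal %d\n", WSTOPSIG (status));

  /* Forward the signal; only now does the child really receive it.  */
  ptrace (PTRACE_CONT, child, NULL, (void *) (long) WSTOPSIG (status));
  waitpid (child, &status, 0);
  printf ("child terminated by a signal: %d\n", WIFSIGNALED (status) != 0);
  return 0;
}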
*/ + iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp); + + /* In all-stop, from the core's perspective, all LWPs are now + stopped until a new resume action is sent over. */ + iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL); + } + else + lp->resumed = 0; - if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP) + if (linux_nat_status_is_event (status)) { if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, @@ -3036,23 +3706,83 @@ retry: fprintf_unfiltered (gdb_stdlog, "LLW: exit\n"); restore_child_signals_mask (&prev_mask); + + if (ourstatus->kind == TARGET_WAITKIND_EXITED + || ourstatus->kind == TARGET_WAITKIND_SIGNALLED) + lp->core = -1; + else + lp->core = linux_nat_core_of_thread_1 (lp->ptid); + return lp->ptid; } +/* Resume LWPs that are currently stopped without any pending status + to report, but are resumed from the core's perspective. */ + +static int +resume_stopped_resumed_lwps (struct lwp_info *lp, void *data) +{ + ptid_t *wait_ptid_p = data; + + if (lp->stopped + && lp->resumed + && lp->status == 0 + && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE) + { + gdb_assert (is_executing (lp->ptid)); + + /* Don't bother if there's a breakpoint at PC that we'd hit + immediately, and we're not waiting for this LWP. */ + if (!ptid_match (lp->ptid, *wait_ptid_p)) + { + struct regcache *regcache = get_thread_regcache (lp->ptid); + CORE_ADDR pc = regcache_read_pc (regcache); + + if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc)) + return 0; + } + + if (debug_linux_nat) + fprintf_unfiltered (gdb_stdlog, + "RSRL: resuming stopped-resumed LWP %s\n", + target_pid_to_str (lp->ptid)); + + linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)), + lp->step, TARGET_SIGNAL_0); + lp->stopped = 0; + memset (&lp->siginfo, 0, sizeof (lp->siginfo)); + lp->stopped_by_watchpoint = 0; + } + + return 0; +} + static ptid_t linux_nat_wait (struct target_ops *ops, - ptid_t ptid, struct target_waitstatus *ourstatus) + ptid_t ptid, struct target_waitstatus *ourstatus, + int target_options) { ptid_t event_ptid; if (debug_linux_nat) - fprintf_unfiltered (gdb_stdlog, "linux_nat_wait: [%s]\n", target_pid_to_str (ptid)); + fprintf_unfiltered (gdb_stdlog, + "linux_nat_wait: [%s]\n", target_pid_to_str (ptid)); /* Flush the async file first. */ if (target_can_async_p ()) async_file_flush (); - event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus); + /* Resume LWPs that are currently stopped without any pending status + to report, but are resumed from the core's perspective. LWPs get + in this state if we find them stopping at a time we're not + interested in reporting the event (target_wait on a + specific_process, for example, see linux_nat_wait_1), and + meanwhile the event became uninteresting. Don't bother resuming + LWPs we're not going to wait for if they'd stop immediately. */ + if (non_stop) + iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid); + + event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options); /* If we requested any event, and something came out, assume there may be more. If we requested a specific lwp or process, also @@ -3163,6 +3893,7 @@ linux_nat_kill (struct target_ops *ops) else { ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid)); + /* Stop all threads before killing them, since ptrace requires that the thread is stopped to sucessfully PTRACE_KILL. 
*/ iterate_over_lwps (ptid, stop_callback, NULL); @@ -3283,6 +4014,12 @@ linux_nat_xfer_partial (struct target_ops *ops, enum target_object object, return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf, offset, len); + /* The target is connected but no live inferior is selected. Pass + this request down to a lower stratum (e.g., the executable + file). */ + if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid)) + return 0; + old_chain = save_inferior_ptid (); if (is_lwp (inferior_ptid)) @@ -3395,9 +4132,7 @@ read_mapping (FILE *mapfile, regions in the inferior for a corefile. */ static int -linux_nat_find_memory_regions (int (*func) (CORE_ADDR, - unsigned long, - int, int, int, void *), void *obfd) +linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd) { int pid = PIDGET (inferior_ptid); char mapsfilename[MAXPATHLEN]; @@ -3405,7 +4140,6 @@ linux_nat_find_memory_regions (int (*func) (CORE_ADDR, long long addr, endaddr, size, offset, inode; char permissions[8], device[8], filename[MAXPATHLEN]; int read, write, exec; - int ret; struct cleanup *cleanup; /* Compose the filename for the /proc memory map, and open it. */ @@ -3432,8 +4166,8 @@ linux_nat_find_memory_regions (int (*func) (CORE_ADDR, if (info_verbose) { fprintf_filtered (gdb_stdout, - "Save segment, %lld bytes at 0x%s (%c%c%c)", - size, paddr_nz (addr), + "Save segment, %s bytes at %s (%c%c%c)", + plongest (size), paddress (target_gdbarch, addr), read ? 'r' : ' ', write ? 'w' : ' ', exec ? 'x' : ' '); if (filename[0]) @@ -3452,7 +4186,7 @@ linux_nat_find_memory_regions (int (*func) (CORE_ADDR, static int find_signalled_thread (struct thread_info *info, void *data) { - if (info->stop_signal != TARGET_SIGNAL_0 + if (info->suspend.stop_signal != TARGET_SIGNAL_0 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid)) return 1; @@ -3466,7 +4200,7 @@ find_stop_signal (void) iterate_over_threads (find_signalled_thread, NULL); if (info) - return info->stop_signal; + return info->suspend.stop_signal; else return TARGET_SIGNAL_0; } @@ -3479,11 +4213,9 @@ linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, char *note_data, int *note_size, enum target_signal stop_signal) { - gdb_gregset_t gregs; - gdb_fpregset_t fpregs; unsigned long lwp = ptid_get_lwp (ptid); - struct regcache *regcache = get_thread_regcache (ptid); - struct gdbarch *gdbarch = get_regcache_arch (regcache); + struct gdbarch *gdbarch = target_gdbarch; + struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch); const struct regset *regset; int core_regset_p; struct cleanup *old_chain; @@ -3498,21 +4230,6 @@ linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, core_regset_p = gdbarch_regset_from_core_section_p (gdbarch); sect_list = gdbarch_core_regset_sections (gdbarch); - if (core_regset_p - && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg", - sizeof (gregs))) != NULL - && regset->collect_regset != NULL) - regset->collect_regset (regset, regcache, -1, - &gregs, sizeof (gregs)); - else - fill_gregset (regcache, &gregs, -1); - - note_data = (char *) elfcore_write_prstatus (obfd, - note_data, - note_size, - lwp, - stop_signal, &gregs); - /* The loop below uses the new struct core_regset_section, which stores the supported section names and sizes for the core file. Note that note PRSTATUS needs to be treated specially. 
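The read_mapping / linux_nat_find_memory_regions code in this hunk scans /proc/PID/maps line by line. A standalone sketch of that parsing (not GDB code; buffer sizes are illustrative), run against the current process:

#include <stdio.h>

int
main (void)
{
  FILE *f = fopen ("/proc/self/maps", "r");
  char line[1024];

  if (f == NULL)
    return 1;

  while (fgets (line, sizeof line, f) != NULL)
    {
      unsigned long long start, end, offset, inode;
      char perms[8], dev[8], filename[512];

      filename[0] = '\0';
      /* start-end perms offset dev inode [filename]  */
      if (sscanf (line, "%llx-%llx %7s %llx %7s %llu %511s",
		  &start, &end, perms, &offset, dev, &inode, filename) >= 6)
	printf ("%#llx..%#llx %s %s\n", start, end, perms, filename);
    }

  fclose (f);
  return 0;
}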
But the other notes are @@ -3520,12 +4237,6 @@ linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, if (core_regset_p && sect_list != NULL) while (sect_list->sect_name != NULL) { - /* .reg was already handled above. */ - if (strcmp (sect_list->sect_name, ".reg") == 0) - { - sect_list++; - continue; - } regset = gdbarch_regset_from_core_section (gdbarch, sect_list->sect_name, sect_list->size); @@ -3533,12 +4244,17 @@ linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, gdb_regset = xmalloc (sect_list->size); regset->collect_regset (regset, regcache, -1, gdb_regset, sect_list->size); - note_data = (char *) elfcore_write_register_note (obfd, - note_data, - note_size, - sect_list->sect_name, - gdb_regset, - sect_list->size); + + if (strcmp (sect_list->sect_name, ".reg") == 0) + note_data = (char *) elfcore_write_prstatus + (obfd, note_data, note_size, + lwp, target_signal_to_host (stop_signal), + gdb_regset); + else + note_data = (char *) elfcore_write_register_note + (obfd, note_data, note_size, + sect_list->sect_name, gdb_regset, + sect_list->size); xfree (gdb_regset); sect_list++; } @@ -3548,10 +4264,26 @@ linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid, the new support, the code below should be deleted. */ else { + gdb_gregset_t gregs; + gdb_fpregset_t fpregs; + + if (core_regset_p + && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg", + sizeof (gregs))) + != NULL && regset->collect_regset != NULL) + regset->collect_regset (regset, regcache, -1, + &gregs, sizeof (gregs)); + else + fill_gregset (regcache, &gregs, -1); + + note_data = (char *) elfcore_write_prstatus + (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal), + &gregs); + if (core_regset_p && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2", - sizeof (fpregs))) != NULL - && regset->collect_regset != NULL) + sizeof (fpregs))) + != NULL && regset->collect_regset != NULL) regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs)); else @@ -3593,6 +4325,120 @@ linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data) return 0; } +/* Enumerate spufs IDs for process PID. */ + +static void +iterate_over_spus (int pid, void (*callback) (void *, int), void *data) +{ + char path[128]; + DIR *dir; + struct dirent *entry; + + xsnprintf (path, sizeof path, "/proc/%d/fd", pid); + dir = opendir (path); + if (!dir) + return; + + rewinddir (dir); + while ((entry = readdir (dir)) != NULL) + { + struct stat st; + struct statfs stfs; + int fd; + + fd = atoi (entry->d_name); + if (!fd) + continue; + + xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd); + if (stat (path, &st) != 0) + continue; + if (!S_ISDIR (st.st_mode)) + continue; + + if (statfs (path, &stfs) != 0) + continue; + if (stfs.f_type != SPUFS_MAGIC) + continue; + + callback (data, fd); + } + + closedir (dir); +} + +/* Generate corefile notes for SPU contexts. 
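iterate_over_spus above identifies SPU contexts by statfs'ing each entry of /proc/PID/fd and comparing f_type against SPUFS_MAGIC. The same technique in a standalone sketch (not GDB code), printing every descriptor's filesystem type for the current process:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <unistd.h>

int
main (void)
{
  char path[128];
  DIR *dir;
  struct dirent *entry;

  snprintf (path, sizeof path, "/proc/%d/fd", (int) getpid ());
  dir = opendir (path);
  if (dir == NULL)
    return 1;

  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd = atoi (entry->d_name);

      if (fd == 0 && entry->d_name[0] != '0')
	continue;		/* Skip "." and "..".  */

      snprintf (path, sizeof path, "/proc/%d/fd/%d", (int) getpid (), fd);
      if (stat (path, &st) != 0 || statfs (path, &stfs) != 0)
	continue;

      /* An SPU context would show up here as a directory whose
	 filesystem type equals SPUFS_MAGIC.  */
      printf ("fd %d: %s, f_type %#lx\n", fd,
	      S_ISDIR (st.st_mode) ? "directory" : "not a directory",
	      (long) stfs.f_type);
    }

  closedir (dir);
  return 0;
}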
*/ + +struct linux_spu_corefile_data +{ + bfd *obfd; + char *note_data; + int *note_size; +}; + +static void +linux_spu_corefile_callback (void *data, int fd) +{ + struct linux_spu_corefile_data *args = data; + int i; + + static const char *spu_files[] = + { + "object-id", + "mem", + "regs", + "fpcr", + "lslr", + "decr", + "decr_status", + "signal1", + "signal1_type", + "signal2", + "signal2_type", + "event_mask", + "event_status", + "mbox_info", + "ibox_info", + "wbox_info", + "dma_info", + "proxydma_info", + }; + + for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++) + { + char annex[32], note_name[32]; + gdb_byte *spu_data; + LONGEST spu_len; + + xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]); + spu_len = target_read_alloc (¤t_target, TARGET_OBJECT_SPU, + annex, &spu_data); + if (spu_len > 0) + { + xsnprintf (note_name, sizeof note_name, "SPU/%s", annex); + args->note_data = elfcore_write_note (args->obfd, args->note_data, + args->note_size, note_name, + NT_SPU, spu_data, spu_len); + xfree (spu_data); + } + } +} + +static char * +linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size) +{ + struct linux_spu_corefile_data args; + + args.obfd = obfd; + args.note_data = note_data; + args.note_size = note_size; + + iterate_over_spus (PIDGET (inferior_ptid), + linux_spu_corefile_callback, &args); + + return args.note_data; +} + /* Fills the "to_make_corefile_note" target vector. Builds the note section for a corefile, and returns it in a malloc buffer. */ @@ -3600,13 +4446,11 @@ static char * linux_nat_make_corefile_notes (bfd *obfd, int *note_size) { struct linux_nat_corefile_thread_data thread_args; - struct cleanup *old_chain; /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */ char fname[16] = { '\0' }; /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */ char psargs[80] = { '\0' }; char *note_data = NULL; - ptid_t current_ptid = inferior_ptid; ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid)); gdb_byte *auxv; int auxv_len; @@ -3654,6 +4498,8 @@ linux_nat_make_corefile_notes (bfd *obfd, int *note_size) xfree (auxv); } + note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size); + make_cleanup (xfree, note_data); return note_data; } @@ -3674,7 +4520,6 @@ linux_nat_info_proc_cmd (char *args, int from_tty) int cwd_f = 1; int exe_f = 1; int mappings_f = 0; - int environ_f = 0; int status_f = 0; int stat_f = 0; int all = 0; @@ -3722,7 +4567,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) } else { - /* [...] (future options here) */ + /* [...] (future options here). */ } argv++; } @@ -3740,6 +4585,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if ((procfile = fopen (fname1, "r")) != NULL) { struct cleanup *cleanup = make_cleanup_fclose (procfile); + if (fgets (buffer, sizeof (buffer), procfile)) printf_filtered ("cmdline = '%s'\n", buffer); else @@ -3778,7 +4624,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) cleanup = make_cleanup_fclose (procfile); printf_filtered (_("Mapped address spaces:\n\n")); - if (gdbarch_addr_bit (current_gdbarch) == 32) + if (gdbarch_addr_bit (target_gdbarch) == 32) { printf_filtered ("\t%10s %10s %10s %10s %7s\n", "Start Addr", @@ -3804,7 +4650,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) a generic local_address_string instead to print out the addresses; that makes sense to me, too. 
*/ - if (gdbarch_addr_bit (current_gdbarch) == 32) + if (gdbarch_addr_bit (target_gdbarch) == 32) { printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n", (unsigned long) addr, /* FIXME: pr_addr */ @@ -3835,6 +4681,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if ((procfile = fopen (fname1, "r")) != NULL) { struct cleanup *cleanup = make_cleanup_fclose (procfile); + while (fgets (buffer, sizeof (buffer), procfile) != NULL) puts_filtered (buffer); do_cleanups (cleanup); @@ -3891,8 +4738,8 @@ linux_nat_info_proc_cmd (char *args, int from_tty) if (fscanf (procfile, "%ld ", <mp) > 0) printf_filtered (_("stime, children: %ld\n"), ltmp); if (fscanf (procfile, "%ld ", <mp) > 0) - printf_filtered (_("jiffies remaining in current time slice: %ld\n"), - ltmp); + printf_filtered (_("jiffies remaining in current " + "time slice: %ld\n"), ltmp); if (fscanf (procfile, "%ld ", <mp) > 0) printf_filtered (_("'nice' value: %ld\n"), ltmp); if (fscanf (procfile, "%lu ", <mp) > 0) @@ -3902,13 +4749,14 @@ linux_nat_info_proc_cmd (char *args, int from_tty) printf_filtered (_("jiffies until next SIGALRM: %lu\n"), (unsigned long) ltmp); if (fscanf (procfile, "%ld ", <mp) > 0) - printf_filtered (_("start time (jiffies since system boot): %ld\n"), - ltmp); + printf_filtered (_("start time (jiffies since " + "system boot): %ld\n"), ltmp); if (fscanf (procfile, "%lu ", <mp) > 0) printf_filtered (_("Virtual memory size: %lu\n"), (unsigned long) ltmp); if (fscanf (procfile, "%lu ", <mp) > 0) - printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp); + printf_filtered (_("Resident set size: %lu\n"), + (unsigned long) ltmp); if (fscanf (procfile, "%lu ", <mp) > 0) printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp); if (fscanf (procfile, "%lu ", <mp) > 0) @@ -3917,11 +4765,11 @@ linux_nat_info_proc_cmd (char *args, int from_tty) printf_filtered (_("End of text: 0x%lx\n"), ltmp); if (fscanf (procfile, "%lu ", <mp) > 0) printf_filtered (_("Start of stack: 0x%lx\n"), ltmp); -#if 0 /* Don't know how architecture-dependent the rest is... - Anyway the signal bitmap info is available from "status". */ - if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ +#if 0 /* Don't know how architecture-dependent the rest is... + Anyway the signal bitmap info is available from "status". */ + if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp); - if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ + if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp); if (fscanf (procfile, "%ld ", <mp) > 0) printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp); @@ -3931,7 +4779,7 @@ linux_nat_info_proc_cmd (char *args, int from_tty) printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp); if (fscanf (procfile, "%ld ", <mp) > 0) printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp); - if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ + if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */ printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp); #endif do_cleanups (cleanup); @@ -3987,6 +4835,100 @@ linux_proc_xfer_partial (struct target_ops *ops, enum target_object object, return ret; } + +/* Enumerate spufs IDs for process PID. 
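The enumeration routine that follows packs one 4-byte context ID per SPU into the caller-supplied buffer, honouring OFFSET and LEN so the core can read the list piecewise. The same windowing logic in miniature (standalone sketch, not GDB code; host byte order is used where the real code goes through store_unsigned_integer):

#include <stdio.h>
#include <string.h>

static long
pack_ids (const int *ids, int count, unsigned char *buf,
	  unsigned long offset, long len)
{
  long pos = 0, written = 0;
  int i;

  for (i = 0; i < count; i++)
    {
      /* Copy only the IDs that fall entirely inside the window.  */
      if (pos >= (long) offset && pos + 4 <= (long) offset + len)
	{
	  memcpy (buf + (pos - (long) offset), &ids[i], 4);
	  written += 4;
	}
      pos += 4;
    }
  return written;
}

int
main (void)
{
  int ids[] = { 11, 22, 33 };
  unsigned char buf[8];
  long n = pack_ids (ids, 3, buf, 4, sizeof buf);	/* Skip the first ID.  */

  printf ("copied %ld bytes into the window\n", n);	/* Prints 8.  */
  return 0;
}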
*/ +static LONGEST +spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len) +{ + enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch); + LONGEST pos = 0; + LONGEST written = 0; + char path[128]; + DIR *dir; + struct dirent *entry; + + xsnprintf (path, sizeof path, "/proc/%d/fd", pid); + dir = opendir (path); + if (!dir) + return -1; + + rewinddir (dir); + while ((entry = readdir (dir)) != NULL) + { + struct stat st; + struct statfs stfs; + int fd; + + fd = atoi (entry->d_name); + if (!fd) + continue; + + xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd); + if (stat (path, &st) != 0) + continue; + if (!S_ISDIR (st.st_mode)) + continue; + + if (statfs (path, &stfs) != 0) + continue; + if (stfs.f_type != SPUFS_MAGIC) + continue; + + if (pos >= offset && pos + 4 <= offset + len) + { + store_unsigned_integer (buf + pos - offset, 4, byte_order, fd); + written += 4; + } + pos += 4; + } + + closedir (dir); + return written; +} + +/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU + object type, using the /proc file system. */ +static LONGEST +linux_proc_xfer_spu (struct target_ops *ops, enum target_object object, + const char *annex, gdb_byte *readbuf, + const gdb_byte *writebuf, + ULONGEST offset, LONGEST len) +{ + char buf[128]; + int fd = 0; + int ret = -1; + int pid = PIDGET (inferior_ptid); + + if (!annex) + { + if (!readbuf) + return -1; + else + return spu_enumerate_spu_ids (pid, readbuf, offset, len); + } + + xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex); + fd = open (buf, writebuf? O_WRONLY : O_RDONLY); + if (fd <= 0) + return -1; + + if (offset != 0 + && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset) + { + close (fd); + return 0; + } + + if (writebuf) + ret = write (fd, writebuf, (size_t) len); + else if (readbuf) + ret = read (fd, readbuf, (size_t) len); + + close (fd); + return ret; +} + + /* Parse LINE as a signal set and add its set bits to SIGS. */ static void @@ -4031,11 +4973,11 @@ add_line_to_sigset (const char *line, sigset_t *sigs) SIGS to match. */ void -linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored) +linux_proc_pending_signals (int pid, sigset_t *pending, + sigset_t *blocked, sigset_t *ignored) { FILE *procfile; char buffer[MAXPATHLEN], fname[MAXPATHLEN]; - int signum; struct cleanup *cleanup; sigemptyset (pending); @@ -4072,8 +5014,8 @@ linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigse static LONGEST linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object, - const char *annex, gdb_byte *readbuf, - const gdb_byte *writebuf, ULONGEST offset, LONGEST len) + const char *annex, gdb_byte *readbuf, + const gdb_byte *writebuf, ULONGEST offset, LONGEST len) { /* We make the process list snapshot when the object starts to be read. */ @@ -4085,6 +5027,45 @@ linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object, gdb_assert (object == TARGET_OBJECT_OSDATA); + if (!annex) + { + if (offset == 0) + { + if (len_avail != -1 && len_avail != 0) + obstack_free (&obstack, NULL); + len_avail = 0; + buf = NULL; + obstack_init (&obstack); + obstack_grow_str (&obstack, "\n"); + + obstack_xml_printf (&obstack, + "" + "processes" + "" + "Listing of all processes" + ""); + + obstack_grow_str0 (&obstack, "\n"); + buf = obstack_finish (&obstack); + len_avail = strlen (buf); + } + + if (offset >= len_avail) + { + /* Done. Get rid of the obstack. 
*/ + obstack_free (&obstack, NULL); + buf = NULL; + len_avail = 0; + return 0; + } + + if (len > len_avail - offset) + len = len_avail - offset; + memcpy (readbuf, buf + offset, len); + + return len; + } + if (strcmp (annex, "processes") != 0) return 0; @@ -4093,7 +5074,7 @@ linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object, if (offset == 0) { if (len_avail != -1 && len_avail != 0) - obstack_free (&obstack, NULL); + obstack_free (&obstack, NULL); len_avail = 0; buf = NULL; obstack_init (&obstack); @@ -4101,60 +5082,63 @@ linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object, dirp = opendir ("/proc"); if (dirp) - { - struct dirent *dp; - while ((dp = readdir (dirp)) != NULL) - { - struct stat statbuf; - char procentry[sizeof ("/proc/4294967295")]; - - if (!isdigit (dp->d_name[0]) - || NAMELEN (dp) > sizeof ("4294967295") - 1) - continue; - - sprintf (procentry, "/proc/%s", dp->d_name); - if (stat (procentry, &statbuf) == 0 - && S_ISDIR (statbuf.st_mode)) - { - char *pathname; - FILE *f; - char cmd[MAXPATHLEN + 1]; - struct passwd *entry; - - pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name); - entry = getpwuid (statbuf.st_uid); - - if ((f = fopen (pathname, "r")) != NULL) - { - size_t len = fread (cmd, 1, sizeof (cmd) - 1, f); - if (len > 0) - { - int i; - for (i = 0; i < len; i++) - if (cmd[i] == '\0') - cmd[i] = ' '; - cmd[len] = '\0'; - - obstack_xml_printf ( - &obstack, - "" - "%s" - "%s" - "%s" - "", - dp->d_name, - entry ? entry->pw_name : "?", - cmd); - } - fclose (f); - } - - xfree (pathname); - } - } - - closedir (dirp); - } + { + struct dirent *dp; + + while ((dp = readdir (dirp)) != NULL) + { + struct stat statbuf; + char procentry[sizeof ("/proc/4294967295")]; + + if (!isdigit (dp->d_name[0]) + || NAMELEN (dp) > sizeof ("4294967295") - 1) + continue; + + sprintf (procentry, "/proc/%s", dp->d_name); + if (stat (procentry, &statbuf) == 0 + && S_ISDIR (statbuf.st_mode)) + { + char *pathname; + FILE *f; + char cmd[MAXPATHLEN + 1]; + struct passwd *entry; + + pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name); + entry = getpwuid (statbuf.st_uid); + + if ((f = fopen (pathname, "r")) != NULL) + { + size_t len = fread (cmd, 1, sizeof (cmd) - 1, f); + + if (len > 0) + { + int i; + + for (i = 0; i < len; i++) + if (cmd[i] == '\0') + cmd[i] = ' '; + cmd[len] = '\0'; + + obstack_xml_printf ( + &obstack, + "" + "%s" + "%s" + "%s" + "", + dp->d_name, + entry ? entry->pw_name : "?", + cmd); + } + fclose (f); + } + + xfree (pathname); + } + } + + closedir (dirp); + } obstack_grow_str0 (&obstack, "\n"); buf = obstack_finish (&obstack); @@ -4185,13 +5169,31 @@ linux_xfer_partial (struct target_ops *ops, enum target_object object, LONGEST xfer; if (object == TARGET_OBJECT_AUXV) - return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf, + return memory_xfer_auxv (ops, object, annex, readbuf, writebuf, offset, len); if (object == TARGET_OBJECT_OSDATA) return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf, offset, len); + if (object == TARGET_OBJECT_SPU) + return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf, + offset, len); + + /* GDB calculates all the addresses in possibly larget width of the address. + Address width needs to be masked before its final use - either by + linux_proc_xfer_partial or inf_ptrace_xfer_partial. + + Compare ADDR_BIT first to avoid a compiler warning on shift overflow. 
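A standalone illustration of the guard described above (not GDB code; values are made up): shifting a 64-bit quantity by its full width is undefined behaviour and draws a compiler warning, so the mask is only built when the address width is strictly smaller than the host word.

#include <limits.h>
#include <stdio.h>

int
main (void)
{
  unsigned long long offset = 0xffffffff12345678ULL;
  int addr_bit = 32;		/* E.g. a 32-bit inferior.  */

  /* Without the comparison, addr_bit == 64 would shift a 64-bit value
     by 64, which is undefined behaviour.  */
  if (addr_bit < (int) (sizeof (unsigned long long) * CHAR_BIT))
    offset &= ((unsigned long long) 1 << addr_bit) - 1;

  printf ("masked offset: %#llx\n", offset);	/* 0x12345678 */
  return 0;
}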
*/ + + if (object == TARGET_OBJECT_MEMORY) + { + int addr_bit = gdbarch_addr_bit (target_gdbarch); + + if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT)) + offset &= ((ULONGEST) 1 << addr_bit) - 1; + } + xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf, offset, len); if (xfer != 0) @@ -4210,6 +5212,7 @@ linux_target_install_ops (struct target_ops *t) t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint; t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint; t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint; + t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint; t->to_pid_to_exec_file = linux_child_pid_to_exec_file; t->to_post_startup_inferior = linux_child_post_startup_inferior; t->to_post_attach = linux_child_post_attach; @@ -4282,7 +5285,7 @@ linux_nat_supports_non_stop (void) /* True if we want to support multi-process. To be removed when GDB supports multi-exec. */ -int linux_multi_process = 0; +int linux_multi_process = 1; static int linux_nat_supports_multi_process (void) @@ -4335,14 +5338,9 @@ linux_nat_terminal_inferior (void) return; } - /* GDB should never give the terminal to the inferior, if the - inferior is running in the background (run&, continue&, etc.). - This check can be removed when the common code is fixed. */ - if (!sync_execution) - return; - terminal_inferior (); + /* Calls to target_terminal_*() are meant to be idempotent. */ if (!async_terminal_is_ours) return; @@ -4368,9 +5366,6 @@ linux_nat_terminal_ours (void) but claiming it sure should. */ terminal_ours (); - if (!sync_execution) - return; - if (async_terminal_is_ours) return; @@ -4489,7 +5484,6 @@ linux_nat_stop_lwp (struct lwp_info *lwp, void *data) { if (!lwp->stopped) { - int pid, status; ptid_t ptid = lwp->ptid; if (debug_linux_nat) @@ -4521,13 +5515,14 @@ linux_nat_stop_lwp (struct lwp_info *lwp, void *data) if (debug_linux_nat) { - if (find_thread_pid (lwp->ptid)->stop_requested) - fprintf_unfiltered (gdb_stdlog, "\ -LNSL: already stopped/stop_requested %s\n", + if (find_thread_ptid (lwp->ptid)->stop_requested) + fprintf_unfiltered (gdb_stdlog, + "LNSL: already stopped/stop_requested %s\n", target_pid_to_str (lwp->ptid)); else - fprintf_unfiltered (gdb_stdlog, "\ -LNSL: already stopped/no stop_requested yet %s\n", + fprintf_unfiltered (gdb_stdlog, + "LNSL: already stopped/no " + "stop_requested yet %s\n", target_pid_to_str (lwp->ptid)); } } @@ -4557,6 +5552,116 @@ linux_nat_close (int quitting) linux_ops->to_close (quitting); } +/* When requests are passed down from the linux-nat layer to the + single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are + used. The address space pointer is stored in the inferior object, + but the common code that is passed such ptid can't tell whether + lwpid is a "main" process id or not (it assumes so). We reverse + look up the "main" process id from the lwp here. */ + +struct address_space * +linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid) +{ + struct lwp_info *lwp; + struct inferior *inf; + int pid; + + pid = GET_LWP (ptid); + if (GET_LWP (ptid) == 0) + { + /* An (lwpid,0,0) ptid. Look up the lwp object to get at the + tgid. */ + lwp = find_lwp_pid (ptid); + pid = GET_PID (lwp->ptid); + } + else + { + /* A (pid,lwpid,0) ptid. 
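linux_nat_core_of_thread_1, introduced a little further below, pulls the processor number out of /proc/PID/task/TID/stat; counting from the field after the command name as index 0, that is field 36. A standalone sketch of the same parsing for the current (main) thread (not GDB code; it skips to the last ')' to step over the comm field):

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main (void)
{
  char path[64], buf[4096];
  char *p, *saveptr = NULL;
  FILE *f;
  int i, core = -1;

  snprintf (path, sizeof path, "/proc/%d/task/%d/stat",
	    (int) getpid (), (int) getpid ());
  f = fopen (path, "r");
  if (f == NULL || fgets (buf, sizeof buf, f) == NULL)
    return 1;
  fclose (f);

  p = strrchr (buf, ')');	/* End of the comm field.  */
  if (p == NULL)
    return 1;

  /* Field 0 is the state character right after ")"; the processor
     number is field 36.  */
  for (i = 0, p = strtok_r (p + 1, " ", &saveptr);
       p != NULL && i < 36;
       i++, p = strtok_r (NULL, " ", &saveptr))
    ;

  if (p != NULL)
    sscanf (p, "%d", &core);

  printf ("last ran on CPU %d\n", core);
  return 0;
}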
*/ + pid = GET_PID (ptid); + } + + inf = find_inferior_pid (pid); + gdb_assert (inf != NULL); + return inf->aspace; +} + +int +linux_nat_core_of_thread_1 (ptid_t ptid) +{ + struct cleanup *back_to; + char *filename; + FILE *f; + char *content = NULL; + char *p; + char *ts = 0; + int content_read = 0; + int i; + int core; + + filename = xstrprintf ("/proc/%d/task/%ld/stat", + GET_PID (ptid), GET_LWP (ptid)); + back_to = make_cleanup (xfree, filename); + + f = fopen (filename, "r"); + if (!f) + { + do_cleanups (back_to); + return -1; + } + + make_cleanup_fclose (f); + + for (;;) + { + int n; + + content = xrealloc (content, content_read + 1024); + n = fread (content + content_read, 1, 1024, f); + content_read += n; + if (n < 1024) + { + content[content_read] = '\0'; + break; + } + } + + make_cleanup (xfree, content); + + p = strchr (content, '('); + + /* Skip ")". */ + if (p != NULL) + p = strchr (p, ')'); + if (p != NULL) + p++; + + /* If the first field after program name has index 0, then core number is + the field with index 36. There's no constant for that anywhere. */ + if (p != NULL) + p = strtok_r (p, " ", &ts); + for (i = 0; p != NULL && i != 36; ++i) + p = strtok_r (NULL, " ", &ts); + + if (p == NULL || sscanf (p, "%d", &core) == 0) + core = -1; + + do_cleanups (back_to); + + return core; +} + +/* Return the cached value of the processor core for thread PTID. */ + +int +linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid) +{ + struct lwp_info *info = find_lwp_pid (ptid); + + if (info) + return info->core; + return -1; +} + void linux_nat_add_target (struct target_ops *t) { @@ -4579,6 +5684,9 @@ linux_nat_add_target (struct target_ops *t) t->to_thread_alive = linux_nat_thread_alive; t->to_pid_to_str = linux_nat_pid_to_str; t->to_has_thread_control = tc_schedlock; + t->to_thread_address_space = linux_nat_thread_address_space; + t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint; + t->to_stopped_data_address = linux_nat_stopped_data_address; t->to_can_async_p = linux_nat_can_async_p; t->to_is_async_p = linux_nat_is_async_p; @@ -4594,6 +5702,8 @@ linux_nat_add_target (struct target_ops *t) t->to_supports_multi_process = linux_nat_supports_multi_process; + t->to_core_of_thread = linux_nat_core_of_thread; + /* We don't change the stratum; this target will sit at process_stratum and thread_db will set at thread_stratum. This is a little strange, since this is a multi-threaded-capable @@ -4643,8 +5753,6 @@ extern initialize_file_ftype _initialize_linux_nat; void _initialize_linux_nat (void) { - sigset_t mask; - add_info ("proc", linux_nat_info_proc_cmd, _("\ Show /proc process information about any running process.\n\ Specify any process id, or use the program being debugged by default.\n\