#include <unistd.h>
#include <sys/syscall.h>
#endif
-#include <sys/ptrace.h>
+#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"
+#include "nat/linux-namespaces.h"
+#include "fileio.h"
#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#define O_LARGEFILE 0
#endif
+/* Does the current host support PTRACE_GETREGSET? */
+enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
+
/* The single-threaded native GNU/Linux target_ops. We save a pointer for
the use of the multi-threaded target. */
static struct target_ops *linux_ops;
static int kill_lwp (int lwpid, int signo);
static int stop_callback (struct lwp_info *lp, void *data);
+static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);
static int sigtrap_is_event (int status);
static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
+\f
+/* LWP accessors. */
+
+/* See nat/linux-nat.h.  Return LWP's ptid.  */
+
+ptid_t
+ptid_of_lwp (struct lwp_info *lwp)
+{
+ return lwp->ptid;
+}
+
+/* See nat/linux-nat.h.  Store INFO as LWP's architecture-specific
+   private data.  */
+
+void
+lwp_set_arch_private_info (struct lwp_info *lwp,
+ struct arch_lwp_info *info)
+{
+ lwp->arch_private = info;
+}
+
+/* See nat/linux-nat.h.  Return LWP's architecture-specific private
+   data.  */
+
+struct arch_lwp_info *
+lwp_arch_private_info (struct lwp_info *lwp)
+{
+ return lwp->arch_private;
+}
+
+/* See nat/linux-nat.h.  Return nonzero if LWP is stopped.  */
+
+int
+lwp_is_stopped (struct lwp_info *lwp)
+{
+ return lwp->stopped;
+}
+
+/* See nat/linux-nat.h.  Return the reason LWP last stopped.  */
+
+enum target_stop_reason
+lwp_stop_reason (struct lwp_info *lwp)
+{
+ return lwp->stop_reason;
+}
+
\f
/* Trivial list manipulation functions to keep track of a list of
new stopped processes. */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
- struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
+ struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
new_pid->pid = pid;
new_pid->status = status;
return 0;
}
+/* Return the ptrace options that we want to try to enable.
+   PTRACE_O_EXITKILL (have the kernel kill the tracee if we exit; see
+   ptrace(2)) is only requested for processes we spawned ourselves --
+   a process we merely ATTACHED to should outlive GDB.  */
+
+static int
+linux_nat_ptrace_options (int attached)
+{
+ int options = 0;
+
+ if (!attached)
+ options |= PTRACE_O_EXITKILL;
+
+ options |= (PTRACE_O_TRACESYSGOOD
+ | PTRACE_O_TRACEVFORKDONE
+ | PTRACE_O_TRACEVFORK
+ | PTRACE_O_TRACEFORK
+ | PTRACE_O_TRACEEXEC);
+
+ return options;
+}
+
/* Initialize ptrace warnings and check for supported ptrace
features given PID.
static void
linux_init_ptrace (pid_t pid, int attached)
{
- linux_enable_event_reporting (pid, attached);
+ int options = linux_nat_ptrace_options (attached);
+
+ linux_enable_event_reporting (pid, options);
linux_ptrace_init_warnings ();
}
int status = W_STOPCODE (0);
struct cleanup *old_chain;
int has_vforked;
+ ptid_t parent_ptid, child_ptid;
int parent_pid, child_pid;
has_vforked = (inferior_thread ()->pending_follow.kind
== TARGET_WAITKIND_VFORKED);
- parent_pid = ptid_get_lwp (inferior_ptid);
- if (parent_pid == 0)
- parent_pid = ptid_get_pid (inferior_ptid);
- child_pid
- = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
-
+ parent_ptid = inferior_ptid;
+ child_ptid = inferior_thread ()->pending_follow.value.related_pid;
+ parent_pid = ptid_get_lwp (parent_ptid);
+ child_pid = ptid_get_lwp (child_ptid);
/* We're already attached to the parent, by default. */
old_chain = save_inferior_ptid ();
- inferior_ptid = ptid_build (child_pid, child_pid, 0);
+ inferior_ptid = child_ptid;
child_lp = add_lwp (inferior_ptid);
child_lp->stopped = 1;
child_lp->last_resume_kind = resume_stop;
{
struct lwp_info *parent_lp;
- parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
+ parent_lp = find_lwp_pid (parent_ptid);
gdb_assert (linux_supports_tracefork () >= 0);
if (linux_supports_tracevforkdone ())
gdb_assert (ptid_lwp_p (ptid));
- lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
+ lp = XNEW (struct lwp_info);
memset (lp, 0, sizeof (struct lwp_info));
return NULL;
}
-/* Call CALLBACK with its second argument set to DATA for every LWP in
- the list. If CALLBACK returns 1 for a particular LWP, return a
- pointer to the structure describing that LWP immediately.
- Otherwise return NULL. */
+/* See nat/linux-nat.h. */
struct lwp_info *
iterate_over_lwps (ptid_t filter,
- int (*callback) (struct lwp_info *, void *),
+ iterate_over_lwps_ftype callback,
void *data)
{
struct lwp_info *lp, *lpnext;
if (ptid_match (lp->ptid, filter))
{
- if ((*callback) (lp, data))
+ if ((*callback) (lp, data) != 0)
return lp;
}
}
struct lwp_info *lp;
int status;
ptid_t ptid;
- volatile struct gdb_exception ex;
/* Make sure we report all signals during attach. */
linux_nat_pass_signals (ops, 0, NULL);
- TRY_CATCH (ex, RETURN_MASK_ERROR)
+ TRY
{
linux_ops->to_attach (ops, args, from_tty);
}
- if (ex.reason < 0)
+ CATCH (ex, RETURN_MASK_ERROR)
{
pid_t pid = parse_pid_to_attach (args);
struct buffer buffer;
else
throw_error (ex.error, "%s", message);
}
+ END_CATCH
/* The ptrace base target adds the main thread with (pid,0,0)
format. Decorate it with lwp info. */
attach_proc_task_lwp_callback);
if (target_can_async_p ())
- target_async (inferior_event_handler, 0);
+ target_async (1);
}
/* Get pending status of LP. */
signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
else if (lp->status)
signo = gdb_signal_from_host (WSTOPSIG (lp->status));
- else if (non_stop && !is_executing (lp->ptid))
+ else if (target_is_non_stop_p () && !is_executing (lp->ptid))
{
struct thread_info *tp = find_thread_ptid (lp->ptid);
signo = tp->suspend.stop_signal;
}
- else if (!non_stop)
+ else if (!target_is_non_stop_p ())
{
struct target_waitstatus last;
ptid_t last_ptid;
single-step it. If SIGNAL is nonzero, give it that signal. */
static void
-linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
+linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
+ enum gdb_signal signo)
{
- ptid_t ptid;
-
lp->step = step;
/* stop_pc doubles as the PC the LWP had when it was last resumed.
if (linux_nat_prepare_to_resume != NULL)
linux_nat_prepare_to_resume (lp);
- /* Convert to something the lower layer understands. */
- ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
- linux_ops->to_resume (linux_ops, ptid, step, signo);
- lp->stop_reason = LWP_STOPPED_BY_NO_REASON;
+ linux_ops->to_resume (linux_ops, lp->ptid, step, signo);
+
+ /* Successfully resumed. Clear state that no longer makes sense,
+ and mark the LWP as running. Must not do this before resuming
+ otherwise if that fails other code will be confused. E.g., we'd
+ later try to stop the LWP and hang forever waiting for a stop
+ status. Note that we must not throw after this is cleared,
+ otherwise handle_zombie_lwp_error would get confused. */
lp->stopped = 0;
+ lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
registers_changed_ptid (lp->ptid);
}
+/* Called when we try to resume a stopped LWP and that errors out. If
+ the LWP is no longer in ptrace-stopped state (meaning it's zombie,
+ or about to become), discard the error, clear any pending status
+ the LWP may have, and return true (we'll collect the exit status
+ soon enough). Otherwise, return false. */
+
+static int
+check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
+{
+ /* If we get an error after resuming the LWP successfully, we'd
+ confuse !T state for the LWP being gone. */
+ gdb_assert (lp->stopped);
+
+ /* We can't just check whether the LWP is in 'Z (Zombie)' state,
+ because even if ptrace failed with ESRCH, the tracee may be "not
+ yet fully dead", but already refusing ptrace requests. In that
+ case the tracee has 'R (Running)' state for a little bit
+ (observed in Linux 3.18). See also the note on ESRCH in the
+ ptrace(2) man page. Instead, check whether the LWP has any state
+ other than ptrace-stopped. */
+
+ /* Don't assume anything if /proc/PID/status can't be read. */
+ if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
+ {
+ /* Forget any stale stop state; the LWP's exit status will be
+ collected later. */
+ lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
+ lp->status = 0;
+ lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
+ return 1;
+ }
+ return 0;
+}
+
+/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
+ disappears while we try to resume it. */
+
+static void
+linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
+{
+ TRY
+ {
+ linux_resume_one_lwp_throw (lp, step, signo);
+ }
+ CATCH (ex, RETURN_MASK_ERROR)
+ {
+ /* The LWP is still ptrace-stopped, so this wasn't a vanishing
+ LWP -- propagate the original error. */
+ if (!check_ptrace_stopped_lwp_gone (lp))
+ throw_exception (ex);
+ }
+ END_CATCH
+}
+
/* Resume LP. */
static void
if (target_can_async_p ())
{
- target_async (inferior_event_handler, 0);
+ target_async (1);
/* Tell the event loop we have something to process. */
async_file_mark ();
}
if (resume_many)
iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
- linux_resume_one_lwp (lp, step, signo);
-
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"LLR: %s %s, %s (resume event thread)\n",
step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
- target_pid_to_str (ptid),
+ target_pid_to_str (lp->ptid),
(signo != GDB_SIGNAL_0
? strsignal (gdb_signal_to_host (signo)) : "0"));
+ linux_resume_one_lwp (lp, step, signo);
+
if (target_can_async_p ())
- target_async (inferior_event_handler, 0);
+ target_async (1);
}
/* Send a signal to an LWP. */
true, the new LWP remains stopped, otherwise it is continued. */
static int
-linux_handle_extended_wait (struct lwp_info *lp, int status,
- int stopping)
+linux_handle_extended_wait (struct lwp_info *lp, int status)
{
int pid = ptid_get_lwp (lp->ptid);
struct target_waitstatus *ourstatus = &lp->waitstatus;
ourstatus->kind = TARGET_WAITKIND_FORKED;
else if (event == PTRACE_EVENT_VFORK)
ourstatus->kind = TARGET_WAITKIND_VFORKED;
- else
+ else if (event == PTRACE_EVENT_CLONE)
{
struct lwp_info *new_lp;
new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
new_lp->cloned = 1;
new_lp->stopped = 1;
+ new_lp->resumed = 1;
+
+ /* If the thread_db layer is active, let it record the user
+ level thread id and status, and add the thread to GDB's
+ list. */
+ if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
+ {
+ /* The process is not using thread_db. Add the LWP to
+ GDB's list. */
+ target_post_attach (ptid_get_lwp (new_lp->ptid));
+ add_thread (new_lp->ptid);
+ }
+
+ /* Even if we're stopping the thread for some reason
+ internal to this module, from the perspective of infrun
+ and the user/frontend, this new thread is running until
+ it next reports a stop. */
+ set_running (new_lp->ptid, 1);
+ set_executing (new_lp->ptid, 1);
if (WSTOPSIG (status) != SIGSTOP)
{
have a lower number than SIGSTOP (e.g. SIGUSR1).
This is an unlikely case, and harder to handle for
fork / vfork than for clone, so we do not try - but
- we handle it for clone events here. We'll send
- the other signal on to the thread below. */
+ we handle it for clone events here. */
new_lp->signalled = 1;
- }
- else
- {
- struct thread_info *tp;
-
- /* When we stop for an event in some other thread, and
- pull the thread list just as this thread has cloned,
- we'll have seen the new thread in the thread_db list
- before handling the CLONE event (glibc's
- pthread_create adds the new thread to the thread list
- before clone'ing, and has the kernel fill in the
- thread's tid on the clone call with
- CLONE_PARENT_SETTID). If that happened, and the core
- had requested the new thread to stop, we'll have
- killed it with SIGSTOP. But since SIGSTOP is not an
- RT signal, it can only be queued once. We need to be
- careful to not resume the LWP if we wanted it to
- stop. In that case, we'll leave the SIGSTOP pending.
- It will later be reported as GDB_SIGNAL_0. */
- tp = find_thread_ptid (new_lp->ptid);
- if (tp != NULL && tp->stop_requested)
- new_lp->last_resume_kind = resume_stop;
- else
- status = 0;
- }
-
- if (non_stop)
- {
- /* Add the new thread to GDB's lists as soon as possible
- so that:
- 1) the frontend doesn't have to wait for a stop to
- display them, and,
-
- 2) we tag it with the correct running state. */
-
- /* If the thread_db layer is active, let it know about
- this new thread, and add it to GDB's list. */
- if (!thread_db_attach_lwp (new_lp->ptid))
- {
- /* We're not using thread_db. Add it to GDB's
- list. */
- target_post_attach (ptid_get_lwp (new_lp->ptid));
- add_thread (new_lp->ptid);
- }
-
- if (!stopping)
- {
- set_running (new_lp->ptid, 1);
- set_executing (new_lp->ptid, 1);
- /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
- resume_stop. */
- new_lp->last_resume_kind = resume_continue;
- }
- }
-
- if (status != 0)
- {
/* We created NEW_LP so it cannot yet contain STATUS. */
gdb_assert (new_lp->status == 0);
new_lp->status = status;
}
- new_lp->resumed = !stopping;
return 1;
}
/* Check if the thread has exited. */
if (WIFEXITED (status) || WIFSIGNALED (status))
{
+ if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "WL: Process %d exited.\n",
+ ptid_get_pid (lp->ptid));
+
+ /* This is the leader exiting, it means the whole
+ process is gone. Store the status to report to the
+ core. Store it in lp->waitstatus, because lp->status
+ would be ambiguous (W_EXITCODE(0,0) == 0). */
+ store_waitstatus (&lp->waitstatus, status);
+ return 0;
+ }
+
thread_dead = 1;
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
if (lp->must_set_ptrace_flags)
{
struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
+ int options = linux_nat_ptrace_options (inf->attach_flag);
- linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
+ linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
lp->must_set_ptrace_flags = 0;
}
fprintf_unfiltered (gdb_stdlog,
"WL: Handling extended status 0x%06x\n",
status);
- linux_handle_extended_wait (lp, status, 1);
+ linux_handle_extended_wait (lp, status);
return 0;
}
stop_callback (lwp, NULL);
}
+/* See linux-nat.h.  Stop every LWP and synchronously wait until all
+   have reported their stop.  */
+
+void
+linux_stop_and_wait_all_lwps (void)
+{
+ /* Stop all LWP's ... */
+ iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
+
+ /* ... and wait until all of them have reported back that
+ they're no longer running. */
+ iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
+}
+
+/* See linux-nat.h.  Re-resume every LWP that is stopped, marked
+   resumed, and has no pending status to report.  */
+
+void
+linux_unstop_all_lwps (void)
+{
+ iterate_over_lwps (minus_one_ptid,
+ resume_stopped_resumed_lwps, &minus_one_ptid);
+}
+
/* Return non-zero if LWP PID has a pending SIGINT. */
static int
if (linux_ops->to_stopped_by_watchpoint (linux_ops))
{
- lp->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
+ lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
if (linux_ops->to_stopped_data_address != NULL)
lp->stopped_data_address_p =
do_cleanups (old_chain);
- return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
+ return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
/* Called when the LWP stopped for a trap that could be explained by a
static void
save_sigtrap (struct lwp_info *lp)
{
- gdb_assert (lp->stop_reason == LWP_STOPPED_BY_NO_REASON);
+ gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
gdb_assert (lp->status != 0);
- if (check_stopped_by_watchpoint (lp))
- return;
-
+ /* Check first if this was a SW/HW breakpoint before checking
+ watchpoints, because at least s390 can't tell the data address of
+ hardware watchpoint hits, and the kernel returns
+ stopped-by-watchpoint as long as there's a watchpoint set. */
if (linux_nat_status_is_event (lp->status))
check_stopped_by_breakpoint (lp);
+
+ /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
+ or hardware watchpoint. Check which is which if we got
+ TARGET_STOPPED_BY_HW_BREAKPOINT. */
+ if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON
+ || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
+ check_stopped_by_watchpoint (lp);
}
/* Returns true if the LWP had stopped for a watchpoint. */
gdb_assert (lp != NULL);
- return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
+ return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
static int
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "SWC: Delayed SIGSTOP caught for %s.\n",
+ "SWC: Expected SIGSTOP caught for %s.\n",
target_pid_to_str (lp->ptid));
/* Reset SIGNALLED only after the stop_wait_callback call
if (!lp->resumed)
return 0;
- if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
- || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT)
+ if (!lwp_status_pending_p (lp))
+ return 0;
+
+ if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
+ || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
{
struct regcache *regcache = get_thread_regcache (lp->ptid);
struct gdbarch *gdbarch = get_regcache_arch (regcache);
CORE_ADDR pc;
int discard = 0;
- gdb_assert (lp->status != 0);
-
pc = regcache_read_pc (regcache);
if (pc != lp->stop_pc)
paddress (target_gdbarch (), pc));
discard = 1;
}
+
+#if !USE_SIGTRAP_SIGINFO
else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
{
if (debug_linux_nat)
discard = 1;
}
+#endif
if (discard)
{
linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
return 0;
}
- return 1;
}
- return lwp_status_pending_p (lp);
+ return 1;
}
/* Return non-zero if LP isn't stopped. */
return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
}
-/* Select the Nth LWP that has had a SIGTRAP event. */
+/* Select the Nth LWP that has had an event. */
static int
select_event_lwp_callback (struct lwp_info *lp, void *data)
struct gdbarch *gdbarch = get_regcache_arch (regcache);
CORE_ADDR pc;
CORE_ADDR sw_bp_pc;
+#if USE_SIGTRAP_SIGINFO
+ siginfo_t siginfo;
+#endif
pc = regcache_read_pc (regcache);
- sw_bp_pc = pc - target_decr_pc_after_break (gdbarch);
+ sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
+
+#if USE_SIGTRAP_SIGINFO
+ if (linux_nat_get_siginfo (lp->ptid, &siginfo))
+ {
+ if (siginfo.si_signo == SIGTRAP)
+ {
+ if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "CSBB: %s stopped by software "
+ "breakpoint\n",
+ target_pid_to_str (lp->ptid));
+
+ /* Back up the PC if necessary. */
+ if (pc != sw_bp_pc)
+ regcache_write_pc (regcache, sw_bp_pc);
+ lp->stop_pc = sw_bp_pc;
+ lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
+ return 1;
+ }
+ else if (siginfo.si_code == TRAP_HWBKPT)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "CSBB: %s stopped by hardware "
+ "breakpoint/watchpoint\n",
+ target_pid_to_str (lp->ptid));
+
+ lp->stop_pc = pc;
+ lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
+ return 1;
+ }
+ else if (siginfo.si_code == TRAP_TRACE)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "CSBB: %s stopped by trace\n",
+ target_pid_to_str (lp->ptid));
+ }
+ }
+ }
+#else
if ((!lp->step || lp->stop_pc == sw_bp_pc)
&& software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
sw_bp_pc))
breakpoint instruction. */
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "CB: Push back software breakpoint for %s\n",
+ "CSBB: %s stopped by software breakpoint\n",
target_pid_to_str (lp->ptid));
/* Back up the PC if necessary. */
regcache_write_pc (regcache, sw_bp_pc);
lp->stop_pc = sw_bp_pc;
- lp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
+ lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
return 1;
}
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "CB: Push back hardware breakpoint for %s\n",
+ "CSBB: stopped by hardware breakpoint %s\n",
target_pid_to_str (lp->ptid));
lp->stop_pc = pc;
- lp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
+ lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
return 1;
}
+#endif
return 0;
}
+
+/* Returns true if the LWP of inferior_ptid had stopped for a software
+   breakpoint. */
+
+static int
+linux_nat_stopped_by_sw_breakpoint (struct target_ops *ops)
+{
+ struct lwp_info *lp = find_lwp_pid (inferior_ptid);
+
+ gdb_assert (lp != NULL);
+
+ return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
+}
+
+/* Implement the supports_stopped_by_sw_breakpoint method.  SW
+   breakpoint stop reasons are only reported when the host can
+   identify trap causes via siginfo (USE_SIGTRAP_SIGINFO).  */
+
+static int
+linux_nat_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
+{
+ return USE_SIGTRAP_SIGINFO;
+}
+
+/* Returns true if the LWP of inferior_ptid had stopped for a hardware
+   breakpoint/watchpoint. */
+
+static int
+linux_nat_stopped_by_hw_breakpoint (struct target_ops *ops)
+{
+ struct lwp_info *lp = find_lwp_pid (inferior_ptid);
+
+ gdb_assert (lp != NULL);
+
+ return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
+}
+
+/* Implement the supports_stopped_by_hw_breakpoint method.  HW
+   breakpoint stop reasons are only reported when the host can
+   identify trap causes via siginfo (USE_SIGTRAP_SIGINFO).  */
+
+static int
+linux_nat_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
+{
+ return USE_SIGTRAP_SIGINFO;
+}
+
/* Select one LWP out of those that have events pending. */
static void
having stepped the thread, wouldn't understand what the trap was
for, and therefore would report it to the user as a random
signal. */
- if (!non_stop)
+ if (!target_is_non_stop_p ())
{
event_lp = iterate_over_lwps (filter,
select_singlestep_lwp_callback, NULL);
/* First see how many events we have. */
iterate_over_lwps (filter, count_events_callback, &num_events);
+ gdb_assert (num_events > 0);
/* Now randomly pick a LWP out of those that have had
events. */
if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
{
struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
+ int options = linux_nat_ptrace_options (inf->attach_flag);
- linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
+ linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
lp->must_set_ptrace_flags = 0;
}
fprintf_unfiltered (gdb_stdlog,
"LLW: Handling extended status 0x%06x\n",
status);
- if (linux_handle_extended_wait (lp, status, 0))
+ if (linux_handle_extended_wait (lp, status))
return NULL;
}
}
}
- gdb_assert (lp->resumed);
-
+ /* Note that even if the leader was ptrace-stopped, it can still
+ exit, if e.g., some other thread brings down the whole
+ process (calls `exit'). So don't assert that the lwp is
+ resumed. */
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "Process %ld exited\n",
- ptid_get_lwp (lp->ptid));
+ "Process %ld exited (resumed=%d)\n",
+ ptid_get_lwp (lp->ptid), lp->resumed);
/* This was the last lwp in the process. Since events are
serialized to GDB core, we may not be able report this one
if (lp->signalled
&& WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
{
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "LLW: Delayed SIGSTOP caught for %s.\n",
- target_pid_to_str (lp->ptid));
-
lp->signalled = 0;
- if (lp->last_resume_kind != resume_stop)
+ if (lp->last_resume_kind == resume_stop)
{
- /* This is a delayed SIGSTOP. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LLW: resume_stop SIGSTOP caught for %s.\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else
+ {
+ /* This is a delayed SIGSTOP. Filter out the event. */
- linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
+ "LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
lp->step ?
"PTRACE_SINGLESTEP" : "PTRACE_CONT",
target_pid_to_str (lp->ptid));
+ linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
gdb_assert (lp->resumed);
-
- /* Discard the event. */
return NULL;
}
}
{
enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
- if (!non_stop)
+ if (!target_is_non_stop_p ())
{
/* Only do the below in all-stop, as we currently use SIGSTOP
to implement target_stop (see linux_nat_stop) in
target_pid_to_str (lp->ptid));
}
- if (!target_is_async_p ())
- {
- /* Causes SIGINT to be passed on to the attached process. */
- set_sigint_trap ();
- }
-
/* But if we don't find a pending event, we'll have to wait. Always
pull all events out of the kernel. We'll randomly select an
event LWP out of all that have events, to prevent starvation. */
ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
- if (!target_is_async_p ())
- clear_sigint_trap ();
-
restore_child_signals_mask (&prev_mask);
return minus_one_ptid;
}
sigsuspend (&suspend_mask);
}
- if (!target_is_async_p ())
- clear_sigint_trap ();
-
gdb_assert (lp);
status = lp->status;
lp->status = 0;
- if (!non_stop)
+ if (!target_is_non_stop_p ())
{
/* Now stop all other LWP's ... */
iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
gdb_assert (lp != NULL);
/* Now that we've selected our final event LWP, un-adjust its PC if
- it was a software breakpoint. */
- if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
+ it was a software breakpoint, and we can't reliably support the
+ "stopped by software breakpoint" stop reason. */
+ if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
+ && !USE_SIGTRAP_SIGINFO)
{
struct regcache *regcache = get_thread_regcache (lp->ptid);
struct gdbarch *gdbarch = get_regcache_arch (regcache);
- int decr_pc = target_decr_pc_after_break (gdbarch);
+ int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
if (decr_pc != 0)
{
clears it. */
last_resume_kind = lp->last_resume_kind;
- if (!non_stop)
+ if (!target_is_non_stop_p ())
{
/* In all-stop, from the core's perspective, all LWPs are now
stopped until a new resume action is sent over. */
{
ptid_t *wait_ptid_p = data;
- if (lp->stopped
- && lp->resumed
- && !lwp_status_pending_p (lp))
+ if (!lp->stopped)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "RSRL: NOT resuming LWP %s, not stopped\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else if (!lp->resumed)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "RSRL: NOT resuming LWP %s, not resumed\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else if (lwp_status_pending_p (lp))
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "RSRL: NOT resuming LWP %s, has pending status\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else
{
struct regcache *regcache = get_thread_regcache (lp->ptid);
struct gdbarch *gdbarch = get_regcache_arch (regcache);
- CORE_ADDR pc = regcache_read_pc (regcache);
- /* Don't bother if there's a breakpoint at PC that we'd hit
- immediately, and we're not waiting for this LWP. */
- if (!ptid_match (lp->ptid, *wait_ptid_p))
+ TRY
{
- if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
- return 0;
- }
+ CORE_ADDR pc = regcache_read_pc (regcache);
+ int leave_stopped = 0;
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
- target_pid_to_str (lp->ptid),
- paddress (gdbarch, pc),
- lp->step);
+ /* Don't bother if there's a breakpoint at PC that we'd hit
+ immediately, and we're not waiting for this LWP. */
+ if (!ptid_match (lp->ptid, *wait_ptid_p))
+ {
+ if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ leave_stopped = 1;
+ }
- linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
+ if (!leave_stopped)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "RSRL: resuming stopped-resumed LWP %s at "
+ "%s: step=%d\n",
+ target_pid_to_str (lp->ptid),
+ paddress (gdbarch, pc),
+ lp->step);
+
+ linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
+ }
+ }
+ CATCH (ex, RETURN_MASK_ERROR)
+ {
+ if (!check_ptrace_stopped_lwp_gone (lp))
+ throw_exception (ex);
+ }
+ END_CATCH
}
return 0;
specific_process, for example, see linux_nat_wait_1), and
meanwhile the event became uninteresting. Don't bother resuming
LWPs we're not going to wait for if they'd stop immediately. */
- if (non_stop)
+ if (target_is_non_stop_p ())
iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
return linux_thread_alive (ptid);
}
+/* Implement the to_update_thread_list target method for this
+ target. */
+
+static void
+linux_nat_update_thread_list (struct target_ops *ops)
+{
+ if (linux_supports_traceclone ())
+ {
+ /* With support for clone events, we add/delete threads from the
+ list as clone/exit events are processed, so just try deleting
+ exited threads still in the thread list. */
+ delete_exited_threads ();
+ }
+ else
+ /* No clone events; let prune_threads detect and delete exited
+ threads. */
+ prune_threads ();
+}
+
static char *
linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
static char *
linux_child_pid_to_exec_file (struct target_ops *self, int pid)
{
- static char buf[PATH_MAX];
- char name[PATH_MAX];
-
- xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
- memset (buf, 0, PATH_MAX);
- if (readlink (name, buf, PATH_MAX - 1) <= 0)
- strcpy (buf, name);
-
- return buf;
+ return linux_proc_pid_to_exec_file (pid);
}
/* Implement the to_xfer_partial interface for memory reads using the /proc
char filename[64];
if (object != TARGET_OBJECT_MEMORY || !readbuf)
- return 0;
+ return TARGET_XFER_EOF;
/* Don't bother for one word. */
if (len < 3 * sizeof (long))
Unfortunately some Red Hat kernels include the shared pending
queue but not the ShdPnd status field. */
- if (strncmp (buffer, "SigPnd:\t", 8) == 0)
+ if (startswith (buffer, "SigPnd:\t"))
add_line_to_sigset (buffer + 8, pending);
- else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
+ else if (startswith (buffer, "ShdPnd:\t"))
add_line_to_sigset (buffer + 8, pending);
- else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
+ else if (startswith (buffer, "SigBlk:\t"))
add_line_to_sigset (buffer + 8, blocked);
- else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
+ else if (startswith (buffer, "SigIgn:\t"))
add_line_to_sigset (buffer + 8, ignored);
}
return 1;
}
+/* to_always_non_stop_p implementation.  Defer to the target beneath
+   if it implements the method; otherwise default to always non-stop
+   being supported.  */
+
+static int
+linux_nat_always_non_stop_p (struct target_ops *self)
+{
+ if (linux_ops->to_always_non_stop_p != NULL)
+ return linux_ops->to_always_non_stop_p (linux_ops);
+ return 1;
+}
+
/* True if we want to support multi-process. To be removed when GDB
supports multi-exec. */
static void
linux_nat_terminal_inferior (struct target_ops *self)
{
- /* Like target_terminal_inferior, use target_can_async_p, not
- target_is_async_p, since at this point the target is not async
- yet. If it can async, then we know it will become async prior to
- resume. */
- if (!target_can_async_p ())
- {
- /* Async mode is disabled. */
- child_terminal_inferior (self);
- return;
- }
-
child_terminal_inferior (self);
/* Calls to target_terminal_*() are meant to be idempotent. */
async_terminal_is_ours = 1;
}
-static void (*async_client_callback) (enum inferior_event_type event_type,
- void *context);
-static void *async_client_context;
-
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
so we notice when any child changes state, and notify the
event-loop; it allows us to use sigsuspend in linux_nat_wait_1
static void
handle_target_event (int error, gdb_client_data client_data)
{
- (*async_client_callback) (INF_REG_EVENT, async_client_context);
+ inferior_event_handler (INF_REG_EVENT, NULL);
}
/* Create/destroy the target events pipe. Returns previous state. */
/* target_async implementation. */
static void
-linux_nat_async (struct target_ops *ops,
- void (*callback) (enum inferior_event_type event_type,
- void *context),
- void *context)
+linux_nat_async (struct target_ops *ops, int enable)
{
- if (callback != NULL)
+ if (enable)
{
- async_client_callback = callback;
- async_client_context = context;
if (!linux_async_pipe (1))
{
add_file_handler (linux_nat_event_pipe[0],
}
else
{
- async_client_callback = callback;
- async_client_context = context;
delete_file_handler (linux_nat_event_pipe[0]);
linux_async_pipe (0);
}
static void
linux_nat_stop (struct target_ops *self, ptid_t ptid)
+{
+ iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
+}
+
+static void
+linux_nat_interrupt (struct target_ops *self, ptid_t ptid)
{
if (non_stop)
iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
else
- linux_ops->to_stop (linux_ops, ptid);
+ linux_ops->to_interrupt (linux_ops, ptid);
}
static void
{
/* Unregister from the event loop. */
if (linux_nat_is_async_p (self))
- linux_nat_async (self, NULL, NULL);
+ linux_nat_async (self, 0);
if (linux_ops->to_close)
linux_ops->to_close (linux_ops);
return -1;
}
+/* Implementation of to_filesystem_is_local.  Returns nonzero if the
+   current inferior sees the same filesystem as GDB.  */
+
+static int
+linux_nat_filesystem_is_local (struct target_ops *ops)
+{
+ struct inferior *inf = current_inferior ();
+
+ /* With no usable inferior PID there is no namespace to compare
+    against, so report the filesystem as local.  */
+ if (inf->fake_pid_p || inf->pid == 0)
+ return 1;
+
+ /* Local iff the inferior shares GDB's mount namespace.  */
+ return linux_ns_same (inf->pid, LINUX_NS_MNT);
+}
+
+/* Convert the INF argument passed to a to_fileio_* method
+ to a process ID suitable for passing to its corresponding
+ linux_mntns_* function. If INF is non-NULL then the
+ caller is requesting the filesystem seen by INF. If INF
+ is NULL then the caller is requesting the filesystem seen
+ by GDB itself. We fall back to GDB's filesystem in the case
+ that INF is non-NULL but its PID is unknown. */
+
+static pid_t
+linux_nat_fileio_pid_of (struct inferior *inf)
+{
+ if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
+ return getpid ();
+ else
+ return inf->pid;
+}
+
+/* Implementation of to_fileio_open.  Open FILENAME in the filesystem
+   seen by INF (or by GDB if INF is NULL); return a host file
+   descriptor, or -1 with *TARGET_ERRNO set.  WARN_IF_SLOW is part of
+   the to_fileio_open interface but is unused by this implementation
+   (local opens are not expected to be slow).  */
+
+static int
+linux_nat_fileio_open (struct target_ops *self,
+ struct inferior *inf, const char *filename,
+ int flags, int mode, int warn_if_slow,
+ int *target_errno)
+{
+ int nat_flags;
+ mode_t nat_mode;
+ int fd;
+
+ /* Translate target fileio flags and mode to host values; reject
+    anything we cannot represent.  */
+ if (fileio_to_host_openflags (flags, &nat_flags) == -1
+ || fileio_to_host_mode (mode, &nat_mode) == -1)
+ {
+ *target_errno = FILEIO_EINVAL;
+ return -1;
+ }
+
+ /* Open within the inferior's mount namespace, close-on-exec.  */
+ fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
+ filename, nat_flags, nat_mode);
+ if (fd == -1)
+ *target_errno = host_to_fileio_error (errno);
+
+ return fd;
+}
+
+/* Implementation of to_fileio_readlink.  Return a freshly xmalloc'd,
+   NUL-terminated copy of the link target of FILENAME as seen by INF,
+   or NULL with *TARGET_ERRNO set on failure.  Caller owns and frees
+   the returned string.  */
+
+static char *
+linux_nat_fileio_readlink (struct target_ops *self,
+ struct inferior *inf, const char *filename,
+ int *target_errno)
+{
+ char buf[PATH_MAX];
+ int len;
+ char *ret;
+
+ len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
+ filename, buf, sizeof (buf));
+ if (len < 0)
+ {
+ *target_errno = host_to_fileio_error (errno);
+ return NULL;
+ }
+
+ /* Presumably readlink(2) semantics: BUF is not NUL-terminated, so
+    copy LEN bytes and terminate explicitly.  */
+ ret = xmalloc (len + 1);
+ memcpy (ret, buf, len);
+ ret[len] = '\0';
+ return ret;
+}
+
+/* Implementation of to_fileio_unlink.  Unlink FILENAME in the
+   filesystem seen by INF (or by GDB if INF is NULL); return 0 on
+   success or -1 with *TARGET_ERRNO set.  */
+
+static int
+linux_nat_fileio_unlink (struct target_ops *self,
+ struct inferior *inf, const char *filename,
+ int *target_errno)
+{
+ int ret;
+
+ ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
+ filename);
+ if (ret == -1)
+ *target_errno = host_to_fileio_error (errno);
+
+ return ret;
+}
+
void
linux_nat_add_target (struct target_ops *t)
{
t->to_kill = linux_nat_kill;
t->to_mourn_inferior = linux_nat_mourn_inferior;
t->to_thread_alive = linux_nat_thread_alive;
+ t->to_update_thread_list = linux_nat_update_thread_list;
t->to_pid_to_str = linux_nat_pid_to_str;
t->to_thread_name = linux_nat_thread_name;
t->to_has_thread_control = tc_schedlock;
t->to_thread_address_space = linux_nat_thread_address_space;
t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
t->to_stopped_data_address = linux_nat_stopped_data_address;
+ /* Report whether stops were caused by software or hardware
+    breakpoints.  */
+ t->to_stopped_by_sw_breakpoint = linux_nat_stopped_by_sw_breakpoint;
+ t->to_supports_stopped_by_sw_breakpoint = linux_nat_supports_stopped_by_sw_breakpoint;
+ t->to_stopped_by_hw_breakpoint = linux_nat_stopped_by_hw_breakpoint;
+ t->to_supports_stopped_by_hw_breakpoint = linux_nat_supports_stopped_by_hw_breakpoint;
t->to_can_async_p = linux_nat_can_async_p;
t->to_is_async_p = linux_nat_is_async_p;
t->to_supports_non_stop = linux_nat_supports_non_stop;
+ t->to_always_non_stop_p = linux_nat_always_non_stop_p;
t->to_async = linux_nat_async;
t->to_terminal_inferior = linux_nat_terminal_inferior;
t->to_terminal_ours = linux_nat_terminal_ours;
super_close = t->to_close;
t->to_close = linux_nat_close;
- /* Methods for non-stop support. */
t->to_stop = linux_nat_stop;
+ t->to_interrupt = linux_nat_interrupt;
t->to_supports_multi_process = linux_nat_supports_multi_process;
t->to_core_of_thread = linux_nat_core_of_thread;
+ /* Filesystem and fileio methods that operate in the inferior's
+    mount namespace.  */
+ t->to_filesystem_is_local = linux_nat_filesystem_is_local;
+ t->to_fileio_open = linux_nat_fileio_open;
+ t->to_fileio_readlink = linux_nat_fileio_readlink;
+ t->to_fileio_unlink = linux_nat_fileio_unlink;
+
/* We don't change the stratum; this target will sit at
process_stratum and thread_db will set at thread_stratum. This
is a little strange, since this is a multi-threaded-capable
return 1;
}
+/* See nat/linux-nat.h. */
+
+ptid_t
+current_lwp_ptid (void)
+{
+ /* Callers must only use this when inferior_ptid actually
+    identifies an LWP.  */
+ gdb_assert (ptid_lwp_p (inferior_ptid));
+ return inferior_ptid;
+}
+
/* Provide a prototype to silence -Wmissing-prototypes. */
extern initialize_file_ftype _initialize_linux_nat;
show_debug_linux_nat,
&setdebuglist, &showdebuglist);
+ add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
+ &debug_linux_namespaces, _("\
+Set debugging of GNU/Linux namespaces module."), _("\
+Show debugging of GNU/Linux namespaces module."), _("\
+Enables printf debugging output."),
+ NULL,
+ NULL,
+ &setdebuglist, &showdebuglist);
+
/* Save this mask as the default. */
sigprocmask (SIG_SETMASK, NULL, &normal_mask);
sigdelset (&suspend_mask, SIGCHLD);
sigemptyset (&blocked_mask);
-
- /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
- support read-only process state. */
- linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
- | PTRACE_O_TRACEVFORKDONE
- | PTRACE_O_TRACEVFORK
- | PTRACE_O_TRACEFORK
- | PTRACE_O_TRACEEXEC);
}
\f