+/* This function should only be called if the LWP got a SIGTRAP.
+
+ Handle any tracepoint steps or hits. Return 1 if a tracepoint
+ event was handled, 0 otherwise. */
+
+static int
+handle_tracepoints (struct lwp_info *lwp)
+{
+ struct thread_info *tinfo = get_lwp_thread (lwp);
+ int tpoint_related_event = 0;
+
+ /* If this tracepoint hit causes a tracing stop, we'll immediately
+ uninsert tracepoints. To do this, we temporarily pause all
+ threads, unpatch away, and then unpause threads. We need to make
+ sure the unpausing doesn't resume LWP too. */
+ lwp->suspended++;
+
+ /* And we need to be sure that any all-threads-stopping doesn't try
+ to move threads out of the jump pads, as it could deadlock the
+ inferior (LWP could be in the jump pad, maybe even holding the
+ lock.) */
+
+ /* Do any necessary step collect actions. */
+ tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
+
+ /* Handle internal breakpoints belonging to the tracepoint
+ machinery, if any (presumably breakpoints planted by the
+ tracing code itself -- confirm against handle_tracepoint_bkpts). */
+ tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
+
+ /* See if we just hit a tracepoint and do its main collect
+ actions. */
+ tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
+
+ /* Undo the suspend bump from above; the count must balance back to
+ zero, as asserted below. */
+ lwp->suspended--;
+
+ gdb_assert (lwp->suspended == 0);
+ gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
+
+ if (tpoint_related_event)
+ {
+ if (debug_threads)
+ fprintf (stderr, "got a tracepoint event\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Convenience wrapper. Returns true if LWP is presently collecting a
+ fast tracepoint. STATUS is an out parameter presumably filled in
+ by fast_tracepoint_collecting when a collect is in progress --
+ confirm against its definition. */
+
+static int
+linux_fast_tracepoint_collecting (struct lwp_info *lwp,
+ struct fast_tpoint_collect_status *status)
+{
+ CORE_ADDR thread_area;
+
+ /* Without a way to fetch the per-thread area on this target, we
+ cannot tell which thread is collecting, so report "not
+ collecting". */
+ if (the_low_target.get_thread_area == NULL)
+ return 0;
+
+ /* Get the thread area address. This is used to recognize which
+ thread is which when tracing with the in-process agent library.
+ We don't read anything from the address, and treat it as opaque;
+ it's the address itself that we assume is unique per-thread. */
+ if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
+ return 0;
+
+ return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
+}
+
+/* If LWP stopped inside a fast tracepoint jump pad, arrange for it
+ to be single-stepped back out. Returns 1 if the LWP needs to move
+ out of the jump pad (caller resumes it), 0 otherwise. WSTAT may
+ be NULL, or point at the LWP's wait status.
+
+ The reason we resume in the caller, is because we want to be able
+ to pass lwp->status_pending as WSTAT, and we need to clear
+ status_pending_p before resuming, otherwise, linux_resume_one_lwp
+ refuses to resume. */
+
+static int
+maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
+{
+ struct thread_info *saved_inferior;
+
+ /* Temporarily switch the current thread to LWP's; restored by the
+ caller or implicitly on the next switch -- NOTE(review):
+ saved_inferior is never restored in this function; confirm the
+ caller re-selects the right thread. */
+ saved_inferior = current_inferior;
+ current_inferior = get_lwp_thread (lwp);
+
+ /* Only consider moving out of the pad when the stop is not a
+ SIGTRAP (SIGTRAP stops are handled by the tracepoint code), and
+ fast tracepoints are actually in play. */
+ if ((wstat == NULL
+ || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
+ && supports_fast_tracepoints ()
+ && in_process_agent_loaded ())
+ {
+ struct fast_tpoint_collect_status status;
+ int r;
+
+ if (debug_threads)
+ fprintf (stderr, "\
+Checking whether LWP %ld needs to move out of the jump pad.\n",
+ lwpid_of (lwp));
+
+ r = linux_fast_tracepoint_collecting (lwp, &status);
+
+ /* Synchronous, kernel-sent faults (SIGILL/SIGFPE/SIGSEGV/SIGBUS)
+ get special treatment in the else branch below; everything
+ else can simply be deferred until out of the pad. */
+ if (wstat == NULL
+ || (WSTOPSIG (*wstat) != SIGILL
+ && WSTOPSIG (*wstat) != SIGFPE
+ && WSTOPSIG (*wstat) != SIGSEGV
+ && WSTOPSIG (*wstat) != SIGBUS))
+ {
+ lwp->collecting_fast_tracepoint = r;
+
+ if (r != 0)
+ {
+ if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
+ {
+ /* Haven't executed the original instruction yet.
+ Set breakpoint there, and wait till it's hit,
+ then single-step until exiting the jump pad. */
+ lwp->exit_jump_pad_bkpt
+ = set_breakpoint_at (status.adjusted_insn_addr, NULL);
+ }
+
+ if (debug_threads)
+ fprintf (stderr, "\
+Checking whether LWP %ld needs to move out of the jump pad...it does\n",
+ lwpid_of (lwp));
+
+ return 1;
+ }
+ }
+ else
+ {
+ /* If we get a synchronous signal while collecting, *and*
+ while executing the (relocated) original instruction,
+ reset the PC to point at the tpoint address, before
+ reporting to GDB. Otherwise, it's an IPA lib bug: just
+ report the signal to GDB, and pray for the best. */
+
+ lwp->collecting_fast_tracepoint = 0;
+
+ if (r != 0
+ && (status.adjusted_insn_addr <= lwp->stop_pc
+ && lwp->stop_pc < status.adjusted_insn_addr_end))
+ {
+ siginfo_t info;
+ struct regcache *regcache;
+
+ /* The si_addr on a few signals references the address
+ of the faulting instruction. Adjust that as
+ well. */
+ if ((WSTOPSIG (*wstat) == SIGILL
+ || WSTOPSIG (*wstat) == SIGFPE
+ || WSTOPSIG (*wstat) == SIGBUS
+ || WSTOPSIG (*wstat) == SIGSEGV)
+ && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
+ /* Final check just to make sure we don't clobber
+ the siginfo of non-kernel-sent signals. */
+ && (uintptr_t) info.si_addr == lwp->stop_pc)
+ {
+ info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
+ ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
+ }
+
+ /* Rewind the PC back to the tracepoint address so GDB
+ reports the fault at the user-visible location. */
+ regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
+ (*the_low_target.set_pc) (regcache, status.tpoint_addr);
+ lwp->stop_pc = status.tpoint_addr;
+
+ /* Cancel any fast tracepoint lock this thread was
+ holding. */
+ force_unlock_trace_buffer ();
+ }
+
+ /* The collect was abandoned; tear down the exit-pad
+ breakpoint under an all-stop so no thread trips it. */
+ if (lwp->exit_jump_pad_bkpt != NULL)
+ {
+ if (debug_threads)
+ fprintf (stderr,
+ "Cancelling fast exit-jump-pad: removing bkpt. "
+ "stopping all threads momentarily.\n");
+
+ stop_all_lwps (1, lwp);
+ cancel_breakpoints ();
+
+ delete_breakpoint (lwp->exit_jump_pad_bkpt);
+ lwp->exit_jump_pad_bkpt = NULL;
+
+ unstop_all_lwps (1, lwp);
+
+ gdb_assert (lwp->suspended >= 0);
+ }
+ }
+ }
+
+ if (debug_threads)
+ fprintf (stderr, "\
+Checking whether LWP %ld needs to move out of the jump pad...no\n",
+ lwpid_of (lwp));
+ return 0;
+}
+
+/* Enqueue one signal in the "signals to report later when out of the
+ jump pad" list. WSTAT must be a stopped wait status for LWP. The
+ list is stored newest-first (each node's `prev' points at the
+ previously enqueued, i.e. older, entry). */
+
+static void
+enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
+{
+ struct pending_signals *p_sig;
+
+ if (debug_threads)
+ fprintf (stderr, "\
+Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
+
+ if (debug_threads)
+ {
+ struct pending_signals *sig;
+
+ for (sig = lwp->pending_signals_to_report;
+ sig != NULL;
+ sig = sig->prev)
+ fprintf (stderr,
+ " Already queued %d\n",
+ sig->signal);
+
+ fprintf (stderr, " (no more currently queued signals)\n");
+ }
+
+ p_sig = xmalloc (sizeof (*p_sig));
+ p_sig->prev = lwp->pending_signals_to_report;
+ p_sig->signal = WSTOPSIG (*wstat);
+ /* Zero the siginfo first: if PTRACE_GETSIGINFO fails, si_signo
+ stays 0, which dequeue_one_deferred_signal uses as the "no
+ siginfo captured" sentinel. The ptrace result is deliberately
+ ignored for that reason. */
+ memset (&p_sig->info, 0, sizeof (siginfo_t));
+ ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
+
+ lwp->pending_signals_to_report = p_sig;
+}
+
+/* Dequeue one signal from the "signals to report later when out of
+ the jump pad" list. Returns 1 and stores a stop status in *WSTAT
+ if a deferred signal was dequeued, 0 if the list is empty. Signals
+ are dequeued oldest-first, preserving original delivery order. */
+
+static int
+dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
+{
+ if (lwp->pending_signals_to_report != NULL)
+ {
+ struct pending_signals **p_sig;
+
+ /* The list head is the newest entry; walk the `prev' chain to
+ the tail to pop the oldest one (FIFO). */
+ p_sig = &lwp->pending_signals_to_report;
+ while ((*p_sig)->prev != NULL)
+ p_sig = &(*p_sig)->prev;
+
+ *wstat = W_STOPCODE ((*p_sig)->signal);
+ /* si_signo == 0 means no siginfo was captured at enqueue time
+ (see enqueue_one_deferred_signal), so skip restoring it. */
+ if ((*p_sig)->info.si_signo != 0)
+ ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
+ free (*p_sig);
+ *p_sig = NULL;
+
+ if (debug_threads)
+ fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
+ WSTOPSIG (*wstat), lwpid_of (lwp));
+
+ if (debug_threads)
+ {
+ struct pending_signals *sig;
+
+ for (sig = lwp->pending_signals_to_report;
+ sig != NULL;
+ sig = sig->prev)
+ fprintf (stderr,
+ " Still queued %d\n",
+ sig->signal);
+
+ fprintf (stderr, " (no more queued signals)\n");
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+