gdb: pass inferior to check_pid_namespace_match
gdb/linux-nat.c
/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>    /* for elf_gregset etc.  */
#include "elf-bfd.h"       /* for elfcore_write_* */
#include "gregset.h"       /* for gregset */
#include "gdbcore.h"       /* for get_exec_file */
#include <ctype.h>         /* for isdigit */
#include <sys/stat.h>      /* for struct stat */
#include <fcntl.h>         /* for O_RDONLY */
#include "inf-loop.h"
#include "gdbsupport/event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "gdbsupport/buffer.h"
#include "target-descriptions.h"
#include "gdbsupport/filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/common-debug.h"

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling non-blocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it cannot be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

Exec events
===========

The case of a thread group (process) with 3 or more threads, where a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.  */
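
/* Added illustration, not part of GDB: a minimal sketch of the
   sync-mode scheme described above.  It assumes the caller keeps
   SIGCHLD blocked (as block_child_signals below arranges) and passes
   a mask with SIGCHLD unblocked, like the suspend_mask defined later
   in this file.  The function and parameter names are invented for
   the example, and error handling (e.g., ECHILD) is omitted.  */

static pid_t
example_wait_for_child_event (int *status, const sigset_t *unblock_mask)
{
  for (;;)
    {
      /* Non-blocking check across all children: pid -1 plus WNOHANG.  */
      pid_t pid = waitpid (-1, status, __WALL | WNOHANG);

      if (pid > 0)
	return pid;	/* There is an event to process.  */

      /* Nothing yet: atomically unblock SIGCHLD and sleep.  A SIGCHLD
	 that arrived while it was blocked is already pending, so
	 sigsuspend returns immediately and we loop back to waitpid.  */
      sigsuspend (unblock_mask);
    }
}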

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

struct linux_nat_target *linux_target;

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

/* Print a debug statement.  Should be used through linux_nat_debug_printf.  */

static void ATTRIBUTE_PRINTF (2, 3)
linux_nat_debug_printf_1 (const char *func_name, const char *fmt, ...)
{
  va_list ap;
  va_start (ap, fmt);
  debug_prefixed_vprintf ("linux-nat", func_name, fmt, ap);
  va_end (ap);
}

#define linux_nat_debug_printf(fmt, ...) \
  do \
    { \
      if (debug_linux_nat) \
	linux_nat_debug_printf_1 (__func__, fmt, ##__VA_ARGS__); \
    } \
  while (0)
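
/* Added usage note (illustration, not in the original source): callers
   invoke the macro directly; the helper above prefixes each message
   with "linux-nat" and the calling function's name, and nothing is
   printed unless debug_linux_nat is non-zero.  For example:

     linux_nat_debug_printf ("waitpid %d received %s",
			     pid, status_to_str (status));  */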

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Whether target_thread_events is in effect.  */
static int report_thread_events;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the
   event pipe, so that the select/poll in the event loop realizes we
   have something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

7feb7d06
PA
280static int kill_lwp (int lwpid, int signo);
281
d3a70e03 282static int stop_callback (struct lwp_info *lp);
7feb7d06
PA
283
284static void block_child_signals (sigset_t *prev_mask);
285static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
286
287struct lwp_info;
288static struct lwp_info *add_lwp (ptid_t ptid);
289static void purge_lwp_list (int pid);
4403d8e9 290static void delete_lwp (ptid_t ptid);
2277426b
PA
291static struct lwp_info *find_lwp_pid (ptid_t ptid);
292
8a99810d
PA
293static int lwp_status_pending_p (struct lwp_info *lp);
294
e7ad2f14
PA
295static void save_stop_reason (struct lwp_info *lp);
296
cff068da
GB
297\f
298/* LWP accessors. */
299
300/* See nat/linux-nat.h. */
301
302ptid_t
303ptid_of_lwp (struct lwp_info *lwp)
304{
305 return lwp->ptid;
306}
307
308/* See nat/linux-nat.h. */
309
4b134ca1
GB
310void
311lwp_set_arch_private_info (struct lwp_info *lwp,
312 struct arch_lwp_info *info)
313{
314 lwp->arch_private = info;
315}
316
317/* See nat/linux-nat.h. */
318
319struct arch_lwp_info *
320lwp_arch_private_info (struct lwp_info *lwp)
321{
322 return lwp->arch_private;
323}
324
325/* See nat/linux-nat.h. */
326
cff068da
GB
327int
328lwp_is_stopped (struct lwp_info *lwp)
329{
330 return lwp->stopped;
331}
332
333/* See nat/linux-nat.h. */
334
335enum target_stop_reason
336lwp_stop_reason (struct lwp_info *lwp)
337{
338 return lwp->stop_reason;
339}
340
0e00e962
AA
341/* See nat/linux-nat.h. */
342
343int
344lwp_is_stepping (struct lwp_info *lwp)
345{
346 return lwp->step;
347}
348
ae087d01
DJ
349\f
350/* Trivial list manipulation functions to keep track of a list of
351 new stopped processes. */
352static void
3d799a95 353add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01 354{
8d749320 355 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
e0881a8e 356
ae087d01 357 new_pid->pid = pid;
3d799a95 358 new_pid->status = status;
ae087d01
DJ
359 new_pid->next = *listp;
360 *listp = new_pid;
361}
362
363static int
46a96992 364pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
365{
366 struct simple_pid_list **p;
367
368 for (p = listp; *p != NULL; p = &(*p)->next)
369 if ((*p)->pid == pid)
370 {
371 struct simple_pid_list *next = (*p)->next;
e0881a8e 372
46a96992 373 *statusp = (*p)->status;
ae087d01
DJ
374 xfree (*p);
375 *p = next;
376 return 1;
377 }
378 return 0;
379}
380
de0d863e
DB
381/* Return the ptrace options that we want to try to enable. */
382
383static int
384linux_nat_ptrace_options (int attached)
385{
386 int options = 0;
387
388 if (!attached)
389 options |= PTRACE_O_EXITKILL;
390
391 options |= (PTRACE_O_TRACESYSGOOD
392 | PTRACE_O_TRACEVFORKDONE
393 | PTRACE_O_TRACEVFORK
394 | PTRACE_O_TRACEFORK
395 | PTRACE_O_TRACEEXEC);
396
397 return options;
398}
399
1b919490
VB
400/* Initialize ptrace and procfs warnings and check for supported
401 ptrace features given PID.
beed38b8
JB
402
403 ATTACHED should be nonzero iff we attached to the inferior. */
3993f6b1
DJ
404
405static void
1b919490 406linux_init_ptrace_procfs (pid_t pid, int attached)
3993f6b1 407{
de0d863e
DB
408 int options = linux_nat_ptrace_options (attached);
409
410 linux_enable_event_reporting (pid, options);
96d7229d 411 linux_ptrace_init_warnings ();
1b919490 412 linux_proc_init_warnings ();
4de4c07c
DJ
413}
414
f6ac5f3d
PA
415linux_nat_target::~linux_nat_target ()
416{}
417
418void
419linux_nat_target::post_attach (int pid)
4de4c07c 420{
1b919490 421 linux_init_ptrace_procfs (pid, 1);
4de4c07c
DJ
422}
423
f6ac5f3d
PA
424void
425linux_nat_target::post_startup_inferior (ptid_t ptid)
4de4c07c 426{
1b919490 427 linux_init_ptrace_procfs (ptid.pid (), 0);
4de4c07c
DJ
428}
429
4403d8e9
JK
430/* Return the number of known LWPs in the tgid given by PID. */
431
432static int
433num_lwps (int pid)
434{
435 int count = 0;
436 struct lwp_info *lp;
437
438 for (lp = lwp_list; lp; lp = lp->next)
e99b03dc 439 if (lp->ptid.pid () == pid)
4403d8e9
JK
440 count++;
441
442 return count;
443}
444
169bb27b 445/* Deleter for lwp_info unique_ptr specialisation. */
4403d8e9 446
169bb27b 447struct lwp_deleter
4403d8e9 448{
169bb27b
AB
449 void operator() (struct lwp_info *lwp) const
450 {
451 delete_lwp (lwp->ptid);
452 }
453};
4403d8e9 454
169bb27b
AB
455/* A unique_ptr specialisation for lwp_info. */
456
457typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
4403d8e9 458
d83ad864
DB
459/* Target hook for follow_fork. On entry inferior_ptid must be the
460 ptid of the followed inferior. At return, inferior_ptid will be
461 unchanged. */
462
5ab2fbf1
SM
463bool
464linux_nat_target::follow_fork (bool follow_child, bool detach_fork)
3993f6b1 465{
d83ad864 466 if (!follow_child)
4de4c07c 467 {
6c95b8df 468 struct lwp_info *child_lp = NULL;
d83ad864 469 int has_vforked;
79639e11 470 ptid_t parent_ptid, child_ptid;
d83ad864
DB
471 int parent_pid, child_pid;
472
473 has_vforked = (inferior_thread ()->pending_follow.kind
474 == TARGET_WAITKIND_VFORKED);
79639e11
PA
475 parent_ptid = inferior_ptid;
476 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
e38504b3
TT
477 parent_pid = parent_ptid.lwp ();
478 child_pid = child_ptid.lwp ();
4de4c07c 479
1777feb0 480 /* We're already attached to the parent, by default. */
2989a365 481 child_lp = add_lwp (child_ptid);
d83ad864
DB
482 child_lp->stopped = 1;
483 child_lp->last_resume_kind = resume_stop;
4de4c07c 484
ac264b3b
MS
485 /* Detach new forked process? */
486 if (detach_fork)
f75c00e4 487 {
95347337
AB
488 int child_stop_signal = 0;
489 bool detach_child = true;
4403d8e9 490
169bb27b
AB
491 /* Move CHILD_LP into a unique_ptr and clear the source pointer
492 to prevent us doing anything stupid with it. */
493 lwp_info_up child_lp_ptr (child_lp);
494 child_lp = nullptr;
495
496 linux_target->low_prepare_to_resume (child_lp_ptr.get ());
c077881a
HZ
497
498 /* When debugging an inferior in an architecture that supports
499 hardware single stepping on a kernel without commit
500 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
501 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
502 set if the parent process had them set.
503 To work around this, single step the child process
504 once before detaching to clear the flags. */
505
2fd9d7ca
PA
506 /* Note that we consult the parent's architecture instead of
507 the child's because there's no inferior for the child at
508 this point. */
c077881a 509 if (!gdbarch_software_single_step_p (target_thread_architecture
2fd9d7ca 510 (parent_ptid)))
c077881a 511 {
95347337
AB
512 int status;
513
c077881a
HZ
514 linux_disable_event_reporting (child_pid);
515 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
516 perror_with_name (_("Couldn't do single step"));
517 if (my_waitpid (child_pid, &status, 0) < 0)
518 perror_with_name (_("Couldn't wait vfork process"));
95347337
AB
519 else
520 {
521 detach_child = WIFSTOPPED (status);
522 child_stop_signal = WSTOPSIG (status);
523 }
c077881a
HZ
524 }
525
95347337 526 if (detach_child)
9caaaa83 527 {
95347337 528 int signo = child_stop_signal;
9caaaa83 529
9caaaa83
PA
530 if (signo != 0
531 && !signal_pass_state (gdb_signal_from_host (signo)))
532 signo = 0;
533 ptrace (PTRACE_DETACH, child_pid, 0, signo);
534 }
ac264b3b
MS
535 }
536 else
537 {
5b6d1e4f
PA
538 /* Switching inferior_ptid is not enough, because then
539 inferior_thread () would crash by not finding the thread
540 in the current inferior. */
541 scoped_restore_current_thread restore_current_thread;
542 thread_info *child = find_thread_ptid (this, child_ptid);
543 switch_to_thread (child);
2989a365 544
6c95b8df 545 /* Let the thread_db layer learn about this new process. */
2277426b 546 check_for_thread_db ();
ac264b3b 547 }
9016a515
DJ
548
549 if (has_vforked)
550 {
3ced3da4 551 struct lwp_info *parent_lp;
6c95b8df 552
79639e11 553 parent_lp = find_lwp_pid (parent_ptid);
96d7229d 554 gdb_assert (linux_supports_tracefork () >= 0);
3ced3da4 555
96d7229d 556 if (linux_supports_tracevforkdone ())
9016a515 557 {
9327494e
SM
558 linux_nat_debug_printf ("waiting for VFORK_DONE on %d",
559 parent_pid);
3ced3da4 560 parent_lp->stopped = 1;
9016a515 561
6c95b8df
PA
562 /* We'll handle the VFORK_DONE event like any other
563 event, in target_wait. */
9016a515
DJ
564 }
565 else
566 {
567 /* We can't insert breakpoints until the child has
568 finished with the shared memory region. We need to
569 wait until that happens. Ideal would be to just
570 call:
571 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
572 - waitpid (parent_pid, &status, __WALL);
573 However, most architectures can't handle a syscall
574 being traced on the way out if it wasn't traced on
575 the way in.
576
577 We might also think to loop, continuing the child
578 until it exits or gets a SIGTRAP. One problem is
579 that the child might call ptrace with PTRACE_TRACEME.
580
581 There's no simple and reliable way to figure out when
582 the vforked child will be done with its copy of the
583 shared memory. We could step it out of the syscall,
584 two instructions, let it go, and then single-step the
585 parent once. When we have hardware single-step, this
586 would work; with software single-step it could still
587 be made to work but we'd have to be able to insert
588 single-step breakpoints in the child, and we'd have
589 to insert -just- the single-step breakpoint in the
590 parent. Very awkward.
591
592 In the end, the best we can do is to make sure it
593 runs for a little while. Hopefully it will be out of
594 range of any breakpoints we reinsert. Usually this
595 is only the single-step breakpoint at vfork's return
596 point. */
597
9327494e 598 linux_nat_debug_printf ("no VFORK_DONE support, sleeping a bit");
6c95b8df 599
9016a515 600 usleep (10000);
9016a515 601
6c95b8df
PA
602 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
603 and leave it pending. The next linux_nat_resume call
604 will notice a pending event, and bypasses actually
605 resuming the inferior. */
3ced3da4
PA
606 parent_lp->status = 0;
607 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
608 parent_lp->stopped = 1;
6c95b8df
PA
609
610 /* If we're in async mode, need to tell the event loop
611 there's something here to process. */
d9d41e78 612 if (target_is_async_p ())
6c95b8df
PA
613 async_file_mark ();
614 }
9016a515 615 }
4de4c07c 616 }
3993f6b1 617 else
4de4c07c 618 {
3ced3da4 619 struct lwp_info *child_lp;
4de4c07c 620
3ced3da4
PA
621 child_lp = add_lwp (inferior_ptid);
622 child_lp->stopped = 1;
25289eb2 623 child_lp->last_resume_kind = resume_stop;
6c95b8df 624
6c95b8df 625 /* Let the thread_db layer learn about this new process. */
ef29ce1a 626 check_for_thread_db ();
4de4c07c
DJ
627 }
628
5ab2fbf1 629 return false;
4de4c07c
DJ
630}
631
4de4c07c 632\f
f6ac5f3d
PA
633int
634linux_nat_target::insert_fork_catchpoint (int pid)
4de4c07c 635{
96d7229d 636 return !linux_supports_tracefork ();
3993f6b1
DJ
637}
638
f6ac5f3d
PA
639int
640linux_nat_target::remove_fork_catchpoint (int pid)
eb73ad13
PA
641{
642 return 0;
643}
644
f6ac5f3d
PA
645int
646linux_nat_target::insert_vfork_catchpoint (int pid)
3993f6b1 647{
96d7229d 648 return !linux_supports_tracefork ();
3993f6b1
DJ
649}
650
f6ac5f3d
PA
651int
652linux_nat_target::remove_vfork_catchpoint (int pid)
eb73ad13
PA
653{
654 return 0;
655}
656
f6ac5f3d
PA
657int
658linux_nat_target::insert_exec_catchpoint (int pid)
3993f6b1 659{
96d7229d 660 return !linux_supports_tracefork ();
3993f6b1
DJ
661}
662
f6ac5f3d
PA
663int
664linux_nat_target::remove_exec_catchpoint (int pid)
eb73ad13
PA
665{
666 return 0;
667}
668
f6ac5f3d
PA
669int
670linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
671 gdb::array_view<const int> syscall_counts)
a96d9b2e 672{
96d7229d 673 if (!linux_supports_tracesysgood ())
77b06cd7
TJB
674 return 1;
675
a96d9b2e
SDJ
676 /* On GNU/Linux, we ignore the arguments. It means that we only
677 enable the syscall catchpoints, but do not disable them.
77b06cd7 678
649a140c 679 Also, we do not use the `syscall_counts' information because we do not
a96d9b2e
SDJ
680 filter system calls here. We let GDB do the logic for us. */
681 return 0;
682}
683
774113b0
PA
684/* List of known LWPs, keyed by LWP PID. This speeds up the common
685 case of mapping a PID returned from the kernel to our corresponding
686 lwp_info data structure. */
687static htab_t lwp_lwpid_htab;
688
689/* Calculate a hash from a lwp_info's LWP PID. */
690
691static hashval_t
692lwp_info_hash (const void *ap)
693{
694 const struct lwp_info *lp = (struct lwp_info *) ap;
e38504b3 695 pid_t pid = lp->ptid.lwp ();
774113b0
PA
696
697 return iterative_hash_object (pid, 0);
698}
699
700/* Equality function for the lwp_info hash table. Compares the LWP's
701 PID. */
702
703static int
704lwp_lwpid_htab_eq (const void *a, const void *b)
705{
706 const struct lwp_info *entry = (const struct lwp_info *) a;
707 const struct lwp_info *element = (const struct lwp_info *) b;
708
e38504b3 709 return entry->ptid.lwp () == element->ptid.lwp ();
774113b0
PA
710}
711
712/* Create the lwp_lwpid_htab hash table. */
713
714static void
715lwp_lwpid_htab_create (void)
716{
717 lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
718}
719
720/* Add LP to the hash table. */
721
722static void
723lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
724{
725 void **slot;
726
727 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
728 gdb_assert (slot != NULL && *slot == NULL);
729 *slot = lp;
730}
731
732/* Head of doubly-linked list of known LWPs. Sorted by reverse
733 creation order. This order is assumed in some cases. E.g.,
 734 reaping status after killing all lwps of a process: the leader LWP
735 must be reaped last. */
9f0bdab8 736struct lwp_info *lwp_list;
774113b0
PA
737
738/* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
739
740static void
741lwp_list_add (struct lwp_info *lp)
742{
743 lp->next = lwp_list;
744 if (lwp_list != NULL)
745 lwp_list->prev = lp;
746 lwp_list = lp;
747}
748
749/* Remove LP from sorted-by-reverse-creation-order doubly-linked
750 list. */
751
752static void
753lwp_list_remove (struct lwp_info *lp)
754{
755 /* Remove from sorted-by-creation-order list. */
756 if (lp->next != NULL)
757 lp->next->prev = lp->prev;
758 if (lp->prev != NULL)
759 lp->prev->next = lp->next;
760 if (lp == lwp_list)
761 lwp_list = lp->next;
762}
763
d6b0e80f
AC
764\f
765
d6b0e80f
AC
766/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
767 _initialize_linux_nat. */
768static sigset_t suspend_mask;
769
7feb7d06
PA
770/* Signals to block to make that sigsuspend work. */
771static sigset_t blocked_mask;
772
773/* SIGCHLD action. */
774struct sigaction sigchld_action;
b84876c2 775
7feb7d06
PA
776/* Block child signals (SIGCHLD and linux threads signals), and store
777 the previous mask in PREV_MASK. */
84e46146 778
7feb7d06
PA
779static void
780block_child_signals (sigset_t *prev_mask)
781{
782 /* Make sure SIGCHLD is blocked. */
783 if (!sigismember (&blocked_mask, SIGCHLD))
784 sigaddset (&blocked_mask, SIGCHLD);
785
21987b9c 786 gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
7feb7d06
PA
787}
788
789/* Restore child signals mask, previously returned by
790 block_child_signals. */
791
792static void
793restore_child_signals_mask (sigset_t *prev_mask)
794{
21987b9c 795 gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
7feb7d06 796}
2455069d
UW
797
798/* Mask of signals to pass directly to the inferior. */
799static sigset_t pass_mask;
800
801/* Update signals to pass to the inferior. */
f6ac5f3d 802void
adc6a863
PA
803linux_nat_target::pass_signals
804 (gdb::array_view<const unsigned char> pass_signals)
2455069d
UW
805{
806 int signo;
807
808 sigemptyset (&pass_mask);
809
810 for (signo = 1; signo < NSIG; signo++)
811 {
2ea28649 812 int target_signo = gdb_signal_from_host (signo);
adc6a863 813 if (target_signo < pass_signals.size () && pass_signals[target_signo])
2455069d
UW
814 sigaddset (&pass_mask, signo);
815 }
816}
817
d6b0e80f
AC
818\f
819
820/* Prototypes for local functions. */
d3a70e03
TT
821static int stop_wait_callback (struct lwp_info *lp);
822static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
ced2dffb 823static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
710151dd 824
d6b0e80f 825\f
d6b0e80f 826
7b50312a
PA
827/* Destroy and free LP. */
828
829static void
830lwp_free (struct lwp_info *lp)
831{
466eecee 832 /* Let the arch specific bits release arch_lwp_info. */
135340af 833 linux_target->low_delete_thread (lp->arch_private);
466eecee 834
7b50312a
PA
835 xfree (lp);
836}
837
774113b0 838/* Traversal function for purge_lwp_list. */
d90e17a7 839
774113b0
PA
840static int
841lwp_lwpid_htab_remove_pid (void **slot, void *info)
d90e17a7 842{
774113b0
PA
843 struct lwp_info *lp = (struct lwp_info *) *slot;
844 int pid = *(int *) info;
d90e17a7 845
e99b03dc 846 if (lp->ptid.pid () == pid)
d90e17a7 847 {
774113b0
PA
848 htab_clear_slot (lwp_lwpid_htab, slot);
849 lwp_list_remove (lp);
850 lwp_free (lp);
851 }
d90e17a7 852
774113b0
PA
853 return 1;
854}
d90e17a7 855
774113b0
PA
856/* Remove all LWPs belong to PID from the lwp list. */
857
858static void
859purge_lwp_list (int pid)
860{
861 htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
d90e17a7
PA
862}
863
26cb8b7c
PA
864/* Add the LWP specified by PTID to the list. PTID is the first LWP
865 in the process. Return a pointer to the structure describing the
866 new LWP.
867
868 This differs from add_lwp in that we don't let the arch specific
869 bits know about this new thread. Current clients of this callback
870 take the opportunity to install watchpoints in the new thread, and
871 we shouldn't do that for the first thread. If we're spawning a
872 child ("run"), the thread executes the shell wrapper first, and we
873 shouldn't touch it until it execs the program we want to debug.
874 For "attach", it'd be okay to call the callback, but it's not
875 necessary, because watchpoints can't yet have been inserted into
876 the inferior. */
d6b0e80f
AC
877
878static struct lwp_info *
26cb8b7c 879add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
880{
881 struct lwp_info *lp;
882
15a9e13e 883 gdb_assert (ptid.lwp_p ());
d6b0e80f 884
8d749320 885 lp = XNEW (struct lwp_info);
d6b0e80f
AC
886
887 memset (lp, 0, sizeof (struct lwp_info));
888
25289eb2 889 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
890 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
891
892 lp->ptid = ptid;
dc146f7c 893 lp->core = -1;
d6b0e80f 894
774113b0
PA
895 /* Add to sorted-by-reverse-creation-order list. */
896 lwp_list_add (lp);
897
898 /* Add to keyed-by-pid htab. */
899 lwp_lwpid_htab_add_lwp (lp);
d6b0e80f 900
26cb8b7c
PA
901 return lp;
902}
903
904/* Add the LWP specified by PID to the list. Return a pointer to the
905 structure describing the new LWP. The LWP should already be
906 stopped. */
907
908static struct lwp_info *
909add_lwp (ptid_t ptid)
910{
911 struct lwp_info *lp;
912
913 lp = add_initial_lwp (ptid);
914
6e012a6c
PA
915 /* Let the arch specific bits know about this new thread. Current
916 clients of this callback take the opportunity to install
26cb8b7c
PA
917 watchpoints in the new thread. We don't do this for the first
918 thread though. See add_initial_lwp. */
135340af 919 linux_target->low_new_thread (lp);
9f0bdab8 920
d6b0e80f
AC
921 return lp;
922}
923
924/* Remove the LWP specified by PID from the list. */
925
926static void
927delete_lwp (ptid_t ptid)
928{
774113b0
PA
929 struct lwp_info *lp;
930 void **slot;
931 struct lwp_info dummy;
d6b0e80f 932
774113b0
PA
933 dummy.ptid = ptid;
934 slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
935 if (slot == NULL)
936 return;
d6b0e80f 937
774113b0
PA
938 lp = *(struct lwp_info **) slot;
939 gdb_assert (lp != NULL);
d6b0e80f 940
774113b0 941 htab_clear_slot (lwp_lwpid_htab, slot);
d6b0e80f 942
774113b0
PA
943 /* Remove from sorted-by-creation-order list. */
944 lwp_list_remove (lp);
d6b0e80f 945
774113b0 946 /* Release. */
7b50312a 947 lwp_free (lp);
d6b0e80f
AC
948}
949
950/* Return a pointer to the structure describing the LWP corresponding
951 to PID. If no corresponding LWP could be found, return NULL. */
952
953static struct lwp_info *
954find_lwp_pid (ptid_t ptid)
955{
956 struct lwp_info *lp;
957 int lwp;
774113b0 958 struct lwp_info dummy;
d6b0e80f 959
15a9e13e 960 if (ptid.lwp_p ())
e38504b3 961 lwp = ptid.lwp ();
d6b0e80f 962 else
e99b03dc 963 lwp = ptid.pid ();
d6b0e80f 964
fd79271b 965 dummy.ptid = ptid_t (0, lwp, 0);
774113b0
PA
966 lp = (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
967 return lp;
d6b0e80f
AC
968}
969
6d4ee8c6 970/* See nat/linux-nat.h. */
d6b0e80f
AC
971
972struct lwp_info *
d90e17a7 973iterate_over_lwps (ptid_t filter,
d3a70e03 974 gdb::function_view<iterate_over_lwps_ftype> callback)
d6b0e80f
AC
975{
976 struct lwp_info *lp, *lpnext;
977
978 for (lp = lwp_list; lp; lp = lpnext)
979 {
980 lpnext = lp->next;
d90e17a7 981
26a57c92 982 if (lp->ptid.matches (filter))
d90e17a7 983 {
d3a70e03 984 if (callback (lp) != 0)
d90e17a7
PA
985 return lp;
986 }
d6b0e80f
AC
987 }
988
989 return NULL;
990}
991
2277426b
PA
992/* Update our internal state when changing from one checkpoint to
993 another indicated by NEW_PTID. We can only switch single-threaded
994 applications, so we only create one new LWP, and the previous list
995 is discarded. */
f973ed9c
DJ
996
997void
998linux_nat_switch_fork (ptid_t new_ptid)
999{
1000 struct lwp_info *lp;
1001
e99b03dc 1002 purge_lwp_list (inferior_ptid.pid ());
2277426b 1003
f973ed9c
DJ
1004 lp = add_lwp (new_ptid);
1005 lp->stopped = 1;
e26af52f 1006
2277426b
PA
1007 /* This changes the thread's ptid while preserving the gdb thread
1008 num. Also changes the inferior pid, while preserving the
1009 inferior num. */
5b6d1e4f 1010 thread_change_ptid (linux_target, inferior_ptid, new_ptid);
2277426b
PA
1011
1012 /* We've just told GDB core that the thread changed target id, but,
1013 in fact, it really is a different thread, with different register
1014 contents. */
1015 registers_changed ();
e26af52f
DJ
1016}
1017
e26af52f
DJ
1018/* Handle the exit of a single thread LP. */
1019
1020static void
1021exit_lwp (struct lwp_info *lp)
1022{
5b6d1e4f 1023 struct thread_info *th = find_thread_ptid (linux_target, lp->ptid);
063bfe2e
VP
1024
1025 if (th)
e26af52f 1026 {
17faa917 1027 if (print_thread_events)
a068643d
TT
1028 printf_unfiltered (_("[%s exited]\n"),
1029 target_pid_to_str (lp->ptid).c_str ());
17faa917 1030
00431a78 1031 delete_thread (th);
e26af52f
DJ
1032 }
1033
1034 delete_lwp (lp->ptid);
1035}
1036
a0ef4274
DJ
1037/* Wait for the LWP specified by LP, which we have just attached to.
1038 Returns a wait status for that LWP, to cache. */
1039
1040static int
22827c51 1041linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
a0ef4274 1042{
e38504b3 1043 pid_t new_pid, pid = ptid.lwp ();
a0ef4274
DJ
1044 int status;
1045
644cebc9 1046 if (linux_proc_pid_is_stopped (pid))
a0ef4274 1047 {
9327494e 1048 linux_nat_debug_printf ("Attaching to a stopped process");
a0ef4274
DJ
1049
1050 /* The process is definitely stopped. It is in a job control
1051 stop, unless the kernel predates the TASK_STOPPED /
1052 TASK_TRACED distinction, in which case it might be in a
1053 ptrace stop. Make sure it is in a ptrace stop; from there we
1054 can kill it, signal it, et cetera.
1055
1056 First make sure there is a pending SIGSTOP. Since we are
1057 already attached, the process can not transition from stopped
1058 to running without a PTRACE_CONT; so we know this signal will
1059 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1060 probably already in the queue (unless this kernel is old
1061 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1062 is not an RT signal, it can only be queued once. */
1063 kill_lwp (pid, SIGSTOP);
1064
1065 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1066 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1067 ptrace (PTRACE_CONT, pid, 0, 0);
1068 }
1069
1070 /* Make sure the initial process is stopped. The user-level threads
1071 layer might want to poke around in the inferior, and that won't
1072 work if things haven't stabilized yet. */
4a6ed09b 1073 new_pid = my_waitpid (pid, &status, __WALL);
dacc9cb2
PP
1074 gdb_assert (pid == new_pid);
1075
1076 if (!WIFSTOPPED (status))
1077 {
1078 /* The pid we tried to attach has apparently just exited. */
9327494e
SM
1079 linux_nat_debug_printf ("Failed to stop %d: %s", pid,
1080 status_to_str (status));
dacc9cb2
PP
1081 return status;
1082 }
a0ef4274
DJ
1083
1084 if (WSTOPSIG (status) != SIGSTOP)
1085 {
1086 *signalled = 1;
9327494e
SM
1087 linux_nat_debug_printf ("Received %s after attaching",
1088 status_to_str (status));
a0ef4274
DJ
1089 }
1090
1091 return status;
1092}
1093
f6ac5f3d
PA
1094void
1095linux_nat_target::create_inferior (const char *exec_file,
1096 const std::string &allargs,
1097 char **env, int from_tty)
b84876c2 1098{
41272101
TT
1099 maybe_disable_address_space_randomization restore_personality
1100 (disable_randomization);
b84876c2
PA
1101
1102 /* The fork_child mechanism is synchronous and calls target_wait, so
1103 we have to mask the async mode. */
1104
2455069d 1105 /* Make sure we report all signals during startup. */
adc6a863 1106 pass_signals ({});
2455069d 1107
f6ac5f3d 1108 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
b84876c2
PA
1109}
1110
8784d563
PA
1111/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1112 already attached. Returns true if a new LWP is found, false
1113 otherwise. */
1114
1115static int
1116attach_proc_task_lwp_callback (ptid_t ptid)
1117{
1118 struct lwp_info *lp;
1119
1120 /* Ignore LWPs we're already attached to. */
1121 lp = find_lwp_pid (ptid);
1122 if (lp == NULL)
1123 {
e38504b3 1124 int lwpid = ptid.lwp ();
8784d563
PA
1125
1126 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1127 {
1128 int err = errno;
1129
1130 /* Be quiet if we simply raced with the thread exiting.
1131 EPERM is returned if the thread's task still exists, and
1132 is marked as exited or zombie, as well as other
1133 conditions, so in that case, confirm the status in
1134 /proc/PID/status. */
1135 if (err == ESRCH
1136 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1137 {
9327494e
SM
1138 linux_nat_debug_printf
1139 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1140 lwpid, err, safe_strerror (err));
1141
8784d563
PA
1142 }
1143 else
1144 {
4d9b86e1 1145 std::string reason
50fa3001 1146 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1 1147
f71f0b0d 1148 warning (_("Cannot attach to lwp %d: %s"),
4d9b86e1 1149 lwpid, reason.c_str ());
8784d563
PA
1150 }
1151 }
1152 else
1153 {
9327494e
SM
1154 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
1155 target_pid_to_str (ptid).c_str ());
8784d563
PA
1156
1157 lp = add_lwp (ptid);
8784d563
PA
1158
1159 /* The next time we wait for this LWP we'll see a SIGSTOP as
1160 PTRACE_ATTACH brings it to a halt. */
1161 lp->signalled = 1;
1162
1163 /* We need to wait for a stop before being able to make the
1164 next ptrace call on this LWP. */
1165 lp->must_set_ptrace_flags = 1;
026a9174
PA
1166
1167 /* So that wait collects the SIGSTOP. */
1168 lp->resumed = 1;
1169
1170 /* Also add the LWP to gdb's thread list, in case a
1171 matching libthread_db is not found (or the process uses
1172 raw clone). */
5b6d1e4f 1173 add_thread (linux_target, lp->ptid);
719546c4
SM
1174 set_running (linux_target, lp->ptid, true);
1175 set_executing (linux_target, lp->ptid, true);
8784d563
PA
1176 }
1177
1178 return 1;
1179 }
1180 return 0;
1181}
1182
f6ac5f3d
PA
1183void
1184linux_nat_target::attach (const char *args, int from_tty)
d6b0e80f
AC
1185{
1186 struct lwp_info *lp;
d6b0e80f 1187 int status;
af990527 1188 ptid_t ptid;
d6b0e80f 1189
2455069d 1190 /* Make sure we report all signals during attach. */
adc6a863 1191 pass_signals ({});
2455069d 1192
a70b8144 1193 try
87b0bb13 1194 {
f6ac5f3d 1195 inf_ptrace_target::attach (args, from_tty);
87b0bb13 1196 }
230d2906 1197 catch (const gdb_exception_error &ex)
87b0bb13
JK
1198 {
1199 pid_t pid = parse_pid_to_attach (args);
50fa3001 1200 std::string reason = linux_ptrace_attach_fail_reason (pid);
87b0bb13 1201
4d9b86e1 1202 if (!reason.empty ())
3d6e9d23
TT
1203 throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
1204 ex.what ());
7ae1a6a6 1205 else
3d6e9d23 1206 throw_error (ex.error, "%s", ex.what ());
87b0bb13 1207 }
d6b0e80f 1208
af990527
PA
1209 /* The ptrace base target adds the main thread with (pid,0,0)
1210 format. Decorate it with lwp info. */
e99b03dc
TT
1211 ptid = ptid_t (inferior_ptid.pid (),
1212 inferior_ptid.pid (),
fd79271b 1213 0);
5b6d1e4f 1214 thread_change_ptid (linux_target, inferior_ptid, ptid);
af990527 1215
9f0bdab8 1216 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1217 lp = add_initial_lwp (ptid);
a0ef4274 1218
22827c51 1219 status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
dacc9cb2
PP
1220 if (!WIFSTOPPED (status))
1221 {
1222 if (WIFEXITED (status))
1223 {
1224 int exit_code = WEXITSTATUS (status);
1225
223ffa71 1226 target_terminal::ours ();
bc1e6c81 1227 target_mourn_inferior (inferior_ptid);
dacc9cb2
PP
1228 if (exit_code == 0)
1229 error (_("Unable to attach: program exited normally."));
1230 else
1231 error (_("Unable to attach: program exited with code %d."),
1232 exit_code);
1233 }
1234 else if (WIFSIGNALED (status))
1235 {
2ea28649 1236 enum gdb_signal signo;
dacc9cb2 1237
223ffa71 1238 target_terminal::ours ();
bc1e6c81 1239 target_mourn_inferior (inferior_ptid);
dacc9cb2 1240
2ea28649 1241 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1242 error (_("Unable to attach: program terminated with signal "
1243 "%s, %s."),
2ea28649
PA
1244 gdb_signal_to_name (signo),
1245 gdb_signal_to_string (signo));
dacc9cb2
PP
1246 }
1247
1248 internal_error (__FILE__, __LINE__,
1249 _("unexpected status %d for PID %ld"),
e38504b3 1250 status, (long) ptid.lwp ());
dacc9cb2
PP
1251 }
1252
a0ef4274 1253 lp->stopped = 1;
9f0bdab8 1254
a0ef4274 1255 /* Save the wait status to report later. */
d6b0e80f 1256 lp->resumed = 1;
9327494e
SM
1257 linux_nat_debug_printf ("waitpid %ld, saving status %s",
1258 (long) lp->ptid.pid (), status_to_str (status));
710151dd 1259
7feb7d06
PA
1260 lp->status = status;
1261
8784d563
PA
1262 /* We must attach to every LWP. If /proc is mounted, use that to
1263 find them now. The inferior may be using raw clone instead of
1264 using pthreads. But even if it is using pthreads, thread_db
1265 walks structures in the inferior's address space to find the list
1266 of threads/LWPs, and those structures may well be corrupted.
1267 Note that once thread_db is loaded, we'll still use it to list
1268 threads and associate pthread info with each LWP. */
e99b03dc 1269 linux_proc_attach_tgid_threads (lp->ptid.pid (),
8784d563
PA
1270 attach_proc_task_lwp_callback);
1271
7feb7d06 1272 if (target_can_async_p ())
6a3753b3 1273 target_async (1);
d6b0e80f
AC
1274}
1275
ced2dffb
PA
1276/* Get pending signal of THREAD as a host signal number, for detaching
1277 purposes. This is the signal the thread last stopped for, which we
1278 need to deliver to the thread when detaching, otherwise, it'd be
1279 suppressed/lost. */
1280
a0ef4274 1281static int
ced2dffb 1282get_detach_signal (struct lwp_info *lp)
a0ef4274 1283{
a493e3e2 1284 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1285
1286 /* If we paused threads momentarily, we may have stored pending
1287 events in lp->status or lp->waitstatus (see stop_wait_callback),
1288 and GDB core hasn't seen any signal for those threads.
1289 Otherwise, the last signal reported to the core is found in the
1290 thread object's stop_signal.
1291
1292 There's a corner case that isn't handled here at present. Only
1293 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1294 stop_signal make sense as a real signal to pass to the inferior.
1295 Some catchpoint related events, like
1296 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1297 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1298 those traps are debug API (ptrace in our case) related and
1299 induced; the inferior wouldn't see them if it wasn't being
1300 traced. Hence, we should never pass them to the inferior, even
1301 when set to pass state. Since this corner case isn't handled by
1302 infrun.c when proceeding with a signal, for consistency, neither
1303 do we handle it here (or elsewhere in the file we check for
1304 signal pass state). Normally SIGTRAP isn't set to pass state, so
1305 this is really a corner case. */
1306
1307 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1308 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1309 else if (lp->status)
2ea28649 1310 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
00431a78 1311 else
ca2163eb 1312 {
5b6d1e4f 1313 struct thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
e0881a8e 1314
00431a78 1315 if (target_is_non_stop_p () && !tp->executing)
ca2163eb 1316 {
00431a78
PA
1317 if (tp->suspend.waitstatus_pending_p)
1318 signo = tp->suspend.waitstatus.value.sig;
1319 else
1320 signo = tp->suspend.stop_signal;
1321 }
1322 else if (!target_is_non_stop_p ())
1323 {
00431a78 1324 ptid_t last_ptid;
5b6d1e4f 1325 process_stratum_target *last_target;
00431a78 1326
5b6d1e4f 1327 get_last_target_status (&last_target, &last_ptid, nullptr);
e0881a8e 1328
5b6d1e4f
PA
1329 if (last_target == linux_target
1330 && lp->ptid.lwp () == last_ptid.lwp ())
00431a78 1331 signo = tp->suspend.stop_signal;
4c28f408 1332 }
ca2163eb 1333 }
4c28f408 1334
a493e3e2 1335 if (signo == GDB_SIGNAL_0)
ca2163eb 1336 {
9327494e
SM
1337 linux_nat_debug_printf ("lwp %s has no pending signal",
1338 target_pid_to_str (lp->ptid).c_str ());
ca2163eb
PA
1339 }
1340 else if (!signal_pass_state (signo))
1341 {
9327494e
SM
1342 linux_nat_debug_printf
1343 ("lwp %s had signal %s but it is in no pass state",
1344 target_pid_to_str (lp->ptid).c_str (), gdb_signal_to_string (signo));
a0ef4274 1345 }
a0ef4274 1346 else
4c28f408 1347 {
9327494e
SM
1348 linux_nat_debug_printf ("lwp %s has pending signal %s",
1349 target_pid_to_str (lp->ptid).c_str (),
1350 gdb_signal_to_string (signo));
ced2dffb
PA
1351
1352 return gdb_signal_to_host (signo);
4c28f408 1353 }
a0ef4274
DJ
1354
1355 return 0;
1356}
1357
ced2dffb
PA
1358/* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1359 signal number that should be passed to the LWP when detaching.
1360 Otherwise pass any pending signal the LWP may have, if any. */
1361
1362static void
1363detach_one_lwp (struct lwp_info *lp, int *signo_p)
d6b0e80f 1364{
e38504b3 1365 int lwpid = lp->ptid.lwp ();
ced2dffb
PA
1366 int signo;
1367
d6b0e80f
AC
1368 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1369
9327494e
SM
1370 if (lp->status != 0)
1371 linux_nat_debug_printf ("Pending %s for %s on detach.",
1372 strsignal (WSTOPSIG (lp->status)),
1373 target_pid_to_str (lp->ptid).c_str ());
d6b0e80f 1374
a0ef4274
DJ
1375 /* If there is a pending SIGSTOP, get rid of it. */
1376 if (lp->signalled)
d6b0e80f 1377 {
9327494e
SM
1378 linux_nat_debug_printf ("Sending SIGCONT to %s",
1379 target_pid_to_str (lp->ptid).c_str ());
d6b0e80f 1380
ced2dffb 1381 kill_lwp (lwpid, SIGCONT);
d6b0e80f 1382 lp->signalled = 0;
d6b0e80f
AC
1383 }
1384
ced2dffb 1385 if (signo_p == NULL)
d6b0e80f 1386 {
a0ef4274 1387 /* Pass on any pending signal for this LWP. */
ced2dffb
PA
1388 signo = get_detach_signal (lp);
1389 }
1390 else
1391 signo = *signo_p;
a0ef4274 1392
ced2dffb
PA
1393 /* Preparing to resume may try to write registers, and fail if the
1394 lwp is zombie. If that happens, ignore the error. We'll handle
1395 it below, when detach fails with ESRCH. */
a70b8144 1396 try
ced2dffb 1397 {
135340af 1398 linux_target->low_prepare_to_resume (lp);
ced2dffb 1399 }
230d2906 1400 catch (const gdb_exception_error &ex)
ced2dffb
PA
1401 {
1402 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1403 throw;
ced2dffb 1404 }
d6b0e80f 1405
ced2dffb
PA
1406 if (ptrace (PTRACE_DETACH, lwpid, 0, signo) < 0)
1407 {
1408 int save_errno = errno;
1409
1410 /* We know the thread exists, so ESRCH must mean the lwp is
1411 zombie. This can happen if one of the already-detached
1412 threads exits the whole thread group. In that case we're
1413 still attached, and must reap the lwp. */
1414 if (save_errno == ESRCH)
1415 {
1416 int ret, status;
d6b0e80f 1417
ced2dffb
PA
1418 ret = my_waitpid (lwpid, &status, __WALL);
1419 if (ret == -1)
1420 {
1421 warning (_("Couldn't reap LWP %d while detaching: %s"),
6d91ce9a 1422 lwpid, safe_strerror (errno));
ced2dffb
PA
1423 }
1424 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1425 {
1426 warning (_("Reaping LWP %d while detaching "
1427 "returned unexpected status 0x%x"),
1428 lwpid, status);
1429 }
1430 }
1431 else
1432 {
a068643d
TT
1433 error (_("Can't detach %s: %s"),
1434 target_pid_to_str (lp->ptid).c_str (),
ced2dffb
PA
1435 safe_strerror (save_errno));
1436 }
d6b0e80f 1437 }
9327494e
SM
1438 else
1439 linux_nat_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
1440 target_pid_to_str (lp->ptid).c_str (),
1441 strsignal (signo));
ced2dffb
PA
1442
1443 delete_lwp (lp->ptid);
1444}
d6b0e80f 1445
ced2dffb 1446static int
d3a70e03 1447detach_callback (struct lwp_info *lp)
ced2dffb
PA
1448{
1449 /* We don't actually detach from the thread group leader just yet.
1450 If the thread group exits, we must reap the zombie clone lwps
1451 before we're able to reap the leader. */
e38504b3 1452 if (lp->ptid.lwp () != lp->ptid.pid ())
ced2dffb 1453 detach_one_lwp (lp, NULL);
d6b0e80f
AC
1454 return 0;
1455}
1456
f6ac5f3d
PA
1457void
1458linux_nat_target::detach (inferior *inf, int from_tty)
d6b0e80f 1459{
d90e17a7 1460 struct lwp_info *main_lwp;
bc09b0c1 1461 int pid = inf->pid;
a0ef4274 1462
ae5e0686
MK
1463 /* Don't unregister from the event loop, as there may be other
1464 inferiors running. */
b84876c2 1465
4c28f408 1466 /* Stop all threads before detaching. ptrace requires that the
30baf67b 1467 thread is stopped to successfully detach. */
d3a70e03 1468 iterate_over_lwps (ptid_t (pid), stop_callback);
4c28f408
PA
1469 /* ... and wait until all of them have reported back that
1470 they're no longer running. */
d3a70e03 1471 iterate_over_lwps (ptid_t (pid), stop_wait_callback);
4c28f408 1472
d3a70e03 1473 iterate_over_lwps (ptid_t (pid), detach_callback);
d6b0e80f
AC
1474
1475 /* Only the initial process should be left right now. */
bc09b0c1 1476 gdb_assert (num_lwps (pid) == 1);
d90e17a7 1477
f2907e49 1478 main_lwp = find_lwp_pid (ptid_t (pid));
d6b0e80f 1479
7a7d3353
PA
1480 if (forks_exist_p ())
1481 {
1482 /* Multi-fork case. The current inferior_ptid is being detached
1483 from, but there are other viable forks to debug. Detach from
1484 the current fork, and context-switch to the first
1485 available. */
6bd6f3b6 1486 linux_fork_detach (from_tty);
7a7d3353
PA
1487 }
1488 else
ced2dffb 1489 {
ced2dffb
PA
1490 target_announce_detach (from_tty);
1491
6bd6f3b6
SM
1492 /* Pass on any pending signal for the last LWP. */
1493 int signo = get_detach_signal (main_lwp);
ced2dffb
PA
1494
1495 detach_one_lwp (main_lwp, &signo);
1496
f6ac5f3d 1497 detach_success (inf);
ced2dffb 1498 }
d6b0e80f
AC
1499}
1500
8a99810d
PA
1501/* Resume execution of the inferior process. If STEP is nonzero,
1502 single-step it. If SIGNAL is nonzero, give it that signal. */
1503
1504static void
23f238d3
PA
1505linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1506 enum gdb_signal signo)
8a99810d 1507{
8a99810d 1508 lp->step = step;
9c02b525
PA
1509
1510 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1511 We only presently need that if the LWP is stepped though (to
1512 handle the case of stepping a breakpoint instruction). */
1513 if (step)
1514 {
5b6d1e4f 1515 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
1516
1517 lp->stop_pc = regcache_read_pc (regcache);
1518 }
1519 else
1520 lp->stop_pc = 0;
1521
135340af 1522 linux_target->low_prepare_to_resume (lp);
f6ac5f3d 1523 linux_target->low_resume (lp->ptid, step, signo);
23f238d3
PA
1524
1525 /* Successfully resumed. Clear state that no longer makes sense,
1526 and mark the LWP as running. Must not do this before resuming
1527 otherwise if that fails other code will be confused. E.g., we'd
1528 later try to stop the LWP and hang forever waiting for a stop
1529 status. Note that we must not throw after this is cleared,
1530 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1531 lp->stopped = 0;
1ad3de98 1532 lp->core = -1;
23f238d3 1533 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
5b6d1e4f 1534 registers_changed_ptid (linux_target, lp->ptid);
8a99810d
PA
1535}
1536
23f238d3
PA
1537/* Called when we try to resume a stopped LWP and that errors out. If
1538 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1539 or about to become), discard the error, clear any pending status
1540 the LWP may have, and return true (we'll collect the exit status
1541 soon enough). Otherwise, return false. */
1542
1543static int
1544check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1545{
1546 /* If we get an error after resuming the LWP successfully, we'd
1547 confuse !T state for the LWP being gone. */
1548 gdb_assert (lp->stopped);
1549
1550 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1551 because even if ptrace failed with ESRCH, the tracee may be "not
1552 yet fully dead", but already refusing ptrace requests. In that
1553 case the tracee has 'R (Running)' state for a little bit
1554 (observed in Linux 3.18). See also the note on ESRCH in the
1555 ptrace(2) man page. Instead, check whether the LWP has any state
1556 other than ptrace-stopped. */
1557
1558 /* Don't assume anything if /proc/PID/status can't be read. */
e38504b3 1559 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
23f238d3
PA
1560 {
1561 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1562 lp->status = 0;
1563 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1564 return 1;
1565 }
1566 return 0;
1567}
1568
1569/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1570 disappears while we try to resume it. */
1571
1572static void
1573linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1574{
a70b8144 1575 try
23f238d3
PA
1576 {
1577 linux_resume_one_lwp_throw (lp, step, signo);
1578 }
230d2906 1579 catch (const gdb_exception_error &ex)
23f238d3
PA
1580 {
1581 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 1582 throw;
23f238d3 1583 }
23f238d3
PA
1584}
1585
d6b0e80f
AC
1586/* Resume LP. */
1587
25289eb2 1588static void
e5ef252a 1589resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1590{
25289eb2 1591 if (lp->stopped)
6c95b8df 1592 {
5b6d1e4f 1593 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
25289eb2
PA
1594
1595 if (inf->vfork_child != NULL)
1596 {
9327494e
SM
1597 linux_nat_debug_printf ("Not resuming %s (vfork parent)",
1598 target_pid_to_str (lp->ptid).c_str ());
25289eb2 1599 }
8a99810d 1600 else if (!lwp_status_pending_p (lp))
25289eb2 1601 {
9327494e
SM
1602 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1603 target_pid_to_str (lp->ptid).c_str (),
1604 (signo != GDB_SIGNAL_0
1605 ? strsignal (gdb_signal_to_host (signo))
1606 : "0"),
1607 step ? "step" : "resume");
25289eb2 1608
8a99810d 1609 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1610 }
1611 else
1612 {
9327494e
SM
1613 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1614 target_pid_to_str (lp->ptid).c_str ());
25289eb2 1615 }
6c95b8df 1616 }
25289eb2 1617 else
9327494e 1618 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
a068643d 1619 target_pid_to_str (lp->ptid).c_str ());
25289eb2 1620}
d6b0e80f 1621
8817a6f2
PA
1622/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1623 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1624
25289eb2 1625static int
d3a70e03 1626linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
25289eb2 1627{
e5ef252a
PA
1628 enum gdb_signal signo = GDB_SIGNAL_0;
1629
8817a6f2
PA
1630 if (lp == except)
1631 return 0;
1632
e5ef252a
PA
1633 if (lp->stopped)
1634 {
1635 struct thread_info *thread;
1636
5b6d1e4f 1637 thread = find_thread_ptid (linux_target, lp->ptid);
e5ef252a
PA
1638 if (thread != NULL)
1639 {
70509625 1640 signo = thread->suspend.stop_signal;
e5ef252a
PA
1641 thread->suspend.stop_signal = GDB_SIGNAL_0;
1642 }
1643 }
1644
1645 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1646 return 0;
1647}
1648
1649static int
d3a70e03 1650resume_clear_callback (struct lwp_info *lp)
d6b0e80f
AC
1651{
1652 lp->resumed = 0;
25289eb2 1653 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1654 return 0;
1655}
1656
1657static int
d3a70e03 1658resume_set_callback (struct lwp_info *lp)
d6b0e80f
AC
1659{
1660 lp->resumed = 1;
25289eb2 1661 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1662 return 0;
1663}
1664
f6ac5f3d
PA
1665void
1666linux_nat_target::resume (ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1667{
1668 struct lwp_info *lp;
d90e17a7 1669 int resume_many;
d6b0e80f 1670
9327494e
SM
1671 linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
1672 step ? "step" : "resume",
1673 target_pid_to_str (ptid).c_str (),
1674 (signo != GDB_SIGNAL_0
1675 ? strsignal (gdb_signal_to_host (signo)) : "0"),
1676 target_pid_to_str (inferior_ptid).c_str ());
76f50ad1 1677
d6b0e80f 1678 /* A specific PTID means `step only this process id'. */
d7e15655 1679 resume_many = (minus_one_ptid == ptid
0e998d96 1680 || ptid.is_pid ());
4c28f408 1681
7da6a5b9
LM
1682 /* Mark the lwps we're resuming as resumed and update their
1683 last_resume_kind to resume_continue. */
d3a70e03 1684 iterate_over_lwps (ptid, resume_set_callback);
d6b0e80f 1685
d90e17a7
PA
1686 /* See if it's the current inferior that should be handled
1687 specially. */
1688 if (resume_many)
1689 lp = find_lwp_pid (inferior_ptid);
1690 else
1691 lp = find_lwp_pid (ptid);
9f0bdab8 1692 gdb_assert (lp != NULL);
d6b0e80f 1693
9f0bdab8 1694 /* Remember if we're stepping. */
25289eb2 1695 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1696
9f0bdab8
DJ
1697 /* If we have a pending wait status for this thread, there is no
1698 point in resuming the process. But first make sure that
1699 linux_nat_wait won't preemptively handle the event - we
1700 should never take this short-circuit if we are going to
1701 leave LP running, since we have skipped resuming all the
1702 other threads. This bit of code needs to be synchronized
1703 with linux_nat_wait. */
76f50ad1 1704
9f0bdab8
DJ
1705 if (lp->status && WIFSTOPPED (lp->status))
1706 {
2455069d
UW
1707 if (!lp->step
1708 && WSTOPSIG (lp->status)
1709 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1710 {
9327494e
SM
1711 linux_nat_debug_printf
1712 ("Not short circuiting for ignored status 0x%x", lp->status);
9f0bdab8 1713
d6b0e80f
AC
1714 /* FIXME: What should we do if we are supposed to continue
1715 this thread with a signal? */
a493e3e2 1716 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1717 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1718 lp->status = 0;
1719 }
1720 }
76f50ad1 1721
8a99810d 1722 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1723 {
1724 /* FIXME: What should we do if we are supposed to continue
1725 this thread with a signal? */
a493e3e2 1726 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1727
9327494e
SM
1728 linux_nat_debug_printf ("Short circuiting for status 0x%x",
1729 lp->status);
d6b0e80f 1730
7feb7d06
PA
1731 if (target_can_async_p ())
1732 {
6a3753b3 1733 target_async (1);
7feb7d06
PA
1734 /* Tell the event loop we have something to process. */
1735 async_file_mark ();
1736 }
9f0bdab8 1737 return;
d6b0e80f
AC
1738 }
1739
d90e17a7 1740 if (resume_many)
d3a70e03
TT
1741 iterate_over_lwps (ptid, [=] (struct lwp_info *info)
1742 {
1743 return linux_nat_resume_callback (info, lp);
1744 });
d90e17a7 1745
9327494e
SM
1746 linux_nat_debug_printf ("%s %s, %s (resume event thread)",
1747 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1748 target_pid_to_str (lp->ptid).c_str (),
1749 (signo != GDB_SIGNAL_0
1750 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1751
2bf6fb9d
PA
1752 linux_resume_one_lwp (lp, step, signo);
1753
b84876c2 1754 if (target_can_async_p ())
6a3753b3 1755 target_async (1);
d6b0e80f
AC
1756}
1757
c5f62d5f 1758/* Send a signal to an LWP. */
d6b0e80f
AC
1759
1760static int
1761kill_lwp (int lwpid, int signo)
1762{
4a6ed09b 1763 int ret;
d6b0e80f 1764
4a6ed09b
PA
1765 errno = 0;
1766 ret = syscall (__NR_tkill, lwpid, signo);
1767 if (errno == ENOSYS)
1768 {
1769 /* If tkill fails, then we are not using nptl threads, a
1770 configuration we no longer support. */
1771 perror_with_name (("tkill"));
1772 }
1773 return ret;
d6b0e80f
AC
1774}
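
/* Illustrative sketch, not part of GDB: how a signal is directed at a
   single LWP with the tkill syscall, which is what kill_lwp above relies
   on.  Unlike kill (), which targets the whole thread group, tkill ()
   takes a kernel thread id, so only that one LWP sees the SIGSTOP.  The
   helper name is hypothetical; assumes a Linux system providing
   __NR_tkill.  */

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
example_stop_one_lwp (pid_t lwpid)
{
  errno = 0;
  if (syscall (__NR_tkill, lwpid, SIGSTOP) != 0)
    {
      /* ESRCH here usually means the LWP has already exited.  */
      fprintf (stderr, "tkill (%d, SIGSTOP): %s\n",
	       (int) lwpid, strerror (errno));
      return -1;
    }
  return 0;
}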
1775
ca2163eb
PA
1776/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1777 event, check if the core is interested in it: if not, ignore the
1778 event, and keep waiting; otherwise, we need to toggle the LWP's
1779 syscall entry/exit status, since the ptrace event itself doesn't
1780 indicate it, and report the trap to higher layers. */
1781
1782static int
1783linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1784{
1785 struct target_waitstatus *ourstatus = &lp->waitstatus;
1786 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
5b6d1e4f 1787 thread_info *thread = find_thread_ptid (linux_target, lp->ptid);
00431a78 1788 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);
ca2163eb
PA
1789
1790 if (stopping)
1791 {
1792 /* If we're stopping threads, there's a SIGSTOP pending, which
1793 makes it so that the LWP reports an immediate syscall return,
1794 followed by the SIGSTOP. Skip seeing that "return" using
1795 PTRACE_CONT directly, and let stop_wait_callback collect the
1796 SIGSTOP. Later when the thread is resumed, a new syscall
1797 entry event is reported. If we didn't do this (and returned 0), we'd
1798 leave a syscall entry pending, and our caller, by using
1799 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1800 itself. Later, when the user re-resumes this LWP, we'd see
1801 another syscall entry event and we'd mistake it for a return.
1802
1803 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1804 (leaving immediately with LWP->signalled set, without issuing
1805 a PTRACE_CONT), it would still be problematic to leave this
1806 syscall enter pending, as later when the thread is resumed,
1807 it would then see the same syscall exit mentioned above,
1808 followed by the delayed SIGSTOP, while the syscall didn't
1809 actually get to execute. It seems it would be even more
1810 confusing to the user. */
1811
9327494e
SM
1812 linux_nat_debug_printf
1813 ("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
1814 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1815
1816 lp->syscall_state = TARGET_WAITKIND_IGNORE;
e38504b3 1817 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 1818 lp->stopped = 0;
ca2163eb
PA
1819 return 1;
1820 }
1821
bfd09d20
JS
1822 /* Always update the entry/return state, even if this particular
1823 syscall isn't interesting to the core now. In async mode,
1824 the user could install a new catchpoint for this syscall
1825 between syscall enter/return, and we'll need to know to
1826 report a syscall return if that happens. */
1827 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1828 ? TARGET_WAITKIND_SYSCALL_RETURN
1829 : TARGET_WAITKIND_SYSCALL_ENTRY);
1830
ca2163eb
PA
1831 if (catch_syscall_enabled ())
1832 {
ca2163eb
PA
1833 if (catching_syscall_number (syscall_number))
1834 {
1835 /* Alright, an event to report. */
1836 ourstatus->kind = lp->syscall_state;
1837 ourstatus->value.syscall_number = syscall_number;
1838
9327494e
SM
1839 linux_nat_debug_printf
1840 ("stopping for %s of syscall %d for LWP %ld",
1841 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1842 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
1843
ca2163eb
PA
1844 return 0;
1845 }
1846
9327494e
SM
1847 linux_nat_debug_printf
1848 ("ignoring %s of syscall %d for LWP %ld",
1849 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1850 ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1851 }
1852 else
1853 {
1854 /* If we had been syscall tracing, and hence used PT_SYSCALL
1855 before on this LWP, it could happen that the user removes all
1856 syscall catchpoints before we get to process this event.
1857 There are two noteworthy issues here:
1858
1859 - When stopped at a syscall entry event, resuming with
1860 PT_STEP still resumes executing the syscall and reports a
1861 syscall return.
1862
1863 - Only PT_SYSCALL catches syscall enters. If we last
1864 single-stepped this thread, then this event can't be a
1865 syscall enter; having just single-stepped it, this event
1866 has to be a syscall exit.
1867
1868 The points above mean that the next resume, be it PT_STEP or
1869 PT_CONTINUE, can not trigger a syscall trace event. */
9327494e
SM
1870 linux_nat_debug_printf
1871 ("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
1872 "ignoring", syscall_number, lp->ptid.lwp ());
ca2163eb
PA
1873 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1874 }
1875
1876 /* The core isn't interested in this event. For efficiency, avoid
1877 stopping all threads only to have the core resume them all again.
1878 Since we're not stopping threads, if we're still syscall tracing
1879 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1880 subsequent syscall. Simply resume using the inf-ptrace layer,
1881 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1882
8a99810d 1883 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1884 return 1;
1885}
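
/* Illustrative sketch, not part of GDB: a minimal PTRACE_SYSCALL loop
   showing why the tracer must track syscall entry vs. exit itself -- as
   the comments above note, the two stops look identical to waitpid.
   Hypothetical helper; assumes CHILD is already traced with
   PTRACE_O_TRACESYSGOOD set, and omits signal/error handling.  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
example_trace_syscalls (pid_t child)
{
  int status, in_syscall = 0;

  for (;;)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);
      if (waitpid (child, &status, 0) == -1 || WIFEXITED (status))
	break;
      if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
	{
	  /* Same stop for entry and exit; flip our own flag.  */
	  in_syscall = !in_syscall;
	  printf ("syscall %s\n", in_syscall ? "entry" : "exit");
	}
    }
}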
1886
3d799a95
DJ
1887/* Handle a GNU/Linux extended wait response. If we see a clone
1888 event, we need to add the new LWP to our list (and not report the
1889 trap to higher layers). This function returns non-zero if the
1890 event should be ignored and we should wait again. If STOPPING is
1891 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1892
1893static int
4dd63d48 1894linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1895{
e38504b3 1896 int pid = lp->ptid.lwp ();
3d799a95 1897 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1898 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1899
bfd09d20
JS
1900 /* All extended events we currently use are mid-syscall. Only
1901 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1902 you have to be using PTRACE_SEIZE to get that. */
1903 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1904
3d799a95
DJ
1905 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1906 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1907 {
3d799a95
DJ
1908 unsigned long new_pid;
1909 int ret;
1910
1911 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1912
3d799a95
DJ
1913 /* If we haven't already seen the new PID stop, wait for it now. */
1914 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1915 {
1916 /* The new child has a pending SIGSTOP. We can't affect it until it
1917 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1918 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1919 if (ret == -1)
1920 perror_with_name (_("waiting for new child"));
1921 else if (ret != new_pid)
1922 internal_error (__FILE__, __LINE__,
1923 _("wait returned unexpected PID %d"), ret);
1924 else if (!WIFSTOPPED (status))
1925 internal_error (__FILE__, __LINE__,
1926 _("wait returned unexpected status 0x%x"), status);
1927 }
1928
fd79271b 1929 ourstatus->value.related_pid = ptid_t (new_pid, new_pid, 0);
3d799a95 1930
26cb8b7c
PA
1931 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1932 {
1933 /* The arch-specific native code may need to know about new
1934 forks even if those end up never mapped to an
1935 inferior. */
135340af 1936 linux_target->low_new_fork (lp, new_pid);
26cb8b7c 1937 }
1310c1b0
PFC
1938 else if (event == PTRACE_EVENT_CLONE)
1939 {
1940 linux_target->low_new_clone (lp, new_pid);
1941 }
26cb8b7c 1942
2277426b 1943 if (event == PTRACE_EVENT_FORK
e99b03dc 1944 && linux_fork_checkpointing_p (lp->ptid.pid ()))
2277426b 1945 {
2277426b
PA
1946 /* Handle checkpointing by linux-fork.c here as a special
1947 case. We don't want the follow-fork-mode or 'catch fork'
1948 to interfere with this. */
1949
1950 /* This won't actually modify the breakpoint list, but will
1951 physically remove the breakpoints from the child. */
fd79271b 1952 detach_breakpoints (ptid_t (new_pid, new_pid, 0));
2277426b
PA
1953
1954 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1955 if (!find_fork_pid (new_pid))
1956 add_fork (new_pid);
2277426b
PA
1957
1958 /* Report as spurious, so that infrun doesn't want to follow
1959 this fork. We're actually doing an infcall in
1960 linux-fork.c. */
1961 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
1962
1963 /* Report the stop to the core. */
1964 return 0;
1965 }
1966
3d799a95
DJ
1967 if (event == PTRACE_EVENT_FORK)
1968 ourstatus->kind = TARGET_WAITKIND_FORKED;
1969 else if (event == PTRACE_EVENT_VFORK)
1970 ourstatus->kind = TARGET_WAITKIND_VFORKED;
4dd63d48 1971 else if (event == PTRACE_EVENT_CLONE)
3d799a95 1972 {
78768c4a
JK
1973 struct lwp_info *new_lp;
1974
3d799a95 1975 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 1976
9327494e
SM
1977 linux_nat_debug_printf
1978 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
3c4d7e12 1979
e99b03dc 1980 new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid, 0));
4c28f408 1981 new_lp->stopped = 1;
4dd63d48 1982 new_lp->resumed = 1;
d6b0e80f 1983
2db9a427
PA
1984 /* If the thread_db layer is active, let it record the user
1985 level thread id and status, and add the thread to GDB's
1986 list. */
1987 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 1988 {
2db9a427
PA
1989 /* The process is not using thread_db. Add the LWP to
1990 GDB's list. */
e38504b3 1991 target_post_attach (new_lp->ptid.lwp ());
5b6d1e4f 1992 add_thread (linux_target, new_lp->ptid);
2db9a427 1993 }
4c28f408 1994
2ee52aa4 1995 /* Even if we're stopping the thread for some reason
4dd63d48
PA
1996 internal to this module, from the perspective of infrun
1997 and the user/frontend, this new thread is running until
1998 it next reports a stop. */
719546c4
SM
1999 set_running (linux_target, new_lp->ptid, true);
2000 set_executing (linux_target, new_lp->ptid, true);
4c28f408 2001
4dd63d48 2002 if (WSTOPSIG (status) != SIGSTOP)
79395f92 2003 {
4dd63d48
PA
2004 /* This can happen if someone starts sending the new thread
2005 signals with a lower number than SIGSTOP (e.g. SIGUSR1)
2006 before it gets a chance to run.
2007 This is an unlikely case, and harder to handle for
2008 fork / vfork than for clone, so we do not try - but
2009 we handle it for clone events here. */
2010
2011 new_lp->signalled = 1;
2012
79395f92
PA
2013 /* We created NEW_LP so it cannot yet contain STATUS. */
2014 gdb_assert (new_lp->status == 0);
2015
2016 /* Save the wait status to report later. */
9327494e
SM
2017 linux_nat_debug_printf
2018 ("waitpid of new LWP %ld, saving status %s",
2019 (long) new_lp->ptid.lwp (), status_to_str (status));
79395f92
PA
2020 new_lp->status = status;
2021 }
aa01bd36
PA
2022 else if (report_thread_events)
2023 {
2024 new_lp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
2025 new_lp->status = status;
2026 }
79395f92 2027
3d799a95
DJ
2028 return 1;
2029 }
2030
2031 return 0;
d6b0e80f
AC
2032 }
2033
3d799a95
DJ
2034 if (event == PTRACE_EVENT_EXEC)
2035 {
9327494e 2036 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
a75724bc 2037
3d799a95
DJ
2038 ourstatus->kind = TARGET_WAITKIND_EXECD;
2039 ourstatus->value.execd_pathname
f6ac5f3d 2040 = xstrdup (linux_proc_pid_to_exec_file (pid));
3d799a95 2041
8af756ef
PA
2042 /* The thread that execed must have been resumed, but, when a
2043 thread execs, it changes its tid to the tgid, and the old
2044 tgid thread might not have been resumed. */
2045 lp->resumed = 1;
6c95b8df
PA
2046 return 0;
2047 }
2048
2049 if (event == PTRACE_EVENT_VFORK_DONE)
2050 {
2051 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2052 {
9327494e
SM
2053 linux_nat_debug_printf
2054 ("Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping",
2055 lp->ptid.lwp ());
3d799a95 2056
6c95b8df
PA
2057 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2058 return 0;
3d799a95
DJ
2059 }
2060
9327494e
SM
2061 linux_nat_debug_printf
2062 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld: ignoring", lp->ptid.lwp ());
2063
6c95b8df 2064 return 1;
3d799a95
DJ
2065 }
2066
2067 internal_error (__FILE__, __LINE__,
2068 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2069}
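
/* Illustrative sketch, not part of GDB: fetching the new LWP id after a
   PTRACE_EVENT_CLONE stop with PTRACE_GETEVENTMSG, the mechanism the
   clone handling above is built on.  Hypothetical helper; assumes
   PTRACE_O_TRACECLONE was set on PID and STATUS came from waitpid for
   this stop.  */

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static long
example_new_clone_lwpid (pid_t pid, int status)
{
  unsigned long new_lwp = 0;

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_CLONE)
    {
      /* The event message is the kernel thread id of the new LWP; it
	 still has a pending SIGSTOP that must be collected with waitpid
	 before the new LWP can be resumed.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwp);
    }
  return (long) new_lwp;
}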
2070
9c3a5d93
PA
2071/* Suspend waiting for a signal. We're mostly interested in
2072 SIGCHLD/SIGINT. */
2073
2074static void
2075wait_for_signal ()
2076{
9327494e 2077 linux_nat_debug_printf ("about to sigsuspend");
9c3a5d93
PA
2078 sigsuspend (&suspend_mask);
2079
2080 /* If the quit flag is set, it means that the user pressed Ctrl-C
2081 and we're debugging a process that is running on a separate
2082 terminal, so we must forward the Ctrl-C to the inferior. (If the
2083 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2084 inferior directly.) We must do this here because functions that
2085 need to block waiting for a signal loop forever until there's an
2086 event to report before returning back to the event loop. */
2087 if (!target_terminal::is_ours ())
2088 {
2089 if (check_quit_flag ())
2090 target_pass_ctrlc ();
2091 }
2092}
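
/* Illustrative sketch, not part of GDB: the block-then-sigsuspend pattern
   wait_for_signal depends on.  SIGCHLD is kept blocked while checking for
   work, and sigsuspend atomically unblocks it and sleeps, so a SIGCHLD
   arriving in between cannot be lost.  Hypothetical helper; assumes a
   SIGCHLD handler has already been installed.  */

#include <signal.h>
#include <stddef.h>

static void
example_wait_for_sigchld (void)
{
  sigset_t block, suspend;

  sigemptyset (&block);
  sigaddset (&block, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block, &suspend);	/* save the old mask */
  sigdelset (&suspend, SIGCHLD);		/* unblock it while suspended */

  /* ... check for already-reported events here ... */

  sigsuspend (&suspend);	/* returns once a signal handler has run */
  sigprocmask (SIG_UNBLOCK, &block, NULL);
}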
2093
d6b0e80f
AC
2094/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2095 exited. */
2096
2097static int
2098wait_lwp (struct lwp_info *lp)
2099{
2100 pid_t pid;
432b4d03 2101 int status = 0;
d6b0e80f 2102 int thread_dead = 0;
432b4d03 2103 sigset_t prev_mask;
d6b0e80f
AC
2104
2105 gdb_assert (!lp->stopped);
2106 gdb_assert (lp->status == 0);
2107
432b4d03
JK
2108 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2109 block_child_signals (&prev_mask);
2110
2111 for (;;)
d6b0e80f 2112 {
e38504b3 2113 pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
a9f4bb21
PA
2114 if (pid == -1 && errno == ECHILD)
2115 {
2116 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2117 now because if this was a non-leader thread execing, we
2118 won't get an exit event. See comments on exec events at
2119 the top of the file. */
a9f4bb21 2120 thread_dead = 1;
9327494e
SM
2121 linux_nat_debug_printf ("%s vanished.",
2122 target_pid_to_str (lp->ptid).c_str ());
a9f4bb21 2123 }
432b4d03
JK
2124 if (pid != 0)
2125 break;
2126
2127 /* Bugs 10970, 12702.
2128 Thread group leader may have exited in which case we'll lock up in
2129 waitpid if there are other threads, even if they are all zombies too.
2130 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2131 tkill(pid,0) cannot be used here as it gets ESRCH for both
2132 zombie and running processes.
432b4d03
JK
2133
2134 As a workaround, check if we're waiting for the thread group leader and
2135 if it's a zombie, and avoid calling waitpid if it is.
2136
2137 This is racy, what if the tgl becomes a zombie right after we check?
2138 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2139 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2140
e38504b3
TT
2141 if (lp->ptid.pid () == lp->ptid.lwp ()
2142 && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
d6b0e80f 2143 {
d6b0e80f 2144 thread_dead = 1;
9327494e
SM
2145 linux_nat_debug_printf ("Thread group leader %s vanished.",
2146 target_pid_to_str (lp->ptid).c_str ());
432b4d03 2147 break;
d6b0e80f 2148 }
432b4d03
JK
2149
2150 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2151 handlers get invoked even though our caller intentionally blocked
2152 them with block_child_signals. That only matters to the loop in
2153 linux_nat_wait_1, and there, if we get called, my_waitpid is called
2154 again before it gets to sigsuspend, so we can safely let the
2155 handlers get executed here. */
9c3a5d93 2156 wait_for_signal ();
432b4d03
JK
2157 }
2158
2159 restore_child_signals_mask (&prev_mask);
2160
d6b0e80f
AC
2161 if (!thread_dead)
2162 {
e38504b3 2163 gdb_assert (pid == lp->ptid.lwp ());
d6b0e80f 2164
9327494e 2165 linux_nat_debug_printf ("waitpid %s received %s",
a068643d 2166 target_pid_to_str (lp->ptid).c_str (),
d6b0e80f 2167 status_to_str (status));
d6b0e80f 2168
a9f4bb21
PA
2169 /* Check if the thread has exited. */
2170 if (WIFEXITED (status) || WIFSIGNALED (status))
2171 {
aa01bd36 2172 if (report_thread_events
e38504b3 2173 || lp->ptid.pid () == lp->ptid.lwp ())
69dde7dc 2174 {
9327494e 2175 linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());
69dde7dc 2176
aa01bd36 2177 /* If this is the leader exiting, it means the whole
69dde7dc
PA
2178 process is gone. Store the status to report to the
2179 core. Store it in lp->waitstatus, because lp->status
2180 would be ambiguous (W_EXITCODE(0,0) == 0). */
2181 store_waitstatus (&lp->waitstatus, status);
2182 return 0;
2183 }
2184
a9f4bb21 2185 thread_dead = 1;
9327494e
SM
2186 linux_nat_debug_printf ("%s exited.",
2187 target_pid_to_str (lp->ptid).c_str ());
a9f4bb21 2188 }
d6b0e80f
AC
2189 }
2190
2191 if (thread_dead)
2192 {
e26af52f 2193 exit_lwp (lp);
d6b0e80f
AC
2194 return 0;
2195 }
2196
2197 gdb_assert (WIFSTOPPED (status));
8817a6f2 2198 lp->stopped = 1;
d6b0e80f 2199
8784d563
PA
2200 if (lp->must_set_ptrace_flags)
2201 {
5b6d1e4f 2202 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2203 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2204
e38504b3 2205 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2206 lp->must_set_ptrace_flags = 0;
2207 }
2208
ca2163eb
PA
2209 /* Handle GNU/Linux's syscall SIGTRAPs. */
2210 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2211 {
2212 /* No longer need the sysgood bit. The ptrace event ends up
2213 recorded in lp->waitstatus if we care for it. We can carry
2214 on handling the event like a regular SIGTRAP from here
2215 on. */
2216 status = W_STOPCODE (SIGTRAP);
2217 if (linux_handle_syscall_trap (lp, 1))
2218 return wait_lwp (lp);
2219 }
bfd09d20
JS
2220 else
2221 {
2222 /* Almost all other ptrace-stops are known to be outside of system
2223 calls, with further exceptions in linux_handle_extended_wait. */
2224 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2225 }
ca2163eb 2226
d6b0e80f 2227 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2228 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2229 && linux_is_extended_waitstatus (status))
d6b0e80f 2230 {
9327494e 2231 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
4dd63d48 2232 linux_handle_extended_wait (lp, status);
20ba1ce6 2233 return 0;
d6b0e80f
AC
2234 }
2235
2236 return status;
2237}
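
/* Illustrative sketch, not part of GDB: detecting a zombie thread-group
   leader by reading its State: line from /proc, which is what the
   workaround above uses instead of risking a blocking waitpid.
   Hypothetical helper; GDB's real check lives in nat/linux-procfs.c.  */

#include <stdio.h>

static int
example_pid_is_zombie (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;			/* Already fully reaped.  */
  while (fgets (line, sizeof (line), f) != NULL)
    {
      char state;

      if (sscanf (line, "State: %c", &state) == 1)
	{
	  zombie = (state == 'Z');
	  break;
	}
    }
  fclose (f);
  return zombie;
}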
2238
2239/* Send a SIGSTOP to LP. */
2240
2241static int
d3a70e03 2242stop_callback (struct lwp_info *lp)
d6b0e80f
AC
2243{
2244 if (!lp->stopped && !lp->signalled)
2245 {
2246 int ret;
2247
9327494e 2248 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
a068643d 2249 target_pid_to_str (lp->ptid).c_str ());
9327494e 2250
d6b0e80f 2251 errno = 0;
e38504b3 2252 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
9327494e 2253 linux_nat_debug_printf ("lwp kill %d %s", ret,
d6b0e80f 2254 errno ? safe_strerror (errno) : "ERRNO-OK");
d6b0e80f
AC
2255
2256 lp->signalled = 1;
2257 gdb_assert (lp->status == 0);
2258 }
2259
2260 return 0;
2261}
2262
7b50312a
PA
2263/* Request a stop on LWP. */
2264
2265void
2266linux_stop_lwp (struct lwp_info *lwp)
2267{
d3a70e03 2268 stop_callback (lwp);
7b50312a
PA
2269}
2270
2db9a427
PA
2271/* See linux-nat.h */
2272
2273void
2274linux_stop_and_wait_all_lwps (void)
2275{
2276 /* Stop all LWP's ... */
d3a70e03 2277 iterate_over_lwps (minus_one_ptid, stop_callback);
2db9a427
PA
2278
2279 /* ... and wait until all of them have reported back that
2280 they're no longer running. */
d3a70e03 2281 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2db9a427
PA
2282}
2283
2284/* See linux-nat.h */
2285
2286void
2287linux_unstop_all_lwps (void)
2288{
2289 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
2290 [] (struct lwp_info *info)
2291 {
2292 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2293 });
2db9a427
PA
2294}
2295
57380f4e 2296/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2297
2298static int
57380f4e
DJ
2299linux_nat_has_pending_sigint (int pid)
2300{
2301 sigset_t pending, blocked, ignored;
57380f4e
DJ
2302
2303 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2304
2305 if (sigismember (&pending, SIGINT)
2306 && !sigismember (&ignored, SIGINT))
2307 return 1;
2308
2309 return 0;
2310}
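
/* Illustrative sketch, not part of GDB: reading the pending and ignored
   signal bitmaps from /proc/PID/status, the kind of information the
   check above is based on.  Each mask is a hex bitmap with bit
   (signo - 1) set per signal.  Hypothetical, simplified helper -- the
   real code also considers the shared-pending (ShdPnd) and blocked
   sets.  */

#include <stdio.h>
#include <string.h>

static int
example_signal_pending_not_ignored (int pid, int signo)
{
  char path[64], line[256];
  unsigned long long pending = 0, ignored = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    {
      if (strncmp (line, "SigPnd:", 7) == 0)
	sscanf (line + 7, "%llx", &pending);
      else if (strncmp (line, "SigIgn:", 7) == 0)
	sscanf (line + 7, "%llx", &ignored);
    }
  fclose (f);
  return ((pending >> (signo - 1)) & 1) && !((ignored >> (signo - 1)) & 1);
}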
2311
2312/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2313
2314static int
d3a70e03 2315set_ignore_sigint (struct lwp_info *lp)
d6b0e80f 2316{
57380f4e
DJ
2317 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2318 flag to consume the next one. */
2319 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2320 && WSTOPSIG (lp->status) == SIGINT)
2321 lp->status = 0;
2322 else
2323 lp->ignore_sigint = 1;
2324
2325 return 0;
2326}
2327
2328/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2329 This function is called after we know the LWP has stopped; if the LWP
2330 stopped before the expected SIGINT was delivered, then it will never have
2331 arrived. Also, if the signal was delivered to a shared queue and consumed
2332 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2333
57380f4e
DJ
2334static void
2335maybe_clear_ignore_sigint (struct lwp_info *lp)
2336{
2337 if (!lp->ignore_sigint)
2338 return;
2339
e38504b3 2340 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
57380f4e 2341 {
9327494e
SM
2342 linux_nat_debug_printf ("Clearing bogus flag for %s",
2343 target_pid_to_str (lp->ptid).c_str ());
57380f4e
DJ
2344 lp->ignore_sigint = 0;
2345 }
2346}
2347
ebec9a0f
PA
2348/* Fetch the possible triggered data watchpoint info and store it in
2349 LP.
2350
2351 On some archs, like x86, that use debug registers to set
2352 watchpoints, it's possible that the way to know which watched
2353 address trapped, is to check the register that is used to select
2354 which address to watch. Problem is, between setting the watchpoint
2355 and reading back which data address trapped, the user may change
2356 the set of watchpoints, and, as a consequence, GDB changes the
2357 debug registers in the inferior. To avoid reading back a stale
2358 stopped-data-address when that happens, we cache in LP the fact
2359 that a watchpoint trapped, and the corresponding data address, as
2360 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2361 registers meanwhile, we have the cached data we can rely on. */
2362
9c02b525
PA
2363static int
2364check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f 2365{
2989a365 2366 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
ebec9a0f
PA
2367 inferior_ptid = lp->ptid;
2368
f6ac5f3d 2369 if (linux_target->low_stopped_by_watchpoint ())
ebec9a0f 2370 {
15c66dd6 2371 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
f6ac5f3d
PA
2372 lp->stopped_data_address_p
2373 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
ebec9a0f
PA
2374 }
2375
15c66dd6 2376 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2377}
2378
9c02b525 2379/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f 2380
57810aa7 2381bool
f6ac5f3d 2382linux_nat_target::stopped_by_watchpoint ()
ebec9a0f
PA
2383{
2384 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2385
2386 gdb_assert (lp != NULL);
2387
15c66dd6 2388 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2389}
2390
57810aa7 2391bool
f6ac5f3d 2392linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
ebec9a0f
PA
2393{
2394 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2395
2396 gdb_assert (lp != NULL);
2397
2398 *addr_p = lp->stopped_data_address;
2399
2400 return lp->stopped_data_address_p;
2401}
2402
26ab7092
JK
2403/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2404
135340af
PA
2405bool
2406linux_nat_target::low_status_is_event (int status)
26ab7092
JK
2407{
2408 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2409}
2410
57380f4e
DJ
2411/* Wait until LP is stopped. */
2412
2413static int
d3a70e03 2414stop_wait_callback (struct lwp_info *lp)
57380f4e 2415{
5b6d1e4f 2416 inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
6c95b8df
PA
2417
2418 /* If this is a vfork parent, bail out, it is not going to report
2419 any SIGSTOP until the vfork is done with. */
2420 if (inf->vfork_child != NULL)
2421 return 0;
2422
d6b0e80f
AC
2423 if (!lp->stopped)
2424 {
2425 int status;
2426
2427 status = wait_lwp (lp);
2428 if (status == 0)
2429 return 0;
2430
57380f4e
DJ
2431 if (lp->ignore_sigint && WIFSTOPPED (status)
2432 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2433 {
57380f4e 2434 lp->ignore_sigint = 0;
d6b0e80f
AC
2435
2436 errno = 0;
e38504b3 2437 ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
8817a6f2 2438 lp->stopped = 0;
9327494e
SM
2439 linux_nat_debug_printf
2440 ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
2441 target_pid_to_str (lp->ptid).c_str (),
2442 errno ? safe_strerror (errno) : "OK");
d6b0e80f 2443
d3a70e03 2444 return stop_wait_callback (lp);
d6b0e80f
AC
2445 }
2446
57380f4e
DJ
2447 maybe_clear_ignore_sigint (lp);
2448
d6b0e80f
AC
2449 if (WSTOPSIG (status) != SIGSTOP)
2450 {
e5ef252a 2451 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2452
9327494e
SM
2453 linux_nat_debug_printf ("Pending event %s in %s",
2454 status_to_str ((int) status),
2455 target_pid_to_str (lp->ptid).c_str ());
e5ef252a
PA
2456
2457 /* Save the sigtrap event. */
2458 lp->status = status;
e5ef252a 2459 gdb_assert (lp->signalled);
e7ad2f14 2460 save_stop_reason (lp);
d6b0e80f
AC
2461 }
2462 else
2463 {
7010835a 2464 /* We caught the SIGSTOP that we intended to catch. */
e5ef252a 2465
9327494e
SM
2466 linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
2467 target_pid_to_str (lp->ptid).c_str ());
e5ef252a 2468
d6b0e80f 2469 lp->signalled = 0;
7010835a
AB
2470
2471 /* If we are waiting for this stop so we can report the thread
2472 stopped then we need to record this status. Otherwise, we can
2473 now discard this stop event. */
2474 if (lp->last_resume_kind == resume_stop)
2475 {
2476 lp->status = status;
2477 save_stop_reason (lp);
2478 }
d6b0e80f
AC
2479 }
2480 }
2481
2482 return 0;
2483}
2484
9c02b525
PA
2485/* Return non-zero if LP has a wait status pending. Discard the
2486 pending event and resume the LWP if the event that originally
2487 caused the stop became uninteresting. */
d6b0e80f
AC
2488
2489static int
d3a70e03 2490status_callback (struct lwp_info *lp)
d6b0e80f
AC
2491{
2492 /* Only report a pending wait status if we pretend that this has
2493 indeed been resumed. */
ca2163eb
PA
2494 if (!lp->resumed)
2495 return 0;
2496
eb54c8bf
PA
2497 if (!lwp_status_pending_p (lp))
2498 return 0;
2499
15c66dd6
PA
2500 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2501 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2502 {
5b6d1e4f 2503 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
9c02b525
PA
2504 CORE_ADDR pc;
2505 int discard = 0;
2506
9c02b525
PA
2507 pc = regcache_read_pc (regcache);
2508
2509 if (pc != lp->stop_pc)
2510 {
9327494e
SM
2511 linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
2512 target_pid_to_str (lp->ptid).c_str (),
2513 paddress (target_gdbarch (), lp->stop_pc),
2514 paddress (target_gdbarch (), pc));
9c02b525
PA
2515 discard = 1;
2516 }
faf09f01
PA
2517
2518#if !USE_SIGTRAP_SIGINFO
a01bda52 2519 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
9c02b525 2520 {
9327494e
SM
2521 linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
2522 target_pid_to_str (lp->ptid).c_str (),
2523 paddress (target_gdbarch (), lp->stop_pc));
9c02b525
PA
2524
2525 discard = 1;
2526 }
faf09f01 2527#endif
9c02b525
PA
2528
2529 if (discard)
2530 {
9327494e
SM
2531 linux_nat_debug_printf ("pending event of %s cancelled.",
2532 target_pid_to_str (lp->ptid).c_str ());
9c02b525
PA
2533
2534 lp->status = 0;
2535 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2536 return 0;
2537 }
9c02b525
PA
2538 }
2539
eb54c8bf 2540 return 1;
d6b0e80f
AC
2541}
2542
d6b0e80f
AC
2543/* Count the LWP's that have had events. */
2544
2545static int
d3a70e03 2546count_events_callback (struct lwp_info *lp, int *count)
d6b0e80f 2547{
d6b0e80f
AC
2548 gdb_assert (count != NULL);
2549
9c02b525
PA
2550 /* Select only resumed LWPs that have an event pending. */
2551 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2552 (*count)++;
2553
2554 return 0;
2555}
2556
2557/* Select the LWP (if any) that is currently being single-stepped. */
2558
2559static int
d3a70e03 2560select_singlestep_lwp_callback (struct lwp_info *lp)
d6b0e80f 2561{
25289eb2
PA
2562 if (lp->last_resume_kind == resume_step
2563 && lp->status != 0)
d6b0e80f
AC
2564 return 1;
2565 else
2566 return 0;
2567}
2568
8a99810d
PA
2569/* Returns true if LP has a status pending. */
2570
2571static int
2572lwp_status_pending_p (struct lwp_info *lp)
2573{
2574 /* We check for lp->waitstatus in addition to lp->status, because we
2575 can have pending process exits recorded in lp->waitstatus, since
2576 W_EXITCODE(0,0) happens to be 0 and would be ambiguous in lp->status. */
2577 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2578}
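
/* Illustrative example, not part of GDB: why a raw wait status of zero is
   ambiguous.  With glibc's W_EXITCODE macro, a clean exit (0) encodes to
   the very value used here to mean "no status pending", which is why
   process exits are parked in lp->waitstatus instead.  Hypothetical
   helper.  */

#include <assert.h>
#include <sys/wait.h>

static void
example_exit_status_ambiguity (void)
{
  int status = W_EXITCODE (0, 0);	/* exit (0) as seen by waitpid */

  assert (status == 0);			/* indistinguishable from "none" */
  assert (WIFEXITED (status) && WEXITSTATUS (status) == 0);
}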
2579
b90fc188 2580/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2581
2582static int
d3a70e03 2583select_event_lwp_callback (struct lwp_info *lp, int *selector)
d6b0e80f 2584{
d6b0e80f
AC
2585 gdb_assert (selector != NULL);
2586
9c02b525
PA
2587 /* Select only resumed LWPs that have an event pending. */
2588 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2589 if ((*selector)-- == 0)
2590 return 1;
2591
2592 return 0;
2593}
2594
e7ad2f14
PA
2595/* Called when the LWP stopped for a signal/trap. If it stopped for a
2596 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2597 and save the result in the LWP's stop_reason field. If it stopped
2598 for a breakpoint, decrement the PC if necessary on the lwp's
2599 architecture. */
9c02b525 2600
e7ad2f14
PA
2601static void
2602save_stop_reason (struct lwp_info *lp)
710151dd 2603{
e7ad2f14
PA
2604 struct regcache *regcache;
2605 struct gdbarch *gdbarch;
515630c5 2606 CORE_ADDR pc;
9c02b525 2607 CORE_ADDR sw_bp_pc;
faf09f01
PA
2608#if USE_SIGTRAP_SIGINFO
2609 siginfo_t siginfo;
2610#endif
9c02b525 2611
e7ad2f14
PA
2612 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2613 gdb_assert (lp->status != 0);
2614
135340af 2615 if (!linux_target->low_status_is_event (lp->status))
e7ad2f14
PA
2616 return;
2617
5b6d1e4f 2618 regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 2619 gdbarch = regcache->arch ();
e7ad2f14 2620
9c02b525 2621 pc = regcache_read_pc (regcache);
527a273a 2622 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2623
faf09f01
PA
2624#if USE_SIGTRAP_SIGINFO
2625 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2626 {
2627 if (siginfo.si_signo == SIGTRAP)
2628 {
e7ad2f14
PA
2629 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2630 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2631 {
e7ad2f14
PA
2632 /* The si_code is ambiguous on this arch -- check debug
2633 registers. */
2634 if (!check_stopped_by_watchpoint (lp))
2635 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2636 }
2637 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2638 {
2639 /* If we determine the LWP stopped for a SW breakpoint,
2640 trust it. Particularly don't check watchpoint
7da6a5b9 2641 registers, because, at least on s390, we'd find
e7ad2f14
PA
2642 stopped-by-watchpoint as long as there's a watchpoint
2643 set. */
faf09f01 2644 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2645 }
e7ad2f14 2646 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2647 {
e7ad2f14
PA
2648 /* This can indicate either a hardware breakpoint or
2649 hardware watchpoint. Check debug registers. */
2650 if (!check_stopped_by_watchpoint (lp))
2651 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2652 }
2bf6fb9d
PA
2653 else if (siginfo.si_code == TRAP_TRACE)
2654 {
9327494e
SM
2655 linux_nat_debug_printf ("%s stopped by trace",
2656 target_pid_to_str (lp->ptid).c_str ());
e7ad2f14
PA
2657
2658 /* We may have single stepped an instruction that
2659 triggered a watchpoint. In that case, on some
2660 architectures (such as x86), instead of TRAP_HWBKPT,
2661 si_code indicates TRAP_TRACE, and we need to check
2662 the debug registers separately. */
2663 check_stopped_by_watchpoint (lp);
2bf6fb9d 2664 }
faf09f01
PA
2665 }
2666 }
2667#else
9c02b525 2668 if ((!lp->step || lp->stop_pc == sw_bp_pc)
a01bda52 2669 && software_breakpoint_inserted_here_p (regcache->aspace (),
9c02b525 2670 sw_bp_pc))
710151dd 2671 {
9c02b525
PA
2672 /* The LWP was either continued, or stepped a software
2673 breakpoint instruction. */
e7ad2f14
PA
2674 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2675 }
2676
a01bda52 2677 if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
e7ad2f14
PA
2678 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2679
2680 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2681 check_stopped_by_watchpoint (lp);
2682#endif
2683
2684 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2685 {
9327494e
SM
2686 linux_nat_debug_printf ("%s stopped by software breakpoint",
2687 target_pid_to_str (lp->ptid).c_str ());
710151dd
PA
2688
2689 /* Back up the PC if necessary. */
9c02b525
PA
2690 if (pc != sw_bp_pc)
2691 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2692
e7ad2f14
PA
2693 /* Update this so we record the correct stop PC below. */
2694 pc = sw_bp_pc;
710151dd 2695 }
e7ad2f14 2696 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525 2697 {
9327494e
SM
2698 linux_nat_debug_printf ("%s stopped by hardware breakpoint",
2699 target_pid_to_str (lp->ptid).c_str ());
e7ad2f14
PA
2700 }
2701 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2702 {
9327494e
SM
2703 linux_nat_debug_printf ("%s stopped by hardware watchpoint",
2704 target_pid_to_str (lp->ptid).c_str ());
9c02b525 2705 }
d6b0e80f 2706
e7ad2f14 2707 lp->stop_pc = pc;
d6b0e80f
AC
2708}
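
/* Illustrative sketch, not part of GDB: classifying a SIGTRAP stop via
   PTRACE_GETSIGINFO, the mechanism save_stop_reason uses when
   USE_SIGTRAP_SIGINFO is available.  Hypothetical helper; exact si_code
   semantics are architecture-dependent, as the comments above explain,
   and TRAP_HWBKPT may need _GNU_SOURCE to be visible.  */

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void
example_classify_sigtrap (pid_t lwpid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, lwpid, 0, &si) != 0
      || si.si_signo != SIGTRAP)
    return;

  if (si.si_code == TRAP_BRKPT)
    printf ("software breakpoint\n");
  else if (si.si_code == TRAP_HWBKPT)
    printf ("hardware breakpoint or watchpoint\n");
  else if (si.si_code == TRAP_TRACE)
    printf ("single-step trap\n");
  else
    printf ("other SIGTRAP, si_code %d\n", si.si_code);
}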
2709
faf09f01
PA
2710
2711/* Returns true if the LWP had stopped for a software breakpoint. */
2712
57810aa7 2713bool
f6ac5f3d 2714linux_nat_target::stopped_by_sw_breakpoint ()
faf09f01
PA
2715{
2716 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2717
2718 gdb_assert (lp != NULL);
2719
2720 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2721}
2722
2723/* Implement the supports_stopped_by_sw_breakpoint method. */
2724
57810aa7 2725bool
f6ac5f3d 2726linux_nat_target::supports_stopped_by_sw_breakpoint ()
faf09f01
PA
2727{
2728 return USE_SIGTRAP_SIGINFO;
2729}
2730
2731/* Returns true if the LWP had stopped for a hardware
2732 breakpoint/watchpoint. */
2733
57810aa7 2734bool
f6ac5f3d 2735linux_nat_target::stopped_by_hw_breakpoint ()
faf09f01
PA
2736{
2737 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2738
2739 gdb_assert (lp != NULL);
2740
2741 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2742}
2743
2744/* Implement the supports_stopped_by_hw_breakpoint method. */
2745
57810aa7 2746bool
f6ac5f3d 2747linux_nat_target::supports_stopped_by_hw_breakpoint ()
faf09f01
PA
2748{
2749 return USE_SIGTRAP_SIGINFO;
2750}
2751
d6b0e80f
AC
2752/* Select one LWP out of those that have events pending. */
2753
2754static void
d90e17a7 2755select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2756{
2757 int num_events = 0;
2758 int random_selector;
9c02b525 2759 struct lwp_info *event_lp = NULL;
d6b0e80f 2760
ac264b3b 2761 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2762 (*orig_lp)->status = *status;
2763
9c02b525
PA
2764 /* In all-stop, give preference to the LWP that is being
2765 single-stepped. There will be at most one, and it will be the
2766 LWP that the core is most interested in. If we didn't do this,
2767 then we'd have to handle pending step SIGTRAPs somehow in case
2768 the core later continues the previously-stepped thread, as
2769 otherwise we'd report the pending SIGTRAP then, and the core, not
2770 having stepped the thread, wouldn't understand what the trap was
2771 for, and therefore would report it to the user as a random
2772 signal. */
fbea99ea 2773 if (!target_is_non_stop_p ())
d6b0e80f 2774 {
d3a70e03 2775 event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
9c02b525
PA
2776 if (event_lp != NULL)
2777 {
9327494e
SM
2778 linux_nat_debug_printf ("Select single-step %s",
2779 target_pid_to_str (event_lp->ptid).c_str ());
9c02b525 2780 }
d6b0e80f 2781 }
9c02b525
PA
2782
2783 if (event_lp == NULL)
d6b0e80f 2784 {
9c02b525 2785 /* Pick one at random, out of those which have had events. */
d6b0e80f 2786
9c02b525 2787 /* First see how many events we have. */
d3a70e03
TT
2788 iterate_over_lwps (filter,
2789 [&] (struct lwp_info *info)
2790 {
2791 return count_events_callback (info, &num_events);
2792 });
8bf3b159 2793 gdb_assert (num_events > 0);
d6b0e80f 2794
9c02b525
PA
2795 /* Now randomly pick a LWP out of those that have had
2796 events. */
d6b0e80f
AC
2797 random_selector = (int)
2798 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2799
9327494e
SM
2800 if (num_events > 1)
2801 linux_nat_debug_printf ("Found %d events, selecting #%d",
2802 num_events, random_selector);
d6b0e80f 2803
d3a70e03
TT
2804 event_lp
2805 = (iterate_over_lwps
2806 (filter,
2807 [&] (struct lwp_info *info)
2808 {
2809 return select_event_lwp_callback (info,
2810 &random_selector);
2811 }));
d6b0e80f
AC
2812 }
2813
2814 if (event_lp != NULL)
2815 {
2816 /* Switch the event LWP. */
2817 *orig_lp = event_lp;
2818 *status = event_lp->status;
2819 }
2820
2821 /* Flush the wait status for the event LWP. */
2822 (*orig_lp)->status = 0;
2823}
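
/* Illustrative note, not part of GDB: the scaling expression used above
   maps rand () in [0, RAND_MAX] onto [0, num_events - 1].  Hypothetical
   helper, shown only to make the arithmetic concrete.  */

#include <stdlib.h>

static int
example_pick_random_index (int num_events)
{
  /* E.g. with num_events == 3 and RAND_MAX == 2147483647, rand () values
     up to 715827882 pick index 0, the next third pick 1, and so on.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}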
2824
2825/* Return non-zero if LP has been resumed. */
2826
2827static int
d3a70e03 2828resumed_callback (struct lwp_info *lp)
d6b0e80f
AC
2829{
2830 return lp->resumed;
2831}
2832
02f3fc28 2833/* Check if we should go on and pass this event to common code.
7da6a5b9 2834 Return the affected lwp if we should, or NULL otherwise. */
12d9289a 2835
02f3fc28 2836static struct lwp_info *
9c02b525 2837linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2838{
2839 struct lwp_info *lp;
89a5711c 2840 int event = linux_ptrace_get_extended_event (status);
02f3fc28 2841
f2907e49 2842 lp = find_lwp_pid (ptid_t (lwpid));
02f3fc28
PA
2843
2844 /* Check for stop events reported by a process we didn't already
2845 know about - anything not already in our LWP list.
2846
2847 If we're expecting to receive stopped processes after
2848 fork, vfork, and clone events, then we'll just add the
2849 new one to our list and go back to waiting for the event
2850 to be reported - the stopped process might be returned
0e5bf2a8
PA
2851 from waitpid before or after the event is.
2852
2853 But note the case of a non-leader thread exec'ing after the
2854 leader having exited, and gone from our lists. The non-leader
2855 thread changes its tid to the tgid. */
2856
2857 if (WIFSTOPPED (status) && lp == NULL
89a5711c 2858 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
2859 {
2860 /* A multi-thread exec after we had seen the leader exiting. */
9327494e 2861 linux_nat_debug_printf ("Re-adding thread group leader LWP %d.", lwpid);
0e5bf2a8 2862
fd79271b 2863 lp = add_lwp (ptid_t (lwpid, lwpid, 0));
0e5bf2a8
PA
2864 lp->stopped = 1;
2865 lp->resumed = 1;
5b6d1e4f 2866 add_thread (linux_target, lp->ptid);
0e5bf2a8
PA
2867 }
2868
02f3fc28
PA
2869 if (WIFSTOPPED (status) && !lp)
2870 {
9327494e
SM
2871 linux_nat_debug_printf ("saving LWP %ld status %s in stopped_pids list",
2872 (long) lwpid, status_to_str (status));
84636d28 2873 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
2874 return NULL;
2875 }
2876
2877 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 2878 our list, i.e. not part of the current process. This can happen
fd62cb89 2879 if we detach from a program we originally forked and then it
02f3fc28
PA
2880 exits. */
2881 if (!WIFSTOPPED (status) && !lp)
2882 return NULL;
2883
8817a6f2
PA
2884 /* This LWP is stopped now. (And if dead, this prevents it from
2885 ever being continued.) */
2886 lp->stopped = 1;
2887
8784d563
PA
2888 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2889 {
5b6d1e4f 2890 inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
de0d863e 2891 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2892
e38504b3 2893 linux_enable_event_reporting (lp->ptid.lwp (), options);
8784d563
PA
2894 lp->must_set_ptrace_flags = 0;
2895 }
2896
ca2163eb
PA
2897 /* Handle GNU/Linux's syscall SIGTRAPs. */
2898 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2899 {
2900 /* No longer need the sysgood bit. The ptrace event ends up
2901 recorded in lp->waitstatus if we care for it. We can carry
2902 on handling the event like a regular SIGTRAP from here
2903 on. */
2904 status = W_STOPCODE (SIGTRAP);
2905 if (linux_handle_syscall_trap (lp, 0))
2906 return NULL;
2907 }
bfd09d20
JS
2908 else
2909 {
2910 /* Almost all other ptrace-stops are known to be outside of system
2911 calls, with further exceptions in linux_handle_extended_wait. */
2912 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2913 }
02f3fc28 2914
ca2163eb 2915 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2916 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2917 && linux_is_extended_waitstatus (status))
02f3fc28 2918 {
9327494e
SM
2919 linux_nat_debug_printf ("Handling extended status 0x%06x", status);
2920
4dd63d48 2921 if (linux_handle_extended_wait (lp, status))
02f3fc28
PA
2922 return NULL;
2923 }
2924
2925 /* Check if the thread has exited. */
9c02b525
PA
2926 if (WIFEXITED (status) || WIFSIGNALED (status))
2927 {
aa01bd36 2928 if (!report_thread_events
e99b03dc 2929 && num_lwps (lp->ptid.pid ()) > 1)
02f3fc28 2930 {
9327494e
SM
2931 linux_nat_debug_printf ("%s exited.",
2932 target_pid_to_str (lp->ptid).c_str ());
9c02b525 2933
4a6ed09b
PA
2934 /* If there is at least one more LWP, then the exit signal
2935 was not the end of the debugged application and should be
2936 ignored. */
2937 exit_lwp (lp);
2938 return NULL;
02f3fc28
PA
2939 }
2940
77598427
PA
2941 /* Note that even if the leader was ptrace-stopped, it can still
2942 exit, if e.g., some other thread brings down the whole
2943 process (calls `exit'). So don't assert that the lwp is
2944 resumed. */
9327494e
SM
2945 linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
2946 lp->ptid.lwp (), lp->resumed);
02f3fc28 2947
9c02b525
PA
2948 /* Dead LWPs aren't expected to report a pending SIGSTOP. */
2949 lp->signalled = 0;
2950
2951 /* Store the pending event in the waitstatus, because
2952 W_EXITCODE(0,0) == 0. */
2953 store_waitstatus (&lp->waitstatus, status);
2954 return lp;
02f3fc28
PA
2955 }
2956
02f3fc28
PA
2957 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2958 an attempt to stop an LWP. */
2959 if (lp->signalled
2960 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2961 {
02f3fc28
PA
2962 lp->signalled = 0;
2963
2bf6fb9d 2964 if (lp->last_resume_kind == resume_stop)
25289eb2 2965 {
9327494e
SM
2966 linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
2967 target_pid_to_str (lp->ptid).c_str ());
2bf6fb9d
PA
2968 }
2969 else
2970 {
2971 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 2972
9327494e
SM
2973 linux_nat_debug_printf
2974 ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2975 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2976 target_pid_to_str (lp->ptid).c_str ());
02f3fc28 2977
2bf6fb9d 2978 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 2979 gdb_assert (lp->resumed);
25289eb2
PA
2980 return NULL;
2981 }
02f3fc28
PA
2982 }
2983
57380f4e
DJ
2984 /* Make sure we don't report a SIGINT that we have already displayed
2985 for another thread. */
2986 if (lp->ignore_sigint
2987 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2988 {
9327494e
SM
2989 linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
2990 target_pid_to_str (lp->ptid).c_str ());
57380f4e
DJ
2991
2992 /* This is a delayed SIGINT. */
2993 lp->ignore_sigint = 0;
2994
8a99810d 2995 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
9327494e
SM
2996 linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
2997 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2998 target_pid_to_str (lp->ptid).c_str ());
57380f4e
DJ
2999 gdb_assert (lp->resumed);
3000
3001 /* Discard the event. */
3002 return NULL;
3003 }
3004
9c02b525
PA
3005 /* Don't report signals that GDB isn't interested in, such as
3006 signals that are neither printed nor stopped upon. Stopping all
7da6a5b9 3007 threads can be a bit time-consuming, so if we want decent
9c02b525
PA
3008 performance with heavily multi-threaded programs, especially when
3009 they're using a high frequency timer, we'd better avoid it if we
3010 can. */
3011 if (WIFSTOPPED (status))
3012 {
3013 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3014
fbea99ea 3015 if (!target_is_non_stop_p ())
9c02b525
PA
3016 {
3017 /* Only do the below in all-stop, as we currently use SIGSTOP
3018 to implement target_stop (see linux_nat_stop) in
3019 non-stop. */
3020 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3021 {
3022 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3023 forwarded to the entire process group, that is, all LWPs
3024 will receive it - unless they're using CLONE_THREAD to
3025 share signals. Since we only want to report it once, we
3026 mark it as ignored for all LWPs except this one. */
d3a70e03 3027 iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
9c02b525
PA
3028 lp->ignore_sigint = 0;
3029 }
3030 else
3031 maybe_clear_ignore_sigint (lp);
3032 }
3033
3034 /* When using hardware single-step, we need to report every signal.
c9587f88 3035 Otherwise, signals in pass_mask may be short-circuited
d8c06f22
AB
3036 except signals that might be caused by a breakpoint, or SIGSTOP
3037 if we sent the SIGSTOP and are waiting for it to arrive. */
9c02b525 3038 if (!lp->step
c9587f88 3039 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
d8c06f22 3040 && (WSTOPSIG (status) != SIGSTOP
5b6d1e4f 3041 || !find_thread_ptid (linux_target, lp->ptid)->stop_requested)
c9587f88 3042 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3043 {
3044 linux_resume_one_lwp (lp, lp->step, signo);
9327494e
SM
3045 linux_nat_debug_printf
3046 ("%s %s, %s (preempt 'handle')",
3047 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3048 target_pid_to_str (lp->ptid).c_str (),
3049 (signo != GDB_SIGNAL_0
3050 ? strsignal (gdb_signal_to_host (signo)) : "0"));
9c02b525
PA
3051 return NULL;
3052 }
3053 }
3054
02f3fc28
PA
3055 /* An interesting event. */
3056 gdb_assert (lp);
ca2163eb 3057 lp->status = status;
e7ad2f14 3058 save_stop_reason (lp);
02f3fc28
PA
3059 return lp;
3060}
3061
0e5bf2a8
PA
3062/* Detect zombie thread group leaders, and "exit" them. We can't reap
3063 their exits until all other threads in the group have exited. */
3064
3065static void
3066check_zombie_leaders (void)
3067{
08036331 3068 for (inferior *inf : all_inferiors ())
0e5bf2a8
PA
3069 {
3070 struct lwp_info *leader_lp;
3071
3072 if (inf->pid == 0)
3073 continue;
3074
f2907e49 3075 leader_lp = find_lwp_pid (ptid_t (inf->pid));
0e5bf2a8
PA
3076 if (leader_lp != NULL
3077 /* Check if there are other threads in the group, as we may
3078 have raced with the inferior simply exiting. */
3079 && num_lwps (inf->pid) > 1
5f572dec 3080 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8 3081 {
9327494e
SM
3082 linux_nat_debug_printf ("Thread group leader %d zombie "
3083 "(it exited, or another thread execd).",
3084 inf->pid);
0e5bf2a8
PA
3085
3086 /* A leader zombie can mean one of two things:
3087
3088 - It exited, and an exit status is pending and
3089 available, or only the leader exited (not the whole
3090 program). In the latter case, we can't waitpid the
3091 leader's exit status until all other threads are gone.
3092
3093 - There are 3 or more threads in the group, and a thread
4a6ed09b
PA
3094 other than the leader exec'd. See comments on exec
3095 events at the top of the file. We could try
0e5bf2a8
PA
3096 distinguishing the exit and exec cases, by waiting once
3097 more, and seeing if something comes out, but it doesn't
3098 sound useful. The previous leader _does_ go away, and
3099 we'll re-add the new one once we see the exec event
3100 (which is just the same as what would happen if the
3101 previous leader did exit voluntarily before some other
3102 thread execs). */
3103
9327494e 3104 linux_nat_debug_printf ("Thread group leader %d vanished.", inf->pid);
0e5bf2a8
PA
3105 exit_lwp (leader_lp);
3106 }
3107 }
3108}
3109
aa01bd36
PA
3110/* Convenience function that is called when the kernel reports an exit
3111 event. This decides whether to report the event to GDB as a
3112 process exit event, a thread exit event, or to suppress the
3113 event. */
3114
3115static ptid_t
3116filter_exit_event (struct lwp_info *event_child,
3117 struct target_waitstatus *ourstatus)
3118{
3119 ptid_t ptid = event_child->ptid;
3120
e99b03dc 3121 if (num_lwps (ptid.pid ()) > 1)
aa01bd36
PA
3122 {
3123 if (report_thread_events)
3124 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3125 else
3126 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3127
3128 exit_lwp (event_child);
3129 }
3130
3131 return ptid;
3132}
3133
d6b0e80f 3134static ptid_t
f6ac5f3d 3135linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3136 target_wait_flags target_options)
d6b0e80f 3137{
fc9b8e47 3138 sigset_t prev_mask;
4b60df3d 3139 enum resume_kind last_resume_kind;
12d9289a 3140 struct lwp_info *lp;
12d9289a 3141 int status;
d6b0e80f 3142
9327494e 3143 linux_nat_debug_printf ("enter");
b84876c2 3144
f973ed9c
DJ
3145 /* The first time we get here after starting a new inferior, we may
3146 not have added it to the LWP list yet - this is the earliest
3147 moment at which we know its PID. */
677c92fe 3148 if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
f973ed9c 3149 {
677c92fe 3150 ptid_t lwp_ptid (ptid.pid (), ptid.pid ());
27c9d204 3151
677c92fe
SM
3152 /* Upgrade the main thread's ptid. */
3153 thread_change_ptid (linux_target, ptid, lwp_ptid);
3154 lp = add_initial_lwp (lwp_ptid);
f973ed9c
DJ
3155 lp->resumed = 1;
3156 }
3157
12696c10 3158 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3159 block_child_signals (&prev_mask);
d6b0e80f 3160
d6b0e80f 3161 /* First check if there is a LWP with a wait status pending. */
d3a70e03 3162 lp = iterate_over_lwps (ptid, status_callback);
8a99810d 3163 if (lp != NULL)
d6b0e80f 3164 {
9327494e
SM
3165 linux_nat_debug_printf ("Using pending wait status %s for %s.",
3166 status_to_str (lp->status),
3167 target_pid_to_str (lp->ptid).c_str ());
d6b0e80f
AC
3168 }
3169
9c02b525
PA
3170 /* But if we don't find a pending event, we'll have to wait. Always
3171 pull all events out of the kernel. We'll randomly select an
3172 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3173
d90e17a7 3174 while (lp == NULL)
d6b0e80f
AC
3175 {
3176 pid_t lwpid;
3177
0e5bf2a8
PA
3178 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3179 quirks:
3180
3181 - If the thread group leader exits while other threads in the
3182 thread group still exist, waitpid(TGID, ...) hangs. That
3183 waitpid won't return an exit status until the other threads
85102364 3184 in the group are reaped.
0e5bf2a8
PA
3185
3186 - When a non-leader thread execs, that thread just vanishes
3187 without reporting an exit (so we'd hang if we waited for it
3188 explicitly in that case). The exec event is reported to
3189 the TGID pid. */
3190
3191 errno = 0;
4a6ed09b 3192 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8 3193
9327494e
SM
3194 linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
3195 lwpid,
3196 errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3197
d6b0e80f
AC
3198 if (lwpid > 0)
3199 {
9327494e 3200 linux_nat_debug_printf ("waitpid %ld received %s",
d6b0e80f 3201 (long) lwpid, status_to_str (status));
d6b0e80f 3202
9c02b525 3203 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3204 /* Retry until nothing comes out of waitpid. A single
3205 SIGCHLD can indicate more than one child stopped. */
3206 continue;
d6b0e80f
AC
3207 }
3208
20ba1ce6
PA
3209 /* Now that we've pulled all events out of the kernel, resume
3210 LWPs that don't have an interesting event to report. */
3211 iterate_over_lwps (minus_one_ptid,
d3a70e03
TT
3212 [] (struct lwp_info *info)
3213 {
3214 return resume_stopped_resumed_lwps (info, minus_one_ptid);
3215 });
20ba1ce6
PA
3216
3217 /* ... and find an LWP with a status to report to the core, if
3218 any. */
d3a70e03 3219 lp = iterate_over_lwps (ptid, status_callback);
9c02b525
PA
3220 if (lp != NULL)
3221 break;
3222
0e5bf2a8
PA
3223 /* Check for zombie thread group leaders. Those can't be reaped
3224 until all other threads in the thread group are. */
3225 check_zombie_leaders ();
d6b0e80f 3226
0e5bf2a8
PA
3227 /* If there are no resumed children left, bail. We'd be stuck
3228 forever in the sigsuspend call below otherwise. */
d3a70e03 3229 if (iterate_over_lwps (ptid, resumed_callback) == NULL)
0e5bf2a8 3230 {
9327494e 3231 linux_nat_debug_printf ("exit (no resumed LWP)");
b84876c2 3232
0e5bf2a8 3233 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3234
0e5bf2a8
PA
3235 restore_child_signals_mask (&prev_mask);
3236 return minus_one_ptid;
d6b0e80f 3237 }
28736962 3238
0e5bf2a8
PA
3239 /* No interesting event to report to the core. */
3240
3241 if (target_options & TARGET_WNOHANG)
3242 {
9327494e 3243 linux_nat_debug_printf ("exit (ignore)");
28736962 3244
0e5bf2a8 3245 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3246 restore_child_signals_mask (&prev_mask);
3247 return minus_one_ptid;
3248 }
d6b0e80f
AC
3249
3250 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3251 gdb_assert (lp == NULL);
0e5bf2a8
PA
3252
3253 /* Block until we get an event reported with SIGCHLD. */
9c3a5d93 3254 wait_for_signal ();
d6b0e80f
AC
3255 }
3256
d6b0e80f
AC
3257 gdb_assert (lp);
3258
ca2163eb
PA
3259 status = lp->status;
3260 lp->status = 0;
3261
fbea99ea 3262 if (!target_is_non_stop_p ())
4c28f408
PA
3263 {
 3264	      /* Now stop all other LWPs ...  */
d3a70e03 3265 iterate_over_lwps (minus_one_ptid, stop_callback);
4c28f408
PA
3266
3267 /* ... and wait until all of them have reported back that
3268 they're no longer running. */
d3a70e03 3269 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
9c02b525
PA
3270 }
3271
3272 /* If we're not waiting for a specific LWP, choose an event LWP from
3273 among those that have had events. Giving equal priority to all
3274 LWPs that have had events helps prevent starvation. */
d7e15655 3275 if (ptid == minus_one_ptid || ptid.is_pid ())
9c02b525
PA
3276 select_event_lwp (ptid, &lp, &status);
3277
3278 gdb_assert (lp != NULL);
3279
3280 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3281 it was a software breakpoint, and we can't reliably support the
3282 "stopped by software breakpoint" stop reason. */
3283 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3284 && !USE_SIGTRAP_SIGINFO)
9c02b525 3285 {
5b6d1e4f 3286 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3287 struct gdbarch *gdbarch = regcache->arch ();
527a273a 3288 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3289
9c02b525
PA
3290 if (decr_pc != 0)
3291 {
3292 CORE_ADDR pc;
d6b0e80f 3293
9c02b525
PA
3294 pc = regcache_read_pc (regcache);
3295 regcache_write_pc (regcache, pc + decr_pc);
3296 }
3297 }
e3e9f5a2 3298
9c02b525
PA
3299 /* We'll need this to determine whether to report a SIGSTOP as
3300 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3301 clears it. */
3302 last_resume_kind = lp->last_resume_kind;
4b60df3d 3303
fbea99ea 3304 if (!target_is_non_stop_p ())
9c02b525 3305 {
e3e9f5a2
PA
3306 /* In all-stop, from the core's perspective, all LWPs are now
3307 stopped until a new resume action is sent over. */
d3a70e03 3308 iterate_over_lwps (minus_one_ptid, resume_clear_callback);
e3e9f5a2
PA
3309 }
3310 else
25289eb2 3311 {
d3a70e03 3312 resume_clear_callback (lp);
25289eb2 3313 }
d6b0e80f 3314
135340af 3315 if (linux_target->low_status_is_event (status))
d6b0e80f 3316 {
9327494e
SM
3317 linux_nat_debug_printf ("trap ptid is %s.",
3318 target_pid_to_str (lp->ptid).c_str ());
d6b0e80f 3319 }
d6b0e80f
AC
3320
3321 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3322 {
3323 *ourstatus = lp->waitstatus;
3324 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3325 }
3326 else
3327 store_waitstatus (ourstatus, status);
3328
9327494e 3329 linux_nat_debug_printf ("exit");
b84876c2 3330
7feb7d06 3331 restore_child_signals_mask (&prev_mask);
1e225492 3332
4b60df3d 3333 if (last_resume_kind == resume_stop
25289eb2
PA
3334 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3335 && WSTOPSIG (status) == SIGSTOP)
3336 {
3337 /* A thread that has been requested to stop by GDB with
3338 target_stop, and it stopped cleanly, so report as SIG0. The
3339 use of SIGSTOP is an implementation detail. */
a493e3e2 3340 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3341 }
3342
1e225492
JK
3343 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3344 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3345 lp->core = -1;
3346 else
2e794194 3347 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3348
aa01bd36
PA
3349 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3350 return filter_exit_event (lp, ourstatus);
3351
f973ed9c 3352 return lp->ptid;
d6b0e80f
AC
3353}
3354
e3e9f5a2
PA
3355/* Resume LWPs that are currently stopped without any pending status
3356 to report, but are resumed from the core's perspective. */
3357
3358static int
d3a70e03 3359resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
e3e9f5a2 3360{
4dd63d48
PA
3361 if (!lp->stopped)
3362 {
9327494e
SM
3363 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
3364 target_pid_to_str (lp->ptid).c_str ());
4dd63d48
PA
3365 }
3366 else if (!lp->resumed)
3367 {
9327494e
SM
3368 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
3369 target_pid_to_str (lp->ptid).c_str ());
4dd63d48
PA
3370 }
3371 else if (lwp_status_pending_p (lp))
3372 {
9327494e
SM
3373 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
3374 target_pid_to_str (lp->ptid).c_str ());
4dd63d48
PA
3375 }
3376 else
e3e9f5a2 3377 {
5b6d1e4f 3378 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
ac7936df 3379 struct gdbarch *gdbarch = regcache->arch ();
336060f3 3380
a70b8144 3381 try
e3e9f5a2 3382 {
23f238d3
PA
3383 CORE_ADDR pc = regcache_read_pc (regcache);
3384 int leave_stopped = 0;
e3e9f5a2 3385
23f238d3
PA
3386 /* Don't bother if there's a breakpoint at PC that we'd hit
3387 immediately, and we're not waiting for this LWP. */
d3a70e03 3388 if (!lp->ptid.matches (wait_ptid))
23f238d3 3389 {
a01bda52 3390 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
23f238d3
PA
3391 leave_stopped = 1;
3392 }
e3e9f5a2 3393
23f238d3
PA
3394 if (!leave_stopped)
3395 {
9327494e
SM
3396 linux_nat_debug_printf
3397 ("resuming stopped-resumed LWP %s at %s: step=%d",
3398 target_pid_to_str (lp->ptid).c_str (), paddress (gdbarch, pc),
3399 lp->step);
23f238d3
PA
3400
3401 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3402 }
3403 }
230d2906 3404 catch (const gdb_exception_error &ex)
23f238d3
PA
3405 {
3406 if (!check_ptrace_stopped_lwp_gone (lp))
eedc3f4f 3407 throw;
23f238d3 3408 }
e3e9f5a2
PA
3409 }
3410
3411 return 0;
3412}
3413
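/* The wait target method.  Handles async-mode bookkeeping and, in
   non-stop mode, re-resumes stopped-but-uninteresting LWPs, then
   defers to linux_nat_wait_1 above for the actual event handling.  */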
f6ac5f3d
PA
3414ptid_t
3415linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
b60cea74 3416 target_wait_flags target_options)
7feb7d06
PA
3417{
3418 ptid_t event_ptid;
3419
9327494e
SM
3420 linux_nat_debug_printf ("[%s], [%s]", target_pid_to_str (ptid).c_str (),
3421 target_options_to_string (target_options).c_str ());
7feb7d06
PA
3422
3423 /* Flush the async file first. */
d9d41e78 3424 if (target_is_async_p ())
7feb7d06
PA
3425 async_file_flush ();
3426
e3e9f5a2
PA
3427 /* Resume LWPs that are currently stopped without any pending status
3428 to report, but are resumed from the core's perspective. LWPs get
3429 in this state if we find them stopping at a time we're not
3430 interested in reporting the event (target_wait on a
3431 specific_process, for example, see linux_nat_wait_1), and
3432 meanwhile the event became uninteresting. Don't bother resuming
3433 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3434 if (target_is_non_stop_p ())
d3a70e03
TT
3435 iterate_over_lwps (minus_one_ptid,
3436 [=] (struct lwp_info *info)
3437 {
3438 return resume_stopped_resumed_lwps (info, ptid);
3439 });
e3e9f5a2 3440
f6ac5f3d 3441 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
7feb7d06
PA
3442
3443 /* If we requested any event, and something came out, assume there
3444 may be more. If we requested a specific lwp or process, also
3445 assume there may be more. */
d9d41e78 3446 if (target_is_async_p ()
6953d224
PA
3447 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3448 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
d7e15655 3449 || ptid != minus_one_ptid))
7feb7d06
PA
3450 async_file_mark ();
3451
7feb7d06
PA
3452 return event_ptid;
3453}
3454
1d2736d4
PA
3455/* Kill one LWP. */
3456
3457static void
3458kill_one_lwp (pid_t pid)
d6b0e80f 3459{
ed731959
JK
3460 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3461
3462 errno = 0;
1d2736d4 3463 kill_lwp (pid, SIGKILL);
9327494e 3464
ed731959 3465 if (debug_linux_nat)
57745c90
PA
3466 {
3467 int save_errno = errno;
3468
9327494e
SM
3469 linux_nat_debug_printf
3470 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3471 save_errno != 0 ? safe_strerror (save_errno) : "OK");
57745c90 3472 }
ed731959
JK
3473
3474 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3475
d6b0e80f 3476 errno = 0;
1d2736d4 3477 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3478 if (debug_linux_nat)
57745c90
PA
3479 {
3480 int save_errno = errno;
3481
9327494e
SM
3482 linux_nat_debug_printf
3483 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3484 save_errno ? safe_strerror (save_errno) : "OK");
57745c90 3485 }
d6b0e80f
AC
3486}
3487
1d2736d4
PA
3488/* Wait for an LWP to die. */
3489
3490static void
3491kill_wait_one_lwp (pid_t pid)
d6b0e80f 3492{
1d2736d4 3493 pid_t res;
d6b0e80f
AC
3494
 3495	  /* We must make sure that there are no pending events (delayed
 3496	     SIGSTOPs, pending SIGTRAPs, etc.) so that the current
 3497	     program doesn't interfere with any following debugging session.  */
3498
d6b0e80f
AC
3499 do
3500 {
1d2736d4
PA
3501 res = my_waitpid (pid, NULL, __WALL);
3502 if (res != (pid_t) -1)
d6b0e80f 3503 {
9327494e
SM
3504 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3505
4a6ed09b
PA
3506 /* The Linux kernel sometimes fails to kill a thread
3507 completely after PTRACE_KILL; that goes from the stop
3508 point in do_fork out to the one in get_signal_to_deliver
3509 and waits again. So kill it again. */
1d2736d4 3510 kill_one_lwp (pid);
d6b0e80f
AC
3511 }
3512 }
1d2736d4
PA
3513 while (res == pid);
3514
3515 gdb_assert (res == -1 && errno == ECHILD);
3516}
3517
3518/* Callback for iterate_over_lwps. */
d6b0e80f 3519
1d2736d4 3520static int
d3a70e03 3521kill_callback (struct lwp_info *lp)
1d2736d4 3522{
e38504b3 3523 kill_one_lwp (lp->ptid.lwp ());
d6b0e80f
AC
3524 return 0;
3525}
3526
1d2736d4
PA
3527/* Callback for iterate_over_lwps. */
3528
3529static int
d3a70e03 3530kill_wait_callback (struct lwp_info *lp)
1d2736d4 3531{
e38504b3 3532 kill_wait_one_lwp (lp->ptid.lwp ());
1d2736d4
PA
3533 return 0;
3534}
3535
3536/* Kill the fork children of any threads of inferior INF that are
3537 stopped at a fork event. */
3538
3539static void
3540kill_unfollowed_fork_children (struct inferior *inf)
3541{
08036331
PA
3542 for (thread_info *thread : inf->non_exited_threads ())
3543 {
3544 struct target_waitstatus *ws = &thread->pending_follow;
1d2736d4 3545
08036331
PA
3546 if (ws->kind == TARGET_WAITKIND_FORKED
3547 || ws->kind == TARGET_WAITKIND_VFORKED)
3548 {
3549 ptid_t child_ptid = ws->value.related_pid;
3550 int child_pid = child_ptid.pid ();
3551 int child_lwp = child_ptid.lwp ();
3552
3553 kill_one_lwp (child_lwp);
3554 kill_wait_one_lwp (child_lwp);
3555
3556 /* Let the arch-specific native code know this process is
3557 gone. */
3558 linux_target->low_forget_process (child_pid);
3559 }
3560 }
1d2736d4
PA
3561}
3562
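/* The kill target method.  Kills any unfollowed fork children first,
   then kills every fork or LWP of the current inferior and mourns
   it.  */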
f6ac5f3d
PA
3563void
3564linux_nat_target::kill ()
d6b0e80f 3565{
f973ed9c
DJ
3566 /* If we're stopped while forking and we haven't followed yet,
3567 kill the other task. We need to do this first because the
3568 parent will be sleeping if this is a vfork. */
1d2736d4 3569 kill_unfollowed_fork_children (current_inferior ());
f973ed9c
DJ
3570
3571 if (forks_exist_p ())
7feb7d06 3572 linux_fork_killall ();
f973ed9c
DJ
3573 else
3574 {
e99b03dc 3575 ptid_t ptid = ptid_t (inferior_ptid.pid ());
e0881a8e 3576
4c28f408 3577 /* Stop all threads before killing them, since ptrace requires
30baf67b 3578 that the thread is stopped to successfully PTRACE_KILL. */
d3a70e03 3579 iterate_over_lwps (ptid, stop_callback);
4c28f408
PA
3580 /* ... and wait until all of them have reported back that
3581 they're no longer running. */
d3a70e03 3582 iterate_over_lwps (ptid, stop_wait_callback);
4c28f408 3583
f973ed9c 3584 /* Kill all LWP's ... */
d3a70e03 3585 iterate_over_lwps (ptid, kill_callback);
f973ed9c
DJ
3586
3587 /* ... and wait until we've flushed all events. */
d3a70e03 3588 iterate_over_lwps (ptid, kill_wait_callback);
f973ed9c
DJ
3589 }
3590
bc1e6c81 3591 target_mourn_inferior (inferior_ptid);
d6b0e80f
AC
3592}
3593
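/* The mourn_inferior target method.  Forgets the inferior's LWPs; if
   other forks are still being debugged, switches to one of them
   instead of mourning in the usual way.  */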
f6ac5f3d
PA
3594void
3595linux_nat_target::mourn_inferior ()
d6b0e80f 3596{
e99b03dc 3597 int pid = inferior_ptid.pid ();
26cb8b7c
PA
3598
3599 purge_lwp_list (pid);
d6b0e80f 3600
f973ed9c 3601 if (! forks_exist_p ())
d90e17a7 3602 /* Normal case, no other forks available. */
f6ac5f3d 3603 inf_ptrace_target::mourn_inferior ();
f973ed9c
DJ
3604 else
3605 /* Multi-fork case. The current inferior_ptid has exited, but
3606 there are other viable forks to debug. Delete the exiting
3607 one and context-switch to the first available. */
3608 linux_fork_mourn_inferior ();
26cb8b7c
PA
3609
3610 /* Let the arch-specific native code know this process is gone. */
135340af 3611 linux_target->low_forget_process (pid);
d6b0e80f
AC
3612}
3613
5b009018
PA
3614/* Convert a native/host siginfo object, into/from the siginfo in the
3615 layout of the inferiors' architecture. */
3616
3617static void
a5362b9a 3618siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018 3619{
135340af
PA
3620 /* If the low target didn't do anything, then just do a straight
3621 memcpy. */
3622 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
5b009018
PA
3623 {
3624 if (direction == 1)
a5362b9a 3625 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3626 else
a5362b9a 3627 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3628 }
3629}
3630
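/* Read or write the siginfo structure of the current LWP using
   PTRACE_GETSIGINFO/PTRACE_SETSIGINFO, converting between the host
   layout and the inferior's layout via siginfo_fixup.  Helper for
   linux_nat_target::xfer_partial below.  */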
9b409511 3631static enum target_xfer_status
f6ac5f3d 3632linux_xfer_siginfo (enum target_object object,
4aa995e1 3633 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3634 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3635 ULONGEST *xfered_len)
4aa995e1 3636{
4aa995e1 3637 int pid;
a5362b9a
TS
3638 siginfo_t siginfo;
3639 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3640
3641 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3642 gdb_assert (readbuf || writebuf);
3643
e38504b3 3644 pid = inferior_ptid.lwp ();
4aa995e1 3645 if (pid == 0)
e99b03dc 3646 pid = inferior_ptid.pid ();
4aa995e1
PA
3647
3648 if (offset > sizeof (siginfo))
2ed4b548 3649 return TARGET_XFER_E_IO;
4aa995e1
PA
3650
3651 errno = 0;
3652 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3653 if (errno != 0)
2ed4b548 3654 return TARGET_XFER_E_IO;
4aa995e1 3655
5b009018
PA
3656 /* When GDB is built as a 64-bit application, ptrace writes into
3657 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3658 inferior with a 64-bit GDB should look the same as debugging it
3659 with a 32-bit GDB, we need to convert it. GDB core always sees
3660 the converted layout, so any read/write will have to be done
3661 post-conversion. */
3662 siginfo_fixup (&siginfo, inf_siginfo, 0);
3663
4aa995e1
PA
3664 if (offset + len > sizeof (siginfo))
3665 len = sizeof (siginfo) - offset;
3666
3667 if (readbuf != NULL)
5b009018 3668 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3669 else
3670 {
5b009018
PA
3671 memcpy (inf_siginfo + offset, writebuf, len);
3672
3673 /* Convert back to ptrace layout before flushing it out. */
3674 siginfo_fixup (&siginfo, inf_siginfo, 1);
3675
4aa995e1
PA
3676 errno = 0;
3677 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3678 if (errno != 0)
2ed4b548 3679 return TARGET_XFER_E_IO;
4aa995e1
PA
3680 }
3681
9b409511
YQ
3682 *xfered_len = len;
3683 return TARGET_XFER_OK;
4aa995e1
PA
3684}
3685
9b409511 3686static enum target_xfer_status
f6ac5f3d
PA
3687linux_nat_xfer_osdata (enum target_object object,
3688 const char *annex, gdb_byte *readbuf,
3689 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3690 ULONGEST *xfered_len);
3691
f6ac5f3d
PA
3692static enum target_xfer_status
3693linux_proc_xfer_partial (enum target_object object,
3694 const char *annex, gdb_byte *readbuf,
3695 const gdb_byte *writebuf,
3696 ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
3697
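/* The xfer_partial target method.  Dispatches to the siginfo, auxv,
   osdata and /proc/PID/mem helpers depending on OBJECT, and falls
   back to inf_ptrace_target::xfer_partial for anything else.  */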
3698enum target_xfer_status
3699linux_nat_target::xfer_partial (enum target_object object,
3700 const char *annex, gdb_byte *readbuf,
3701 const gdb_byte *writebuf,
3702 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3703{
9b409511 3704 enum target_xfer_status xfer;
d6b0e80f 3705
4aa995e1 3706 if (object == TARGET_OBJECT_SIGNAL_INFO)
f6ac5f3d 3707 return linux_xfer_siginfo (object, annex, readbuf, writebuf,
9b409511 3708 offset, len, xfered_len);
4aa995e1 3709
c35b1492
PA
3710 /* The target is connected but no live inferior is selected. Pass
3711 this request down to a lower stratum (e.g., the executable
3712 file). */
d7e15655 3713 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
9b409511 3714 return TARGET_XFER_EOF;
c35b1492 3715
f6ac5f3d
PA
3716 if (object == TARGET_OBJECT_AUXV)
3717 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3718 offset, len, xfered_len);
3719
3720 if (object == TARGET_OBJECT_OSDATA)
3721 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3722 offset, len, xfered_len);
d6b0e80f 3723
f6ac5f3d
PA
3724 /* GDB calculates all addresses in the largest possible address
3725 width.
3726 The address width must be masked before its final use - either by
3727 linux_proc_xfer_partial or inf_ptrace_target::xfer_partial.
3728
3729 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
3730
3731 if (object == TARGET_OBJECT_MEMORY)
3732 {
3733 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3734
3735 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3736 offset &= ((ULONGEST) 1 << addr_bit) - 1;
3737 }
3738
3739 xfer = linux_proc_xfer_partial (object, annex, readbuf, writebuf,
3740 offset, len, xfered_len);
3741 if (xfer != TARGET_XFER_EOF)
3742 return xfer;
3743
3744 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3745 offset, len, xfered_len);
d6b0e80f
AC
3746}
3747
57810aa7 3748bool
f6ac5f3d 3749linux_nat_target::thread_alive (ptid_t ptid)
28439f5e 3750{
4a6ed09b
PA
3751 /* As long as a PTID is in lwp list, consider it alive. */
3752 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3753}
3754
8a06aea7
PA
3755/* Implement the to_update_thread_list target method for this
3756 target. */
3757
f6ac5f3d
PA
3758void
3759linux_nat_target::update_thread_list ()
8a06aea7 3760{
a6904d5a
PA
3761 struct lwp_info *lwp;
3762
4a6ed09b
PA
3763 /* We add/delete threads from the list as clone/exit events are
3764 processed, so just try deleting exited threads still in the
3765 thread list. */
3766 delete_exited_threads ();
a6904d5a
PA
3767
3768 /* Update the processor core that each lwp/thread was last seen
3769 running on. */
3770 ALL_LWPS (lwp)
1ad3de98
PA
3771 {
 3772	    /* Avoid accessing /proc if the thread hasn't run since the last
 3773	       time we fetched the thread's core.  Accessing /proc becomes
3774 noticeably expensive when we have thousands of LWPs. */
3775 if (lwp->core == -1)
3776 lwp->core = linux_common_core_of_thread (lwp->ptid);
3777 }
8a06aea7
PA
3778}
3779
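/* The pid_to_str target method.  Prints "LWP N" for non-leader
   threads or when the process has more than one LWP; otherwise falls
   back to the normal process representation.  */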
a068643d 3780std::string
f6ac5f3d 3781linux_nat_target::pid_to_str (ptid_t ptid)
d6b0e80f 3782{
15a9e13e 3783 if (ptid.lwp_p ()
e38504b3 3784 && (ptid.pid () != ptid.lwp ()
e99b03dc 3785 || num_lwps (ptid.pid ()) > 1))
a068643d 3786 return string_printf ("LWP %ld", ptid.lwp ());
d6b0e80f
AC
3787
3788 return normal_pid_to_str (ptid);
3789}
3790
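/* The thread_name target method.  Reads the thread's name from /proc
   via linux_proc_tid_get_name.  */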
f6ac5f3d
PA
3791const char *
3792linux_nat_target::thread_name (struct thread_info *thr)
4694da01 3793{
79efa585 3794 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3795}
3796
dba24537
AC
3797/* Accepts an integer PID; returns a string representing a file that
3798 can be opened to get the symbols for the child process. */
3799
f6ac5f3d
PA
3800char *
3801linux_nat_target::pid_to_exec_file (int pid)
dba24537 3802{
e0d86d2c 3803 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3804}
3805
a379284a
AA
3806/* Implement the to_xfer_partial target method using /proc/<pid>/mem.
3807 Because we can use a single read/write call, this can be much more
3808 efficient than banging away at PTRACE_PEEKTEXT. */
10d6c8cd 3809
9b409511 3810static enum target_xfer_status
f6ac5f3d 3811linux_proc_xfer_partial (enum target_object object,
10d6c8cd
DJ
3812 const char *annex, gdb_byte *readbuf,
3813 const gdb_byte *writebuf,
9b409511 3814 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 3815{
10d6c8cd
DJ
3816 LONGEST ret;
3817 int fd;
dba24537
AC
3818 char filename[64];
3819
a379284a 3820 if (object != TARGET_OBJECT_MEMORY)
f486487f 3821 return TARGET_XFER_EOF;
dba24537
AC
3822
3823 /* Don't bother for one word. */
3824 if (len < 3 * sizeof (long))
9b409511 3825 return TARGET_XFER_EOF;
dba24537
AC
3826
3827 /* We could keep this file open and cache it - possibly one per
3828 thread. That requires some juggling, but is even faster. */
b67aeab0 3829 xsnprintf (filename, sizeof filename, "/proc/%ld/mem",
e38504b3 3830 inferior_ptid.lwp ());
a379284a
AA
3831 fd = gdb_open_cloexec (filename, ((readbuf ? O_RDONLY : O_WRONLY)
3832 | O_LARGEFILE), 0);
dba24537 3833 if (fd == -1)
9b409511 3834 return TARGET_XFER_EOF;
dba24537 3835
a379284a
AA
3836 /* Use pread64/pwrite64 if available, since they save a syscall and can
3837 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
3838 debugging a SPARC64 application). */
dba24537 3839#ifdef HAVE_PREAD64
a379284a
AA
3840 ret = (readbuf ? pread64 (fd, readbuf, len, offset)
3841 : pwrite64 (fd, writebuf, len, offset));
dba24537 3842#else
a379284a
AA
3843 ret = lseek (fd, offset, SEEK_SET);
3844 if (ret != -1)
3845 ret = (readbuf ? read (fd, readbuf, len)
3846 : write (fd, writebuf, len));
dba24537 3847#endif
dba24537
AC
3848
3849 close (fd);
9b409511 3850
a379284a 3851 if (ret == -1 || ret == 0)
9b409511
YQ
3852 return TARGET_XFER_EOF;
3853 else
3854 {
3855 *xfered_len = ret;
3856 return TARGET_XFER_OK;
3857 }
dba24537
AC
3858}
3859
efcbbd14 3860
dba24537
AC
3861/* Parse LINE as a signal set and add its set bits to SIGS. */
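/* For example, given the mask line "0000000000000200\n" (as found
   after a "SigPnd:" prefix in /proc/PID/status), the single set bit
   corresponds to signal 10 (SIGUSR1 on many architectures), which is
   added to SIGS.  */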
3862
3863static void
3864add_line_to_sigset (const char *line, sigset_t *sigs)
3865{
3866 int len = strlen (line) - 1;
3867 const char *p;
3868 int signum;
3869
3870 if (line[len] != '\n')
8a3fe4f8 3871 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3872
3873 p = line;
3874 signum = len * 4;
3875 while (len-- > 0)
3876 {
3877 int digit;
3878
3879 if (*p >= '0' && *p <= '9')
3880 digit = *p - '0';
3881 else if (*p >= 'a' && *p <= 'f')
3882 digit = *p - 'a' + 10;
3883 else
8a3fe4f8 3884 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3885
3886 signum -= 4;
3887
3888 if (digit & 1)
3889 sigaddset (sigs, signum + 1);
3890 if (digit & 2)
3891 sigaddset (sigs, signum + 2);
3892 if (digit & 4)
3893 sigaddset (sigs, signum + 3);
3894 if (digit & 8)
3895 sigaddset (sigs, signum + 4);
3896
3897 p++;
3898 }
3899}
3900
3901/* Find process PID's pending signals from /proc/pid/status and set
3902 SIGS to match. */
3903
3904void
3e43a32a
MS
3905linux_proc_pending_signals (int pid, sigset_t *pending,
3906 sigset_t *blocked, sigset_t *ignored)
dba24537 3907{
d8d2a3ee 3908 char buffer[PATH_MAX], fname[PATH_MAX];
dba24537
AC
3909
3910 sigemptyset (pending);
3911 sigemptyset (blocked);
3912 sigemptyset (ignored);
cde33bf1 3913 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
d419f42d 3914 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
dba24537 3915 if (procfile == NULL)
8a3fe4f8 3916 error (_("Could not open %s"), fname);
dba24537 3917
d419f42d 3918 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
dba24537
AC
3919 {
3920 /* Normal queued signals are on the SigPnd line in the status
3921 file. However, 2.6 kernels also have a "shared" pending
3922 queue for delivering signals to a thread group, so check for
3923 a ShdPnd line also.
3924
3925 Unfortunately some Red Hat kernels include the shared pending
3926 queue but not the ShdPnd status field. */
3927
61012eef 3928 if (startswith (buffer, "SigPnd:\t"))
dba24537 3929 add_line_to_sigset (buffer + 8, pending);
61012eef 3930 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 3931 add_line_to_sigset (buffer + 8, pending);
61012eef 3932 else if (startswith (buffer, "SigBlk:\t"))
dba24537 3933 add_line_to_sigset (buffer + 8, blocked);
61012eef 3934 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
3935 add_line_to_sigset (buffer + 8, ignored);
3936 }
dba24537
AC
3937}
3938
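/* Handle TARGET_OBJECT_OSDATA transfers by delegating to
   linux_common_xfer_osdata.  */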
9b409511 3939static enum target_xfer_status
f6ac5f3d 3940linux_nat_xfer_osdata (enum target_object object,
e0881a8e 3941 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3942 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3943 ULONGEST *xfered_len)
07e059b5 3944{
07e059b5
VP
3945 gdb_assert (object == TARGET_OBJECT_OSDATA);
3946
9b409511
YQ
3947 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
3948 if (*xfered_len == 0)
3949 return TARGET_XFER_EOF;
3950 else
3951 return TARGET_XFER_OK;
07e059b5
VP
3952}
3953
f6ac5f3d
PA
3954std::vector<static_tracepoint_marker>
3955linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
5808517f
YQ
3956{
3957 char s[IPA_CMD_BUF_SIZE];
e99b03dc 3958 int pid = inferior_ptid.pid ();
5d9310c4 3959 std::vector<static_tracepoint_marker> markers;
256642e8 3960 const char *p = s;
fd79271b 3961 ptid_t ptid = ptid_t (pid, 0, 0);
5d9310c4 3962 static_tracepoint_marker marker;
5808517f
YQ
3963
3964 /* Pause all */
3965 target_stop (ptid);
3966
3967 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
3968 s[sizeof ("qTfSTM")] = 0;
3969
42476b70 3970 agent_run_command (pid, s, strlen (s) + 1);
5808517f 3971
1db93f14
TT
3972 /* Unpause all. */
3973 SCOPE_EXIT { target_continue_no_signal (ptid); };
5808517f
YQ
3974
3975 while (*p++ == 'm')
3976 {
5808517f
YQ
3977 do
3978 {
5d9310c4 3979 parse_static_tracepoint_marker_definition (p, &p, &marker);
5808517f 3980
5d9310c4
SM
3981 if (strid == NULL || marker.str_id == strid)
3982 markers.push_back (std::move (marker));
5808517f
YQ
3983 }
3984 while (*p++ == ','); /* comma-separated list */
3985
3986 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
3987 s[sizeof ("qTsSTM")] = 0;
42476b70 3988 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
3989 p = s;
3990 }
3991
5808517f
YQ
3992 return markers;
3993}
3994
b84876c2
PA
3995/* target_is_async_p implementation. */
3996
57810aa7 3997bool
f6ac5f3d 3998linux_nat_target::is_async_p ()
b84876c2 3999{
198297aa 4000 return linux_is_async_p ();
b84876c2
PA
4001}
4002
4003/* target_can_async_p implementation. */
4004
57810aa7 4005bool
f6ac5f3d 4006linux_nat_target::can_async_p ()
b84876c2 4007{
fde1b17d
SM
4008 /* We're always async, unless the user explicitly prevented it with the
4009 "maint set target-async" command. */
3dd5b83d 4010 return target_async_permitted;
b84876c2
PA
4011}
4012
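/* to_supports_non_stop implementation.  */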
57810aa7 4013bool
f6ac5f3d 4014linux_nat_target::supports_non_stop ()
9908b566 4015{
f80c8ec4 4016 return true;
9908b566
VP
4017}
4018
fbea99ea
PA
4019/* to_always_non_stop_p implementation. */
4020
57810aa7 4021bool
f6ac5f3d 4022linux_nat_target::always_non_stop_p ()
fbea99ea 4023{
f80c8ec4 4024 return true;
fbea99ea
PA
4025}
4026
57810aa7 4027bool
f6ac5f3d 4028linux_nat_target::supports_multi_process ()
d90e17a7 4029{
aee91db3 4030 return true;
d90e17a7
PA
4031}
4032
57810aa7 4033bool
f6ac5f3d 4034linux_nat_target::supports_disable_randomization ()
03583c20
UW
4035{
4036#ifdef HAVE_PERSONALITY
f80c8ec4 4037 return true;
03583c20 4038#else
f80c8ec4 4039 return false;
03583c20
UW
4040#endif
4041}
4042
7feb7d06
PA
4043/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4044 so we notice when any child changes state, and notify the
4045 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4046 above to wait for the arrival of a SIGCHLD. */
4047
b84876c2 4048static void
7feb7d06 4049sigchld_handler (int signo)
b84876c2 4050{
7feb7d06
PA
4051 int old_errno = errno;
4052
01124a23 4053 if (debug_linux_nat)
da5bd37e 4054 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4055
4056 if (signo == SIGCHLD
4057 && linux_nat_event_pipe[0] != -1)
4058 async_file_mark (); /* Let the event loop know that there are
4059 events to handle. */
4060
4061 errno = old_errno;
4062}
4063
4064/* Callback registered with the target events file descriptor. */
4065
4066static void
4067handle_target_event (int error, gdb_client_data client_data)
4068{
b1a35af2 4069 inferior_event_handler (INF_REG_EVENT);
7feb7d06
PA
4070}
4071
4072/* Create/destroy the target events pipe. Returns previous state. */
4073
4074static int
4075linux_async_pipe (int enable)
4076{
198297aa 4077 int previous = linux_is_async_p ();
7feb7d06
PA
4078
4079 if (previous != enable)
4080 {
4081 sigset_t prev_mask;
4082
12696c10
PA
4083 /* Block child signals while we create/destroy the pipe, as
4084 their handler writes to it. */
7feb7d06
PA
4085 block_child_signals (&prev_mask);
4086
4087 if (enable)
4088 {
614c279d 4089 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4090 internal_error (__FILE__, __LINE__,
4091 "creating event pipe failed.");
4092
4093 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4094 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4095 }
4096 else
4097 {
4098 close (linux_nat_event_pipe[0]);
4099 close (linux_nat_event_pipe[1]);
4100 linux_nat_event_pipe[0] = -1;
4101 linux_nat_event_pipe[1] = -1;
4102 }
4103
4104 restore_child_signals_mask (&prev_mask);
4105 }
4106
4107 return previous;
b84876c2
PA
4108}
4109
5b6d1e4f
PA
4110int
4111linux_nat_target::async_wait_fd ()
4112{
4113 return linux_nat_event_pipe[0];
4114}
4115
b84876c2
PA
4116/* target_async implementation. */
4117
f6ac5f3d
PA
4118void
4119linux_nat_target::async (int enable)
b84876c2 4120{
6a3753b3 4121 if (enable)
b84876c2 4122 {
7feb7d06
PA
4123 if (!linux_async_pipe (1))
4124 {
4125 add_file_handler (linux_nat_event_pipe[0],
2554f6f5
SM
4126 handle_target_event, NULL,
4127 "linux-nat");
7feb7d06
PA
4128 /* There may be pending events to handle. Tell the event loop
4129 to poll them. */
4130 async_file_mark ();
4131 }
b84876c2
PA
4132 }
4133 else
4134 {
b84876c2 4135 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4136 linux_async_pipe (0);
b84876c2
PA
4137 }
4138 return;
4139}
4140
a493e3e2 4141/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4142 event came out. */
4143
4c28f408 4144static int
d3a70e03 4145linux_nat_stop_lwp (struct lwp_info *lwp)
4c28f408 4146{
d90e17a7 4147 if (!lwp->stopped)
252fbfc8 4148 {
9327494e
SM
4149 linux_nat_debug_printf ("running -> suspending %s",
4150 target_pid_to_str (lwp->ptid).c_str ());
252fbfc8 4151
252fbfc8 4152
25289eb2
PA
4153 if (lwp->last_resume_kind == resume_stop)
4154 {
9327494e
SM
4155 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4156 lwp->ptid.lwp ());
25289eb2
PA
4157 return 0;
4158 }
252fbfc8 4159
d3a70e03 4160 stop_callback (lwp);
25289eb2 4161 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4162 }
4163 else
4164 {
4165 /* Already known to be stopped; do nothing. */
252fbfc8 4166
d90e17a7
PA
4167 if (debug_linux_nat)
4168 {
5b6d1e4f 4169 if (find_thread_ptid (linux_target, lwp->ptid)->stop_requested)
9327494e
SM
4170 linux_nat_debug_printf ("already stopped/stop_requested %s",
4171 target_pid_to_str (lwp->ptid).c_str ());
d90e17a7 4172 else
9327494e
SM
4173 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
4174 target_pid_to_str (lwp->ptid).c_str ());
252fbfc8
PA
4175 }
4176 }
4c28f408
PA
4177 return 0;
4178}
4179
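/* The stop target method.  Requests a stop of every LWP matching
   PTID.  */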
f6ac5f3d
PA
4180void
4181linux_nat_target::stop (ptid_t ptid)
4c28f408 4182{
d3a70e03 4183 iterate_over_lwps (ptid, linux_nat_stop_lwp);
bfedc46a
PA
4184}
4185
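/* The close target method.  Tears down async mode before closing the
   underlying ptrace target.  */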
f6ac5f3d
PA
4186void
4187linux_nat_target::close ()
d90e17a7
PA
4188{
4189 /* Unregister from the event loop. */
f6ac5f3d
PA
4190 if (is_async_p ())
4191 async (0);
d90e17a7 4192
f6ac5f3d 4193 inf_ptrace_target::close ();
d90e17a7
PA
4194}
4195
c0694254
PA
4196/* When requests are passed down from the linux-nat layer to the
4197 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4198 used. The address space pointer is stored in the inferior object,
4199 but the common code that is passed such ptid can't tell whether
4200 lwpid is a "main" process id or not (it assumes so). We reverse
4201 look up the "main" process id from the lwp here. */
4202
f6ac5f3d
PA
4203struct address_space *
4204linux_nat_target::thread_address_space (ptid_t ptid)
c0694254
PA
4205{
4206 struct lwp_info *lwp;
4207 struct inferior *inf;
4208 int pid;
4209
e38504b3 4210 if (ptid.lwp () == 0)
c0694254
PA
4211 {
4212 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4213 tgid. */
4214 lwp = find_lwp_pid (ptid);
e99b03dc 4215 pid = lwp->ptid.pid ();
c0694254
PA
4216 }
4217 else
4218 {
4219 /* A (pid,lwpid,0) ptid. */
e99b03dc 4220 pid = ptid.pid ();
c0694254
PA
4221 }
4222
5b6d1e4f 4223 inf = find_inferior_pid (this, pid);
c0694254
PA
4224 gdb_assert (inf != NULL);
4225 return inf->aspace;
4226}
4227
dc146f7c
VP
4228/* Return the cached value of the processor core for thread PTID. */
4229
f6ac5f3d
PA
4230int
4231linux_nat_target::core_of_thread (ptid_t ptid)
dc146f7c
VP
4232{
4233 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4234
dc146f7c
VP
4235 if (info)
4236 return info->core;
4237 return -1;
4238}
4239
7a6a1731
GB
4240/* Implementation of to_filesystem_is_local. */
4241
57810aa7 4242bool
f6ac5f3d 4243linux_nat_target::filesystem_is_local ()
7a6a1731
GB
4244{
4245 struct inferior *inf = current_inferior ();
4246
4247 if (inf->fake_pid_p || inf->pid == 0)
57810aa7 4248 return true;
7a6a1731
GB
4249
4250 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4251}
4252
4253/* Convert the INF argument passed to a to_fileio_* method
4254 to a process ID suitable for passing to its corresponding
4255 linux_mntns_* function. If INF is non-NULL then the
4256 caller is requesting the filesystem seen by INF. If INF
4257 is NULL then the caller is requesting the filesystem seen
 4258	   by GDB itself.  We fall back to GDB's filesystem in the case
4259 that INF is non-NULL but its PID is unknown. */
4260
4261static pid_t
4262linux_nat_fileio_pid_of (struct inferior *inf)
4263{
4264 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4265 return getpid ();
4266 else
4267 return inf->pid;
4268}
4269
4270/* Implementation of to_fileio_open. */
4271
f6ac5f3d
PA
4272int
4273linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4274 int flags, int mode, int warn_if_slow,
4275 int *target_errno)
7a6a1731
GB
4276{
4277 int nat_flags;
4278 mode_t nat_mode;
4279 int fd;
4280
4281 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4282 || fileio_to_host_mode (mode, &nat_mode) == -1)
4283 {
4284 *target_errno = FILEIO_EINVAL;
4285 return -1;
4286 }
4287
4288 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4289 filename, nat_flags, nat_mode);
4290 if (fd == -1)
4291 *target_errno = host_to_fileio_error (errno);
4292
4293 return fd;
4294}
4295
4296/* Implementation of to_fileio_readlink. */
4297
f6ac5f3d
PA
4298gdb::optional<std::string>
4299linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4300 int *target_errno)
7a6a1731
GB
4301{
4302 char buf[PATH_MAX];
4303 int len;
7a6a1731
GB
4304
4305 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4306 filename, buf, sizeof (buf));
4307 if (len < 0)
4308 {
4309 *target_errno = host_to_fileio_error (errno);
e0d3522b 4310 return {};
7a6a1731
GB
4311 }
4312
e0d3522b 4313 return std::string (buf, len);
7a6a1731
GB
4314}
4315
4316/* Implementation of to_fileio_unlink. */
4317
f6ac5f3d
PA
4318int
4319linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4320 int *target_errno)
7a6a1731
GB
4321{
4322 int ret;
4323
4324 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4325 filename);
4326 if (ret == -1)
4327 *target_errno = host_to_fileio_error (errno);
4328
4329 return ret;
4330}
4331
aa01bd36
PA
4332/* Implementation of the to_thread_events method. */
4333
f6ac5f3d
PA
4334void
4335linux_nat_target::thread_events (int enable)
aa01bd36
PA
4336{
4337 report_thread_events = enable;
4338}
4339
f6ac5f3d
PA
4340linux_nat_target::linux_nat_target ()
4341{
f973ed9c
DJ
4342 /* We don't change the stratum; this target will sit at
4343 process_stratum and thread_db will set at thread_stratum. This
4344 is a little strange, since this is a multi-threaded-capable
4345 target, but we want to be on the stack below thread_db, and we
4346 also want to be used for single-threaded processes. */
f973ed9c
DJ
4347}
4348
f865ee35
JK
4349/* See linux-nat.h. */
4350
4351int
4352linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4353{
da559b09 4354 int pid;
9f0bdab8 4355
e38504b3 4356 pid = ptid.lwp ();
da559b09 4357 if (pid == 0)
e99b03dc 4358 pid = ptid.pid ();
f865ee35 4359
da559b09
JK
4360 errno = 0;
4361 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4362 if (errno != 0)
4363 {
4364 memset (siginfo, 0, sizeof (*siginfo));
4365 return 0;
4366 }
f865ee35 4367 return 1;
9f0bdab8
DJ
4368}
4369
7b669087
GB
4370/* See nat/linux-nat.h. */
4371
4372ptid_t
4373current_lwp_ptid (void)
4374{
15a9e13e 4375 gdb_assert (inferior_ptid.lwp_p ());
7b669087
GB
4376 return inferior_ptid;
4377}
4378
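/* Module initialization.  Registers the "set debug lin-lwp" and
   "set debug linux-namespaces" commands, installs the SIGCHLD
   handler, sets up the signal masks and creates the LWP table.  */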
6c265988 4379void _initialize_linux_nat ();
d6b0e80f 4380void
6c265988 4381_initialize_linux_nat ()
d6b0e80f 4382{
ccce17b0
YQ
4383 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4384 &debug_linux_nat, _("\
b84876c2
PA
4385Set debugging of GNU/Linux lwp module."), _("\
4386Show debugging of GNU/Linux lwp module."), _("\
4387Enables printf debugging output."),
ccce17b0
YQ
4388 NULL,
4389 show_debug_linux_nat,
4390 &setdebuglist, &showdebuglist);
b84876c2 4391
7a6a1731
GB
4392 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4393 &debug_linux_namespaces, _("\
4394Set debugging of GNU/Linux namespaces module."), _("\
4395Show debugging of GNU/Linux namespaces module."), _("\
4396Enables printf debugging output."),
4397 NULL,
4398 NULL,
4399 &setdebuglist, &showdebuglist);
4400
7feb7d06
PA
4401 /* Install a SIGCHLD handler. */
4402 sigchld_action.sa_handler = sigchld_handler;
4403 sigemptyset (&sigchld_action.sa_mask);
4404 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4405
4406 /* Make it the default. */
7feb7d06 4407 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4408
4409 /* Make sure we don't block SIGCHLD during a sigsuspend. */
21987b9c 4410 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
d6b0e80f
AC
4411 sigdelset (&suspend_mask, SIGCHLD);
4412
7feb7d06 4413 sigemptyset (&blocked_mask);
774113b0
PA
4414
4415 lwp_lwpid_htab_create ();
d6b0e80f
AC
4416}
4417\f
4418
4419/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4420 the GNU/Linux Threads library and therefore doesn't really belong
4421 here. */
4422
d6b0e80f
AC
4423/* Return the set of signals used by the threads library in *SET. */
4424
4425void
4426lin_thread_get_thread_signals (sigset_t *set)
4427{
d6b0e80f
AC
4428 sigemptyset (set);
4429
4a6ed09b
PA
4430 /* NPTL reserves the first two RT signals, but does not provide any
4431 way for the debugger to query the signal numbers - fortunately
4432 they don't change. */
4433 sigaddset (set, __SIGRTMIN);
4434 sigaddset (set, __SIGRTMIN + 1);
d6b0e80f 4435}