/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite adequate.
Prior to version 2.4, Linux could only wait for events either in the
main thread or in secondary threads (2.4 has the __WALL flag).  So, if
we use blocking waitpid, we might miss an event.  The solution is to use
non-blocking waitpid, together with sigsuspend.  First, we use
non-blocking waitpid to get an event in the main process, if any.
Second, we use non-blocking waitpid with the __WCLONE flag to check for
events in cloned processes.  If nothing is found, we use sigsuspend to
wait for SIGCHLD.  When SIGCHLD arrives, it means something happened to
a child process -- and SIGCHLD will be delivered both for events in the
main debugged process and in cloned processes.  As soon as we know
there's an event, we get back to calling non-blocking waitpid with and
without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/polls on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there is a target
event, that's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it wastes time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to be
delivered, so that we can intercept it.  SIGSTOP's advantage is that it
cannot be blocked.  A disadvantage is that it is not a real-time
signal, so it can only be queued once; we do not keep track of other
sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated - not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support for
SIGSTOP, since PTRACE_ATTACH generates it, and there are races with
trying to find a signal that is not blocked.  */
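
/* A minimal sketch of the sync-mode strategy just described (an
   illustration only, not the actual implementation): with SIGCHLD
   blocked everywhere except inside sigsuspend, the wait loop has
   roughly this shape:

     for (;;)
       {
         pid = waitpid (-1, &status, WNOHANG);
         if (pid <= 0)
           pid = waitpid (-1, &status, __WCLONE | WNOHANG);
         if (pid > 0)
           break;
         sigsuspend (&suspend_mask);
       }

   The real wait machinery further down in this file follows this
   pattern, with per-LWP bookkeeping layered on top.  */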

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer
   for the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static target_xfer_partial_ftype *super_xfer_partial;

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;
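/* Show hook for the LWP debugging setting above; prints whether
   debugging output from the GNU/Linux LWP module is enabled.  */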
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

ae087d01
DJ
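/* An entry in a list of (PID, wait status) pairs, used to remember
   thread stops that have been seen but not yet processed.  */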
208struct simple_pid_list
209{
210 int pid;
3d799a95 211 int status;
ae087d01
DJ
212 struct simple_pid_list *next;
213};
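/* PIDs of new threads that have already reported a stop, but whose
   parent's PTRACE_EVENT_CLONE event has not been processed yet.  */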
214struct simple_pid_list *stopped_pids;
215
3dd5b83d
PA
216/* Async mode support. */
217
b84876c2
PA
218/* The read/write ends of the pipe registered as waitable file in the
219 event loop. */
220static int linux_nat_event_pipe[2] = { -1, -1 };
221
198297aa
PA
222/* True if we're currently in async mode. */
223#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
224
7feb7d06 225/* Flush the event pipe. */
b84876c2 226
7feb7d06
PA
227static void
228async_file_flush (void)
b84876c2 229{
7feb7d06
PA
230 int ret;
231 char buf;
b84876c2 232
7feb7d06 233 do
b84876c2 234 {
7feb7d06 235 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 236 }
7feb7d06 237 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
238}
239
7feb7d06
PA
240/* Put something (anything, doesn't matter what, or how much) in event
241 pipe, so that the select/poll in the event-loop realizes we have
242 something to process. */
252fbfc8 243
b84876c2 244static void
7feb7d06 245async_file_mark (void)
b84876c2 246{
7feb7d06 247 int ret;
b84876c2 248
7feb7d06
PA
249 /* It doesn't really matter what the pipe contains, as long we end
250 up with something in it. Might as well flush the previous
251 left-overs. */
252 async_file_flush ();
b84876c2 253
7feb7d06 254 do
b84876c2 255 {
7feb7d06 256 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 257 }
7feb7d06 258 while (ret == -1 && errno == EINTR);
b84876c2 259
7feb7d06
PA
260 /* Ignore EAGAIN. If the pipe is full, the event loop will already
261 be awakened anyway. */
b84876c2
PA
262}
263
7feb7d06
PA
264static int kill_lwp (int lwpid, int signo);
265
266static int stop_callback (struct lwp_info *lp, void *data);
2db9a427 267static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
7feb7d06
PA
268
269static void block_child_signals (sigset_t *prev_mask);
270static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
271
272struct lwp_info;
273static struct lwp_info *add_lwp (ptid_t ptid);
274static void purge_lwp_list (int pid);
4403d8e9 275static void delete_lwp (ptid_t ptid);
2277426b
PA
276static struct lwp_info *find_lwp_pid (ptid_t ptid);
277
8a99810d
PA
278static int lwp_status_pending_p (struct lwp_info *lp);
279
9c02b525
PA
280static int check_stopped_by_breakpoint (struct lwp_info *lp);
281static int sigtrap_is_event (int status);
282static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
283
cff068da
GB
284\f
285/* LWP accessors. */
286
287/* See nat/linux-nat.h. */
288
289ptid_t
290ptid_of_lwp (struct lwp_info *lwp)
291{
292 return lwp->ptid;
293}
294
295/* See nat/linux-nat.h. */
296
4b134ca1
GB
297void
298lwp_set_arch_private_info (struct lwp_info *lwp,
299 struct arch_lwp_info *info)
300{
301 lwp->arch_private = info;
302}
303
304/* See nat/linux-nat.h. */
305
306struct arch_lwp_info *
307lwp_arch_private_info (struct lwp_info *lwp)
308{
309 return lwp->arch_private;
310}
311
312/* See nat/linux-nat.h. */
313
cff068da
GB
314int
315lwp_is_stopped (struct lwp_info *lwp)
316{
317 return lwp->stopped;
318}
319
320/* See nat/linux-nat.h. */
321
322enum target_stop_reason
323lwp_stop_reason (struct lwp_info *lwp)
324{
325 return lwp->stop_reason;
326}
327
ae087d01
DJ
328\f
329/* Trivial list manipulation functions to keep track of a list of
330 new stopped processes. */
331static void
3d799a95 332add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
333{
334 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
e0881a8e 335
ae087d01 336 new_pid->pid = pid;
3d799a95 337 new_pid->status = status;
ae087d01
DJ
338 new_pid->next = *listp;
339 *listp = new_pid;
340}
341
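/* Return non-zero if PID is present in LIST.  */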
84636d28
PA
342static int
343in_pid_list_p (struct simple_pid_list *list, int pid)
344{
345 struct simple_pid_list *p;
346
347 for (p = list; p != NULL; p = p->next)
348 if (p->pid == pid)
349 return 1;
350 return 0;
351}
352
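/* If PID is present in *LISTP, remove it from the list, store its
   cached wait status in *STATUSP, and return 1; otherwise return 0.  */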
ae087d01 353static int
46a96992 354pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
355{
356 struct simple_pid_list **p;
357
358 for (p = listp; *p != NULL; p = &(*p)->next)
359 if ((*p)->pid == pid)
360 {
361 struct simple_pid_list *next = (*p)->next;
e0881a8e 362
46a96992 363 *statusp = (*p)->status;
ae087d01
DJ
364 xfree (*p);
365 *p = next;
366 return 1;
367 }
368 return 0;
369}
370
de0d863e
DB
371/* Return the ptrace options that we want to try to enable. */
372
373static int
374linux_nat_ptrace_options (int attached)
375{
376 int options = 0;
377
378 if (!attached)
379 options |= PTRACE_O_EXITKILL;
380
381 options |= (PTRACE_O_TRACESYSGOOD
382 | PTRACE_O_TRACEVFORKDONE
383 | PTRACE_O_TRACEVFORK
384 | PTRACE_O_TRACEFORK
385 | PTRACE_O_TRACEEXEC);
386
387 return options;
388}
389
96d7229d 390/* Initialize ptrace warnings and check for supported ptrace
beed38b8
JB
391 features given PID.
392
393 ATTACHED should be nonzero iff we attached to the inferior. */
3993f6b1
DJ
394
395static void
beed38b8 396linux_init_ptrace (pid_t pid, int attached)
3993f6b1 397{
de0d863e
DB
398 int options = linux_nat_ptrace_options (attached);
399
400 linux_enable_event_reporting (pid, options);
96d7229d 401 linux_ptrace_init_warnings ();
4de4c07c
DJ
402}
403
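/* The to_post_attach target_ops method: enable the ptrace event
   reporting we need on a process we have just attached to.  */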
6d8fd2b7 404static void
f045800c 405linux_child_post_attach (struct target_ops *self, int pid)
4de4c07c 406{
beed38b8 407 linux_init_ptrace (pid, 1);
4de4c07c
DJ
408}
409
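/* The to_post_startup_inferior target_ops method: enable ptrace event
   reporting on an inferior we have just started ourselves.  */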
10d6c8cd 410static void
2e97a79e 411linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4de4c07c 412{
beed38b8 413 linux_init_ptrace (ptid_get_pid (ptid), 0);
4de4c07c
DJ
414}
415
4403d8e9
JK
416/* Return the number of known LWPs in the tgid given by PID. */
417
418static int
419num_lwps (int pid)
420{
421 int count = 0;
422 struct lwp_info *lp;
423
424 for (lp = lwp_list; lp; lp = lp->next)
425 if (ptid_get_pid (lp->ptid) == pid)
426 count++;
427
428 return count;
429}
430
431/* Call delete_lwp with prototype compatible for make_cleanup. */
432
433static void
434delete_lwp_cleanup (void *lp_voidp)
435{
436 struct lwp_info *lp = lp_voidp;
437
438 delete_lwp (lp->ptid);
439}
440
d83ad864
DB
441/* Target hook for follow_fork. On entry inferior_ptid must be the
442 ptid of the followed inferior. At return, inferior_ptid will be
443 unchanged. */
444
6d8fd2b7 445static int
07107ca6
LM
446linux_child_follow_fork (struct target_ops *ops, int follow_child,
447 int detach_fork)
3993f6b1 448{
d83ad864 449 if (!follow_child)
4de4c07c 450 {
6c95b8df 451 struct lwp_info *child_lp = NULL;
d83ad864
DB
452 int status = W_STOPCODE (0);
453 struct cleanup *old_chain;
454 int has_vforked;
79639e11 455 ptid_t parent_ptid, child_ptid;
d83ad864
DB
456 int parent_pid, child_pid;
457
458 has_vforked = (inferior_thread ()->pending_follow.kind
459 == TARGET_WAITKIND_VFORKED);
79639e11
PA
460 parent_ptid = inferior_ptid;
461 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
462 parent_pid = ptid_get_lwp (parent_ptid);
463 child_pid = ptid_get_lwp (child_ptid);
4de4c07c 464
1777feb0 465 /* We're already attached to the parent, by default. */
d83ad864 466 old_chain = save_inferior_ptid ();
79639e11 467 inferior_ptid = child_ptid;
d83ad864
DB
468 child_lp = add_lwp (inferior_ptid);
469 child_lp->stopped = 1;
470 child_lp->last_resume_kind = resume_stop;
4de4c07c 471
ac264b3b
MS
472 /* Detach new forked process? */
473 if (detach_fork)
f75c00e4 474 {
4403d8e9
JK
475 make_cleanup (delete_lwp_cleanup, child_lp);
476
4403d8e9
JK
477 if (linux_nat_prepare_to_resume != NULL)
478 linux_nat_prepare_to_resume (child_lp);
c077881a
HZ
479
480 /* When debugging an inferior in an architecture that supports
481 hardware single stepping on a kernel without commit
482 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
483 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
484 set if the parent process had them set.
485 To work around this, single step the child process
486 once before detaching to clear the flags. */
487
488 if (!gdbarch_software_single_step_p (target_thread_architecture
489 (child_lp->ptid)))
490 {
c077881a
HZ
491 linux_disable_event_reporting (child_pid);
492 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
493 perror_with_name (_("Couldn't do single step"));
494 if (my_waitpid (child_pid, &status, 0) < 0)
495 perror_with_name (_("Couldn't wait vfork process"));
496 }
497
498 if (WIFSTOPPED (status))
9caaaa83
PA
499 {
500 int signo;
501
502 signo = WSTOPSIG (status);
503 if (signo != 0
504 && !signal_pass_state (gdb_signal_from_host (signo)))
505 signo = 0;
506 ptrace (PTRACE_DETACH, child_pid, 0, signo);
507 }
4403d8e9 508
d83ad864 509 /* Resets value of inferior_ptid to parent ptid. */
4403d8e9 510 do_cleanups (old_chain);
ac264b3b
MS
511 }
512 else
513 {
6c95b8df 514 /* Let the thread_db layer learn about this new process. */
2277426b 515 check_for_thread_db ();
ac264b3b 516 }
9016a515 517
d83ad864
DB
518 do_cleanups (old_chain);
519
9016a515
DJ
520 if (has_vforked)
521 {
3ced3da4 522 struct lwp_info *parent_lp;
6c95b8df 523
79639e11 524 parent_lp = find_lwp_pid (parent_ptid);
96d7229d 525 gdb_assert (linux_supports_tracefork () >= 0);
3ced3da4 526
96d7229d 527 if (linux_supports_tracevforkdone ())
9016a515 528 {
6c95b8df
PA
529 if (debug_linux_nat)
530 fprintf_unfiltered (gdb_stdlog,
531 "LCFF: waiting for VFORK_DONE on %d\n",
532 parent_pid);
3ced3da4 533 parent_lp->stopped = 1;
9016a515 534
6c95b8df
PA
535 /* We'll handle the VFORK_DONE event like any other
536 event, in target_wait. */
9016a515
DJ
537 }
538 else
539 {
540 /* We can't insert breakpoints until the child has
541 finished with the shared memory region. We need to
542 wait until that happens. Ideal would be to just
543 call:
544 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
545 - waitpid (parent_pid, &status, __WALL);
546 However, most architectures can't handle a syscall
547 being traced on the way out if it wasn't traced on
548 the way in.
549
550 We might also think to loop, continuing the child
551 until it exits or gets a SIGTRAP. One problem is
552 that the child might call ptrace with PTRACE_TRACEME.
553
554 There's no simple and reliable way to figure out when
555 the vforked child will be done with its copy of the
556 shared memory. We could step it out of the syscall,
557 two instructions, let it go, and then single-step the
558 parent once. When we have hardware single-step, this
559 would work; with software single-step it could still
560 be made to work but we'd have to be able to insert
561 single-step breakpoints in the child, and we'd have
562 to insert -just- the single-step breakpoint in the
563 parent. Very awkward.
564
565 In the end, the best we can do is to make sure it
566 runs for a little while. Hopefully it will be out of
567 range of any breakpoints we reinsert. Usually this
568 is only the single-step breakpoint at vfork's return
569 point. */
570
6c95b8df
PA
571 if (debug_linux_nat)
572 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
573 "LCFF: no VFORK_DONE "
574 "support, sleeping a bit\n");
6c95b8df 575
9016a515 576 usleep (10000);
9016a515 577
6c95b8df
PA
578 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
579 and leave it pending. The next linux_nat_resume call
580 will notice a pending event, and bypasses actually
581 resuming the inferior. */
3ced3da4
PA
582 parent_lp->status = 0;
583 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
584 parent_lp->stopped = 1;
6c95b8df
PA
585
586 /* If we're in async mode, need to tell the event loop
587 there's something here to process. */
d9d41e78 588 if (target_is_async_p ())
6c95b8df
PA
589 async_file_mark ();
590 }
9016a515 591 }
4de4c07c 592 }
3993f6b1 593 else
4de4c07c 594 {
3ced3da4 595 struct lwp_info *child_lp;
4de4c07c 596
3ced3da4
PA
597 child_lp = add_lwp (inferior_ptid);
598 child_lp->stopped = 1;
25289eb2 599 child_lp->last_resume_kind = resume_stop;
6c95b8df 600
6c95b8df 601 /* Let the thread_db layer learn about this new process. */
ef29ce1a 602 check_for_thread_db ();
4de4c07c
DJ
603 }
604
605 return 0;
606}
607
4de4c07c 608\f
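/* Fork, vfork and exec catchpoint support.  These rely on the kernel's
   PTRACE_O_TRACE{FORK,VFORK,EXEC} event reporting: the insert hooks
   below return 0 (success) when that support is available and nonzero
   otherwise, and the remove hooks have nothing to undo.  */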
77b06cd7 609static int
a863b201 610linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
4de4c07c 611{
96d7229d 612 return !linux_supports_tracefork ();
3993f6b1
DJ
613}
614
eb73ad13 615static int
973fc227 616linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
617{
618 return 0;
619}
620
77b06cd7 621static int
3ecc7da0 622linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
3993f6b1 623{
96d7229d 624 return !linux_supports_tracefork ();
3993f6b1
DJ
625}
626
eb73ad13 627static int
e98cf0cd 628linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
629{
630 return 0;
631}
632
77b06cd7 633static int
ba025e51 634linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
3993f6b1 635{
96d7229d 636 return !linux_supports_tracefork ();
3993f6b1
DJ
637}
638
eb73ad13 639static int
758e29d2 640linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
641{
642 return 0;
643}
644
a96d9b2e 645static int
ff214e67
TT
646linux_child_set_syscall_catchpoint (struct target_ops *self,
647 int pid, int needed, int any_count,
a96d9b2e
SDJ
648 int table_size, int *table)
649{
96d7229d 650 if (!linux_supports_tracesysgood ())
77b06cd7
TJB
651 return 1;
652
a96d9b2e
SDJ
653 /* On GNU/Linux, we ignore the arguments. It means that we only
654 enable the syscall catchpoints, but do not disable them.
77b06cd7 655
a96d9b2e
SDJ
656 Also, we do not use the `table' information because we do not
657 filter system calls here. We let GDB do the logic for us. */
658 return 0;
659}
660
d6b0e80f
AC
661/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
662 are processes sharing the same VM space. A multi-threaded process
663 is basically a group of such processes. However, such a grouping
664 is almost entirely a user-space issue; the kernel doesn't enforce
665 such a grouping at all (this might change in the future). In
666 general, we'll rely on the threads library (i.e. the GNU/Linux
667 Threads library) to provide such a grouping.
668
 669 It is perfectly possible to write a multi-threaded application
670 without the assistance of a threads library, by using the clone
671 system call directly. This module should be able to give some
672 rudimentary support for debugging such applications if developers
673 specify the CLONE_PTRACE flag in the clone system call, and are
674 using the Linux kernel 2.4 or above.
675
676 Note that there are some peculiarities in GNU/Linux that affect
677 this code:
678
679 - In general one should specify the __WCLONE flag to waitpid in
680 order to make it report events for any of the cloned processes
681 (and leave it out for the initial process). However, if a cloned
682 process has exited the exit status is only reported if the
683 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
684 we cannot use it since GDB must work on older systems too.
685
686 - When a traced, cloned process exits and is waited for by the
687 debugger, the kernel reassigns it to the original parent and
688 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
689 library doesn't notice this, which leads to the "zombie problem":
 690 When debugged, a multi-threaded process that spawns a lot of
691 threads will run out of processes, even if the threads exit,
692 because the "zombies" stay around. */
693
694/* List of known LWPs. */
9f0bdab8 695struct lwp_info *lwp_list;
d6b0e80f
AC
696\f
697
d6b0e80f
AC
698/* Original signal mask. */
699static sigset_t normal_mask;
700
701/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
702 _initialize_linux_nat. */
703static sigset_t suspend_mask;
704
7feb7d06
PA
705/* Signals to block to make that sigsuspend work. */
706static sigset_t blocked_mask;
707
708/* SIGCHLD action. */
709struct sigaction sigchld_action;
b84876c2 710
7feb7d06
PA
711/* Block child signals (SIGCHLD and linux threads signals), and store
712 the previous mask in PREV_MASK. */
84e46146 713
7feb7d06
PA
714static void
715block_child_signals (sigset_t *prev_mask)
716{
717 /* Make sure SIGCHLD is blocked. */
718 if (!sigismember (&blocked_mask, SIGCHLD))
719 sigaddset (&blocked_mask, SIGCHLD);
720
721 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
722}
723
724/* Restore child signals mask, previously returned by
725 block_child_signals. */
726
727static void
728restore_child_signals_mask (sigset_t *prev_mask)
729{
730 sigprocmask (SIG_SETMASK, prev_mask, NULL);
731}
2455069d
UW
732
733/* Mask of signals to pass directly to the inferior. */
734static sigset_t pass_mask;
735
736/* Update signals to pass to the inferior. */
737static void
94bedb42
TT
738linux_nat_pass_signals (struct target_ops *self,
739 int numsigs, unsigned char *pass_signals)
2455069d
UW
740{
741 int signo;
742
743 sigemptyset (&pass_mask);
744
745 for (signo = 1; signo < NSIG; signo++)
746 {
2ea28649 747 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
748 if (target_signo < numsigs && pass_signals[target_signo])
749 sigaddset (&pass_mask, signo);
750 }
751}
752
d6b0e80f
AC
753\f
754
755/* Prototypes for local functions. */
756static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 757static int linux_thread_alive (ptid_t ptid);
8dd27370 758static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
20ba1ce6 759static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
710151dd 760
d6b0e80f 761\f
d6b0e80f 762
7b50312a
PA
763/* Destroy and free LP. */
764
765static void
766lwp_free (struct lwp_info *lp)
767{
768 xfree (lp->arch_private);
769 xfree (lp);
770}
771
d90e17a7
PA
 772/* Remove all LWPs belonging to PID from the lwp list. */
773
774static void
775purge_lwp_list (int pid)
776{
777 struct lwp_info *lp, *lpprev, *lpnext;
778
779 lpprev = NULL;
780
781 for (lp = lwp_list; lp; lp = lpnext)
782 {
783 lpnext = lp->next;
784
785 if (ptid_get_pid (lp->ptid) == pid)
786 {
787 if (lp == lwp_list)
788 lwp_list = lp->next;
789 else
790 lpprev->next = lp->next;
791
7b50312a 792 lwp_free (lp);
d90e17a7
PA
793 }
794 else
795 lpprev = lp;
796 }
797}
798
26cb8b7c
PA
799/* Add the LWP specified by PTID to the list. PTID is the first LWP
800 in the process. Return a pointer to the structure describing the
801 new LWP.
802
803 This differs from add_lwp in that we don't let the arch specific
804 bits know about this new thread. Current clients of this callback
805 take the opportunity to install watchpoints in the new thread, and
806 we shouldn't do that for the first thread. If we're spawning a
807 child ("run"), the thread executes the shell wrapper first, and we
808 shouldn't touch it until it execs the program we want to debug.
809 For "attach", it'd be okay to call the callback, but it's not
810 necessary, because watchpoints can't yet have been inserted into
811 the inferior. */
d6b0e80f
AC
812
813static struct lwp_info *
26cb8b7c 814add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
815{
816 struct lwp_info *lp;
817
dfd4cc63 818 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f
AC
819
820 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
821
822 memset (lp, 0, sizeof (struct lwp_info));
823
25289eb2 824 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
825 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
826
827 lp->ptid = ptid;
dc146f7c 828 lp->core = -1;
d6b0e80f
AC
829
830 lp->next = lwp_list;
831 lwp_list = lp;
d6b0e80f 832
26cb8b7c
PA
833 return lp;
834}
835
836/* Add the LWP specified by PID to the list. Return a pointer to the
837 structure describing the new LWP. The LWP should already be
838 stopped. */
839
840static struct lwp_info *
841add_lwp (ptid_t ptid)
842{
843 struct lwp_info *lp;
844
845 lp = add_initial_lwp (ptid);
846
6e012a6c
PA
847 /* Let the arch specific bits know about this new thread. Current
848 clients of this callback take the opportunity to install
26cb8b7c
PA
849 watchpoints in the new thread. We don't do this for the first
850 thread though. See add_initial_lwp. */
851 if (linux_nat_new_thread != NULL)
7b50312a 852 linux_nat_new_thread (lp);
9f0bdab8 853
d6b0e80f
AC
854 return lp;
855}
856
857/* Remove the LWP specified by PID from the list. */
858
859static void
860delete_lwp (ptid_t ptid)
861{
862 struct lwp_info *lp, *lpprev;
863
864 lpprev = NULL;
865
866 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
867 if (ptid_equal (lp->ptid, ptid))
868 break;
869
870 if (!lp)
871 return;
872
d6b0e80f
AC
873 if (lpprev)
874 lpprev->next = lp->next;
875 else
876 lwp_list = lp->next;
877
7b50312a 878 lwp_free (lp);
d6b0e80f
AC
879}
880
881/* Return a pointer to the structure describing the LWP corresponding
882 to PID. If no corresponding LWP could be found, return NULL. */
883
884static struct lwp_info *
885find_lwp_pid (ptid_t ptid)
886{
887 struct lwp_info *lp;
888 int lwp;
889
dfd4cc63
LM
890 if (ptid_lwp_p (ptid))
891 lwp = ptid_get_lwp (ptid);
d6b0e80f 892 else
dfd4cc63 893 lwp = ptid_get_pid (ptid);
d6b0e80f
AC
894
895 for (lp = lwp_list; lp; lp = lp->next)
dfd4cc63 896 if (lwp == ptid_get_lwp (lp->ptid))
d6b0e80f
AC
897 return lp;
898
899 return NULL;
900}
901
6d4ee8c6 902/* See nat/linux-nat.h. */
d6b0e80f
AC
903
904struct lwp_info *
d90e17a7 905iterate_over_lwps (ptid_t filter,
6d4ee8c6 906 iterate_over_lwps_ftype callback,
d90e17a7 907 void *data)
d6b0e80f
AC
908{
909 struct lwp_info *lp, *lpnext;
910
911 for (lp = lwp_list; lp; lp = lpnext)
912 {
913 lpnext = lp->next;
d90e17a7
PA
914
915 if (ptid_match (lp->ptid, filter))
916 {
6d4ee8c6 917 if ((*callback) (lp, data) != 0)
d90e17a7
PA
918 return lp;
919 }
d6b0e80f
AC
920 }
921
922 return NULL;
923}
924
2277426b
PA
925/* Update our internal state when changing from one checkpoint to
926 another indicated by NEW_PTID. We can only switch single-threaded
927 applications, so we only create one new LWP, and the previous list
928 is discarded. */
f973ed9c
DJ
929
930void
931linux_nat_switch_fork (ptid_t new_ptid)
932{
933 struct lwp_info *lp;
934
dfd4cc63 935 purge_lwp_list (ptid_get_pid (inferior_ptid));
2277426b 936
f973ed9c
DJ
937 lp = add_lwp (new_ptid);
938 lp->stopped = 1;
e26af52f 939
2277426b
PA
940 /* This changes the thread's ptid while preserving the gdb thread
941 num. Also changes the inferior pid, while preserving the
942 inferior num. */
943 thread_change_ptid (inferior_ptid, new_ptid);
944
945 /* We've just told GDB core that the thread changed target id, but,
946 in fact, it really is a different thread, with different register
947 contents. */
948 registers_changed ();
e26af52f
DJ
949}
950
e26af52f
DJ
951/* Handle the exit of a single thread LP. */
952
953static void
954exit_lwp (struct lwp_info *lp)
955{
e09875d4 956 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
957
958 if (th)
e26af52f 959 {
17faa917
DJ
960 if (print_thread_events)
961 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
962
4f8d22e3 963 delete_thread (lp->ptid);
e26af52f
DJ
964 }
965
966 delete_lwp (lp->ptid);
967}
968
a0ef4274
DJ
969/* Wait for the LWP specified by LP, which we have just attached to.
970 Returns a wait status for that LWP, to cache. */
971
972static int
973linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
974 int *signalled)
975{
dfd4cc63 976 pid_t new_pid, pid = ptid_get_lwp (ptid);
a0ef4274
DJ
977 int status;
978
644cebc9 979 if (linux_proc_pid_is_stopped (pid))
a0ef4274
DJ
980 {
981 if (debug_linux_nat)
982 fprintf_unfiltered (gdb_stdlog,
983 "LNPAW: Attaching to a stopped process\n");
984
985 /* The process is definitely stopped. It is in a job control
986 stop, unless the kernel predates the TASK_STOPPED /
987 TASK_TRACED distinction, in which case it might be in a
988 ptrace stop. Make sure it is in a ptrace stop; from there we
989 can kill it, signal it, et cetera.
990
991 First make sure there is a pending SIGSTOP. Since we are
992 already attached, the process can not transition from stopped
993 to running without a PTRACE_CONT; so we know this signal will
994 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
995 probably already in the queue (unless this kernel is old
996 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
997 is not an RT signal, it can only be queued once. */
998 kill_lwp (pid, SIGSTOP);
999
1000 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1001 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1002 ptrace (PTRACE_CONT, pid, 0, 0);
1003 }
1004
1005 /* Make sure the initial process is stopped. The user-level threads
1006 layer might want to poke around in the inferior, and that won't
1007 work if things haven't stabilized yet. */
1008 new_pid = my_waitpid (pid, &status, 0);
1009 if (new_pid == -1 && errno == ECHILD)
1010 {
1011 if (first)
1012 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1013
1014 /* Try again with __WCLONE to check cloned processes. */
1015 new_pid = my_waitpid (pid, &status, __WCLONE);
1016 *cloned = 1;
1017 }
1018
dacc9cb2
PP
1019 gdb_assert (pid == new_pid);
1020
1021 if (!WIFSTOPPED (status))
1022 {
1023 /* The pid we tried to attach has apparently just exited. */
1024 if (debug_linux_nat)
1025 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1026 pid, status_to_str (status));
1027 return status;
1028 }
a0ef4274
DJ
1029
1030 if (WSTOPSIG (status) != SIGSTOP)
1031 {
1032 *signalled = 1;
1033 if (debug_linux_nat)
1034 fprintf_unfiltered (gdb_stdlog,
1035 "LNPAW: Received %s after attaching\n",
1036 status_to_str (status));
1037 }
1038
1039 return status;
1040}
1041
84636d28
PA
1042/* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1043 the new LWP could not be attached, or 1 if we're already auto
1044 attached to this thread, but haven't processed the
1045 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
 1046 its existence, without considering it an error. */
d6b0e80f 1047
9ee57c33 1048int
93815fbf 1049lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1050{
9ee57c33 1051 struct lwp_info *lp;
84636d28 1052 int lwpid;
d6b0e80f 1053
dfd4cc63 1054 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 1055
9ee57c33 1056 lp = find_lwp_pid (ptid);
dfd4cc63 1057 lwpid = ptid_get_lwp (ptid);
d6b0e80f 1058
3b27ef47 1059 /* We assume that we're already attached to any LWP that is already
d6b0e80f
AC
1060 in our list of LWPs. If we're not seeing exit events from threads
1061 and we've had PID wraparound since we last tried to stop all threads,
1062 this assumption might be wrong; fortunately, this is very unlikely
1063 to happen. */
3b27ef47 1064 if (lp == NULL)
d6b0e80f 1065 {
a0ef4274 1066 int status, cloned = 0, signalled = 0;
d6b0e80f 1067
84636d28 1068 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
9ee57c33 1069 {
96d7229d 1070 if (linux_supports_tracefork ())
84636d28
PA
1071 {
1072 /* If we haven't stopped all threads when we get here,
1073 we may have seen a thread listed in thread_db's list,
1074 but not processed the PTRACE_EVENT_CLONE yet. If
1075 that's the case, ignore this new thread, and let
1076 normal event handling discover it later. */
1077 if (in_pid_list_p (stopped_pids, lwpid))
1078 {
1079 /* We've already seen this thread stop, but we
1080 haven't seen the PTRACE_EVENT_CLONE extended
1081 event yet. */
3b27ef47
PA
1082 if (debug_linux_nat)
1083 fprintf_unfiltered (gdb_stdlog,
1084 "LLAL: attach failed, but already seen "
1085 "this thread %s stop\n",
1086 target_pid_to_str (ptid));
1087 return 1;
84636d28
PA
1088 }
1089 else
1090 {
1091 int new_pid;
1092 int status;
1093
3b27ef47
PA
1094 if (debug_linux_nat)
1095 fprintf_unfiltered (gdb_stdlog,
1096 "LLAL: attach failed, and haven't seen "
1097 "this thread %s stop yet\n",
1098 target_pid_to_str (ptid));
1099
1100 /* We may or may not be attached to the LWP already.
1101 Try waitpid on it. If that errors, we're not
1102 attached to the LWP yet. Otherwise, we're
1103 already attached. */
a33e3959 1104 gdb_assert (lwpid > 0);
84636d28
PA
1105 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1106 if (new_pid == -1 && errno == ECHILD)
1107 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1108 if (new_pid != -1)
1109 {
3b27ef47
PA
1110 if (new_pid == 0)
1111 {
1112 /* The child hasn't stopped for its initial
1113 SIGSTOP stop yet. */
1114 if (debug_linux_nat)
1115 fprintf_unfiltered (gdb_stdlog,
1116 "LLAL: child hasn't "
1117 "stopped yet\n");
1118 }
1119 else if (WIFSTOPPED (status))
1120 {
1121 if (debug_linux_nat)
1122 fprintf_unfiltered (gdb_stdlog,
1123 "LLAL: adding to stopped_pids\n");
1124 add_to_pid_list (&stopped_pids, lwpid, status);
1125 }
84636d28
PA
1126 return 1;
1127 }
1128 }
1129 }
1130
9ee57c33
DJ
1131 /* If we fail to attach to the thread, issue a warning,
1132 but continue. One way this can happen is if thread
e9efe249 1133 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1134 bug may place threads in the thread list and then fail
1135 to create them. */
1136 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1137 safe_strerror (errno));
1138 return -1;
1139 }
1140
d6b0e80f
AC
1141 if (debug_linux_nat)
1142 fprintf_unfiltered (gdb_stdlog,
1143 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1144 target_pid_to_str (ptid));
1145
a0ef4274 1146 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2 1147 if (!WIFSTOPPED (status))
12696c10 1148 return 1;
dacc9cb2 1149
a0ef4274
DJ
1150 lp = add_lwp (ptid);
1151 lp->stopped = 1;
3b27ef47 1152 lp->last_resume_kind = resume_stop;
a0ef4274
DJ
1153 lp->cloned = cloned;
1154 lp->signalled = signalled;
1155 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1156 {
a0ef4274
DJ
1157 lp->resumed = 1;
1158 lp->status = status;
d6b0e80f
AC
1159 }
1160
dfd4cc63 1161 target_post_attach (ptid_get_lwp (lp->ptid));
d6b0e80f
AC
1162
1163 if (debug_linux_nat)
1164 {
1165 fprintf_unfiltered (gdb_stdlog,
1166 "LLAL: waitpid %s received %s\n",
1167 target_pid_to_str (ptid),
1168 status_to_str (status));
1169 }
1170 }
9ee57c33 1171
9ee57c33 1172 return 0;
d6b0e80f
AC
1173}
1174
b84876c2 1175static void
136d6dae
VP
1176linux_nat_create_inferior (struct target_ops *ops,
1177 char *exec_file, char *allargs, char **env,
b84876c2
PA
1178 int from_tty)
1179{
8cc73a39
SDJ
1180 struct cleanup *restore_personality
1181 = maybe_disable_address_space_randomization (disable_randomization);
b84876c2
PA
1182
1183 /* The fork_child mechanism is synchronous and calls target_wait, so
1184 we have to mask the async mode. */
1185
2455069d 1186 /* Make sure we report all signals during startup. */
94bedb42 1187 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1188
136d6dae 1189 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1190
8cc73a39 1191 do_cleanups (restore_personality);
b84876c2
PA
1192}
1193
8784d563
PA
1194/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1195 already attached. Returns true if a new LWP is found, false
1196 otherwise. */
1197
1198static int
1199attach_proc_task_lwp_callback (ptid_t ptid)
1200{
1201 struct lwp_info *lp;
1202
1203 /* Ignore LWPs we're already attached to. */
1204 lp = find_lwp_pid (ptid);
1205 if (lp == NULL)
1206 {
1207 int lwpid = ptid_get_lwp (ptid);
1208
1209 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1210 {
1211 int err = errno;
1212
1213 /* Be quiet if we simply raced with the thread exiting.
1214 EPERM is returned if the thread's task still exists, and
1215 is marked as exited or zombie, as well as other
1216 conditions, so in that case, confirm the status in
1217 /proc/PID/status. */
1218 if (err == ESRCH
1219 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1220 {
1221 if (debug_linux_nat)
1222 {
1223 fprintf_unfiltered (gdb_stdlog,
1224 "Cannot attach to lwp %d: "
1225 "thread is gone (%d: %s)\n",
1226 lwpid, err, safe_strerror (err));
1227 }
1228 }
1229 else
1230 {
f71f0b0d 1231 warning (_("Cannot attach to lwp %d: %s"),
8784d563
PA
1232 lwpid,
1233 linux_ptrace_attach_fail_reason_string (ptid,
1234 err));
1235 }
1236 }
1237 else
1238 {
1239 if (debug_linux_nat)
1240 fprintf_unfiltered (gdb_stdlog,
1241 "PTRACE_ATTACH %s, 0, 0 (OK)\n",
1242 target_pid_to_str (ptid));
1243
1244 lp = add_lwp (ptid);
1245 lp->cloned = 1;
1246
1247 /* The next time we wait for this LWP we'll see a SIGSTOP as
1248 PTRACE_ATTACH brings it to a halt. */
1249 lp->signalled = 1;
1250
1251 /* We need to wait for a stop before being able to make the
1252 next ptrace call on this LWP. */
1253 lp->must_set_ptrace_flags = 1;
1254 }
1255
1256 return 1;
1257 }
1258 return 0;
1259}
1260
d6b0e80f 1261static void
c0939df1 1262linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f
AC
1263{
1264 struct lwp_info *lp;
d6b0e80f 1265 int status;
af990527 1266 ptid_t ptid;
d6b0e80f 1267
2455069d 1268 /* Make sure we report all signals during attach. */
94bedb42 1269 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1270
492d29ea 1271 TRY
87b0bb13
JK
1272 {
1273 linux_ops->to_attach (ops, args, from_tty);
1274 }
492d29ea 1275 CATCH (ex, RETURN_MASK_ERROR)
87b0bb13
JK
1276 {
1277 pid_t pid = parse_pid_to_attach (args);
1278 struct buffer buffer;
1279 char *message, *buffer_s;
1280
1281 message = xstrdup (ex.message);
1282 make_cleanup (xfree, message);
1283
1284 buffer_init (&buffer);
7ae1a6a6 1285 linux_ptrace_attach_fail_reason (pid, &buffer);
87b0bb13
JK
1286
1287 buffer_grow_str0 (&buffer, "");
1288 buffer_s = buffer_finish (&buffer);
1289 make_cleanup (xfree, buffer_s);
1290
7ae1a6a6
PA
1291 if (*buffer_s != '\0')
1292 throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1293 else
1294 throw_error (ex.error, "%s", message);
87b0bb13 1295 }
492d29ea 1296 END_CATCH
d6b0e80f 1297
af990527
PA
1298 /* The ptrace base target adds the main thread with (pid,0,0)
1299 format. Decorate it with lwp info. */
dfd4cc63
LM
1300 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1301 ptid_get_pid (inferior_ptid),
1302 0);
af990527
PA
1303 thread_change_ptid (inferior_ptid, ptid);
1304
9f0bdab8 1305 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1306 lp = add_initial_lwp (ptid);
a0ef4274
DJ
1307
1308 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1309 &lp->signalled);
dacc9cb2
PP
1310 if (!WIFSTOPPED (status))
1311 {
1312 if (WIFEXITED (status))
1313 {
1314 int exit_code = WEXITSTATUS (status);
1315
1316 target_terminal_ours ();
1317 target_mourn_inferior ();
1318 if (exit_code == 0)
1319 error (_("Unable to attach: program exited normally."));
1320 else
1321 error (_("Unable to attach: program exited with code %d."),
1322 exit_code);
1323 }
1324 else if (WIFSIGNALED (status))
1325 {
2ea28649 1326 enum gdb_signal signo;
dacc9cb2
PP
1327
1328 target_terminal_ours ();
1329 target_mourn_inferior ();
1330
2ea28649 1331 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1332 error (_("Unable to attach: program terminated with signal "
1333 "%s, %s."),
2ea28649
PA
1334 gdb_signal_to_name (signo),
1335 gdb_signal_to_string (signo));
dacc9cb2
PP
1336 }
1337
1338 internal_error (__FILE__, __LINE__,
1339 _("unexpected status %d for PID %ld"),
dfd4cc63 1340 status, (long) ptid_get_lwp (ptid));
dacc9cb2
PP
1341 }
1342
a0ef4274 1343 lp->stopped = 1;
9f0bdab8 1344
a0ef4274 1345 /* Save the wait status to report later. */
d6b0e80f 1346 lp->resumed = 1;
a0ef4274
DJ
1347 if (debug_linux_nat)
1348 fprintf_unfiltered (gdb_stdlog,
1349 "LNA: waitpid %ld, saving status %s\n",
dfd4cc63 1350 (long) ptid_get_pid (lp->ptid), status_to_str (status));
710151dd 1351
7feb7d06
PA
1352 lp->status = status;
1353
8784d563
PA
1354 /* We must attach to every LWP. If /proc is mounted, use that to
1355 find them now. The inferior may be using raw clone instead of
1356 using pthreads. But even if it is using pthreads, thread_db
1357 walks structures in the inferior's address space to find the list
1358 of threads/LWPs, and those structures may well be corrupted.
1359 Note that once thread_db is loaded, we'll still use it to list
1360 threads and associate pthread info with each LWP. */
1361 linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1362 attach_proc_task_lwp_callback);
1363
7feb7d06 1364 if (target_can_async_p ())
6a3753b3 1365 target_async (1);
d6b0e80f
AC
1366}
1367
a0ef4274
DJ
1368/* Get pending status of LP. */
1369static int
1370get_pending_status (struct lwp_info *lp, int *status)
1371{
a493e3e2 1372 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1373
1374 /* If we paused threads momentarily, we may have stored pending
1375 events in lp->status or lp->waitstatus (see stop_wait_callback),
1376 and GDB core hasn't seen any signal for those threads.
1377 Otherwise, the last signal reported to the core is found in the
1378 thread object's stop_signal.
1379
1380 There's a corner case that isn't handled here at present. Only
1381 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1382 stop_signal make sense as a real signal to pass to the inferior.
1383 Some catchpoint related events, like
1384 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1385 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1386 those traps are debug API (ptrace in our case) related and
1387 induced; the inferior wouldn't see them if it wasn't being
1388 traced. Hence, we should never pass them to the inferior, even
1389 when set to pass state. Since this corner case isn't handled by
1390 infrun.c when proceeding with a signal, for consistency, neither
1391 do we handle it here (or elsewhere in the file we check for
1392 signal pass state). Normally SIGTRAP isn't set to pass state, so
1393 this is really a corner case. */
1394
1395 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1396 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1397 else if (lp->status)
2ea28649 1398 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
ca2163eb
PA
1399 else if (non_stop && !is_executing (lp->ptid))
1400 {
1401 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1402
16c381f0 1403 signo = tp->suspend.stop_signal;
ca2163eb
PA
1404 }
1405 else if (!non_stop)
a0ef4274 1406 {
ca2163eb
PA
1407 struct target_waitstatus last;
1408 ptid_t last_ptid;
4c28f408 1409
ca2163eb 1410 get_last_target_status (&last_ptid, &last);
4c28f408 1411
dfd4cc63 1412 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
ca2163eb 1413 {
e09875d4 1414 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1415
16c381f0 1416 signo = tp->suspend.stop_signal;
4c28f408 1417 }
ca2163eb 1418 }
4c28f408 1419
ca2163eb 1420 *status = 0;
4c28f408 1421
a493e3e2 1422 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1423 {
1424 if (debug_linux_nat)
1425 fprintf_unfiltered (gdb_stdlog,
1426 "GPT: lwp %s has no pending signal\n",
1427 target_pid_to_str (lp->ptid));
1428 }
1429 else if (!signal_pass_state (signo))
1430 {
1431 if (debug_linux_nat)
3e43a32a
MS
1432 fprintf_unfiltered (gdb_stdlog,
1433 "GPT: lwp %s had signal %s, "
1434 "but it is in no pass state\n",
ca2163eb 1435 target_pid_to_str (lp->ptid),
2ea28649 1436 gdb_signal_to_string (signo));
a0ef4274 1437 }
a0ef4274 1438 else
4c28f408 1439 {
2ea28649 1440 *status = W_STOPCODE (gdb_signal_to_host (signo));
ca2163eb
PA
1441
1442 if (debug_linux_nat)
1443 fprintf_unfiltered (gdb_stdlog,
1444 "GPT: lwp %s has pending signal %s\n",
1445 target_pid_to_str (lp->ptid),
2ea28649 1446 gdb_signal_to_string (signo));
4c28f408 1447 }
a0ef4274
DJ
1448
1449 return 0;
1450}
1451
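/* Callback for iterate_over_lwps: detach from LP unless it is the
   thread-group leader, passing along any pending signal and getting
   rid of any pending SIGSTOP first.  */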
d6b0e80f
AC
1452static int
1453detach_callback (struct lwp_info *lp, void *data)
1454{
1455 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1456
1457 if (debug_linux_nat && lp->status)
1458 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1459 strsignal (WSTOPSIG (lp->status)),
1460 target_pid_to_str (lp->ptid));
1461
a0ef4274
DJ
1462 /* If there is a pending SIGSTOP, get rid of it. */
1463 if (lp->signalled)
d6b0e80f 1464 {
d6b0e80f
AC
1465 if (debug_linux_nat)
1466 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1467 "DC: Sending SIGCONT to %s\n",
1468 target_pid_to_str (lp->ptid));
d6b0e80f 1469
dfd4cc63 1470 kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
d6b0e80f 1471 lp->signalled = 0;
d6b0e80f
AC
1472 }
1473
1474 /* We don't actually detach from the LWP that has an id equal to the
1475 overall process id just yet. */
dfd4cc63 1476 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
d6b0e80f 1477 {
a0ef4274
DJ
1478 int status = 0;
1479
1480 /* Pass on any pending signal for this LWP. */
1481 get_pending_status (lp, &status);
1482
7b50312a
PA
1483 if (linux_nat_prepare_to_resume != NULL)
1484 linux_nat_prepare_to_resume (lp);
d6b0e80f 1485 errno = 0;
dfd4cc63 1486 if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
a0ef4274 1487 WSTOPSIG (status)) < 0)
8a3fe4f8 1488 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1489 safe_strerror (errno));
1490
1491 if (debug_linux_nat)
1492 fprintf_unfiltered (gdb_stdlog,
1493 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1494 target_pid_to_str (lp->ptid),
7feb7d06 1495 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1496
1497 delete_lwp (lp->ptid);
1498 }
1499
1500 return 0;
1501}
1502
1503static void
52554a0e 1504linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f 1505{
b84876c2 1506 int pid;
a0ef4274 1507 int status;
d90e17a7
PA
1508 struct lwp_info *main_lwp;
1509
dfd4cc63 1510 pid = ptid_get_pid (inferior_ptid);
a0ef4274 1511
ae5e0686
MK
1512 /* Don't unregister from the event loop, as there may be other
1513 inferiors running. */
b84876c2 1514
4c28f408
PA
1515 /* Stop all threads before detaching. ptrace requires that the
 1516 thread is stopped to successfully detach. */
d90e17a7 1517 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1518 /* ... and wait until all of them have reported back that
1519 they're no longer running. */
d90e17a7 1520 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1521
d90e17a7 1522 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1523
1524 /* Only the initial process should be left right now. */
dfd4cc63 1525 gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
d90e17a7
PA
1526
1527 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1528
a0ef4274
DJ
1529 /* Pass on any pending signal for the last LWP. */
1530 if ((args == NULL || *args == '\0')
d90e17a7 1531 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1532 && WIFSTOPPED (status))
1533 {
52554a0e
TT
1534 char *tem;
1535
a0ef4274
DJ
1536 /* Put the signal number in ARGS so that inf_ptrace_detach will
1537 pass it along with PTRACE_DETACH. */
52554a0e 1538 tem = alloca (8);
cde33bf1 1539 xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
52554a0e 1540 args = tem;
ddabfc73
TT
1541 if (debug_linux_nat)
1542 fprintf_unfiltered (gdb_stdlog,
1543 "LND: Sending signal %s to %s\n",
1544 args,
1545 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1546 }
1547
7b50312a
PA
1548 if (linux_nat_prepare_to_resume != NULL)
1549 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1550 delete_lwp (main_lwp->ptid);
b84876c2 1551
7a7d3353
PA
1552 if (forks_exist_p ())
1553 {
1554 /* Multi-fork case. The current inferior_ptid is being detached
1555 from, but there are other viable forks to debug. Detach from
1556 the current fork, and context-switch to the first
1557 available. */
1558 linux_fork_detach (args, from_tty);
7a7d3353
PA
1559 }
1560 else
1561 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1562}
1563
8a99810d
PA
1564/* Resume execution of the inferior process. If STEP is nonzero,
1565 single-step it. If SIGNAL is nonzero, give it that signal. */
1566
1567static void
23f238d3
PA
1568linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1569 enum gdb_signal signo)
8a99810d 1570{
8a99810d 1571 lp->step = step;
9c02b525
PA
1572
1573 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1574 We only presently need that if the LWP is stepped though (to
1575 handle the case of stepping a breakpoint instruction). */
1576 if (step)
1577 {
1578 struct regcache *regcache = get_thread_regcache (lp->ptid);
1579
1580 lp->stop_pc = regcache_read_pc (regcache);
1581 }
1582 else
1583 lp->stop_pc = 0;
1584
8a99810d
PA
1585 if (linux_nat_prepare_to_resume != NULL)
1586 linux_nat_prepare_to_resume (lp);
90ad5e1d 1587 linux_ops->to_resume (linux_ops, lp->ptid, step, signo);
23f238d3
PA
1588
1589 /* Successfully resumed. Clear state that no longer makes sense,
1590 and mark the LWP as running. Must not do this before resuming
1591 otherwise if that fails other code will be confused. E.g., we'd
1592 later try to stop the LWP and hang forever waiting for a stop
1593 status. Note that we must not throw after this is cleared,
1594 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1595 lp->stopped = 0;
23f238d3 1596 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8a99810d
PA
1597 registers_changed_ptid (lp->ptid);
1598}
1599
23f238d3
PA
1600/* Called when we try to resume a stopped LWP and that errors out. If
1601 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1602 or about to become), discard the error, clear any pending status
1603 the LWP may have, and return true (we'll collect the exit status
1604 soon enough). Otherwise, return false. */
1605
1606static int
1607check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1608{
1609 /* If we get an error after resuming the LWP successfully, we'd
1610 confuse !T state for the LWP being gone. */
1611 gdb_assert (lp->stopped);
1612
1613 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1614 because even if ptrace failed with ESRCH, the tracee may be "not
1615 yet fully dead", but already refusing ptrace requests. In that
1616 case the tracee has 'R (Running)' state for a little bit
1617 (observed in Linux 3.18). See also the note on ESRCH in the
1618 ptrace(2) man page. Instead, check whether the LWP has any state
1619 other than ptrace-stopped. */
1620
1621 /* Don't assume anything if /proc/PID/status can't be read. */
1622 if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
1623 {
1624 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1625 lp->status = 0;
1626 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1627 return 1;
1628 }
1629 return 0;
1630}
1631
1632/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1633 disappears while we try to resume it. */
1634
1635static void
1636linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1637{
1638 TRY
1639 {
1640 linux_resume_one_lwp_throw (lp, step, signo);
1641 }
1642 CATCH (ex, RETURN_MASK_ERROR)
1643 {
1644 if (!check_ptrace_stopped_lwp_gone (lp))
1645 throw_exception (ex);
1646 }
1647 END_CATCH
1648}
1649
d6b0e80f
AC
1650/* Resume LP. */
1651
25289eb2 1652static void
e5ef252a 1653resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1654{
25289eb2 1655 if (lp->stopped)
6c95b8df 1656 {
c9657e70 1657 struct inferior *inf = find_inferior_ptid (lp->ptid);
25289eb2
PA
1658
1659 if (inf->vfork_child != NULL)
1660 {
1661 if (debug_linux_nat)
1662 fprintf_unfiltered (gdb_stdlog,
1663 "RC: Not resuming %s (vfork parent)\n",
1664 target_pid_to_str (lp->ptid));
1665 }
8a99810d 1666 else if (!lwp_status_pending_p (lp))
25289eb2
PA
1667 {
1668 if (debug_linux_nat)
1669 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1670 "RC: Resuming sibling %s, %s, %s\n",
1671 target_pid_to_str (lp->ptid),
1672 (signo != GDB_SIGNAL_0
1673 ? strsignal (gdb_signal_to_host (signo))
1674 : "0"),
1675 step ? "step" : "resume");
25289eb2 1676
8a99810d 1677 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1678 }
1679 else
1680 {
1681 if (debug_linux_nat)
1682 fprintf_unfiltered (gdb_stdlog,
1683 "RC: Not resuming sibling %s (has pending)\n",
1684 target_pid_to_str (lp->ptid));
1685 }
6c95b8df 1686 }
25289eb2 1687 else
d6b0e80f 1688 {
d90e17a7
PA
1689 if (debug_linux_nat)
1690 fprintf_unfiltered (gdb_stdlog,
25289eb2 1691 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1692 target_pid_to_str (lp->ptid));
d6b0e80f 1693 }
25289eb2 1694}
d6b0e80f 1695
8817a6f2
PA
1696/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1697 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1698
25289eb2 1699static int
8817a6f2 1700linux_nat_resume_callback (struct lwp_info *lp, void *except)
25289eb2 1701{
e5ef252a
PA
1702 enum gdb_signal signo = GDB_SIGNAL_0;
1703
8817a6f2
PA
1704 if (lp == except)
1705 return 0;
1706
e5ef252a
PA
1707 if (lp->stopped)
1708 {
1709 struct thread_info *thread;
1710
1711 thread = find_thread_ptid (lp->ptid);
1712 if (thread != NULL)
1713 {
70509625 1714 signo = thread->suspend.stop_signal;
e5ef252a
PA
1715 thread->suspend.stop_signal = GDB_SIGNAL_0;
1716 }
1717 }
1718
1719 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1720 return 0;
1721}
1722
1723static int
1724resume_clear_callback (struct lwp_info *lp, void *data)
1725{
1726 lp->resumed = 0;
25289eb2 1727 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1728 return 0;
1729}
1730
1731static int
1732resume_set_callback (struct lwp_info *lp, void *data)
1733{
1734 lp->resumed = 1;
25289eb2 1735 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1736 return 0;
1737}
1738
1739static void
28439f5e 1740linux_nat_resume (struct target_ops *ops,
2ea28649 1741 ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1742{
1743 struct lwp_info *lp;
d90e17a7 1744 int resume_many;
d6b0e80f 1745
76f50ad1
DJ
1746 if (debug_linux_nat)
1747 fprintf_unfiltered (gdb_stdlog,
1748 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1749 step ? "step" : "resume",
1750 target_pid_to_str (ptid),
a493e3e2 1751 (signo != GDB_SIGNAL_0
2ea28649 1752 ? strsignal (gdb_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1753 target_pid_to_str (inferior_ptid));
1754
d6b0e80f 1755 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1756 resume_many = (ptid_equal (minus_one_ptid, ptid)
1757 || ptid_is_pid (ptid));
4c28f408 1758
e3e9f5a2
PA
1759 /* Mark the lwps we're resuming as resumed. */
1760 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1761
d90e17a7
PA
1762 /* See if it's the current inferior that should be handled
1763 specially. */
1764 if (resume_many)
1765 lp = find_lwp_pid (inferior_ptid);
1766 else
1767 lp = find_lwp_pid (ptid);
9f0bdab8 1768 gdb_assert (lp != NULL);
d6b0e80f 1769
9f0bdab8 1770 /* Remember if we're stepping. */
25289eb2 1771 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1772
9f0bdab8
DJ
1773 /* If we have a pending wait status for this thread, there is no
1774 point in resuming the process. But first make sure that
1775 linux_nat_wait won't preemptively handle the event - we
1776 should never take this short-circuit if we are going to
1777 leave LP running, since we have skipped resuming all the
1778 other threads. This bit of code needs to be synchronized
1779 with linux_nat_wait. */
76f50ad1 1780
9f0bdab8
DJ
1781 if (lp->status && WIFSTOPPED (lp->status))
1782 {
2455069d
UW
1783 if (!lp->step
1784 && WSTOPSIG (lp->status)
1785 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1786 {
9f0bdab8
DJ
1787 if (debug_linux_nat)
1788 fprintf_unfiltered (gdb_stdlog,
1789 "LLR: Not short circuiting for ignored "
1790 "status 0x%x\n", lp->status);
1791
d6b0e80f
AC
1792 /* FIXME: What should we do if we are supposed to continue
1793 this thread with a signal? */
a493e3e2 1794 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1795 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1796 lp->status = 0;
1797 }
1798 }
76f50ad1 1799
8a99810d 1800 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1801 {
1802 /* FIXME: What should we do if we are supposed to continue
1803 this thread with a signal? */
a493e3e2 1804 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1805
9f0bdab8
DJ
1806 if (debug_linux_nat)
1807 fprintf_unfiltered (gdb_stdlog,
1808 "LLR: Short circuiting for status 0x%x\n",
1809 lp->status);
d6b0e80f 1810
7feb7d06
PA
1811 if (target_can_async_p ())
1812 {
6a3753b3 1813 target_async (1);
7feb7d06
PA
1814 /* Tell the event loop we have something to process. */
1815 async_file_mark ();
1816 }
9f0bdab8 1817 return;
d6b0e80f
AC
1818 }
1819
d90e17a7 1820 if (resume_many)
8817a6f2 1821 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
d90e17a7 1822
d6b0e80f
AC
1823 if (debug_linux_nat)
1824 fprintf_unfiltered (gdb_stdlog,
1825 "LLR: %s %s, %s (resume event thread)\n",
1826 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2bf6fb9d 1827 target_pid_to_str (lp->ptid),
a493e3e2 1828 (signo != GDB_SIGNAL_0
2ea28649 1829 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1830
2bf6fb9d
PA
1831 linux_resume_one_lwp (lp, step, signo);
1832
b84876c2 1833 if (target_can_async_p ())
6a3753b3 1834 target_async (1);
d6b0e80f
AC
1835}
1836
c5f62d5f 1837/* Send a signal to an LWP. */
d6b0e80f
AC
1838
1839static int
1840kill_lwp (int lwpid, int signo)
1841{
c5f62d5f
DE
1842 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1843 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
1844
1845#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
1846 {
1847 static int tkill_failed;
1848
1849 if (!tkill_failed)
1850 {
1851 int ret;
1852
1853 errno = 0;
1854 ret = syscall (__NR_tkill, lwpid, signo);
1855 if (errno != ENOSYS)
1856 return ret;
1857 tkill_failed = 1;
1858 }
1859 }
d6b0e80f
AC
1860#endif
1861
1862 return kill (lwpid, signo);
1863}
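/* A small standalone sketch (not part of this file) of the same idea
   using the later tgkill syscall: kill() addresses the whole thread
   group, whereas tkill/tgkill deliver the signal to one specific
   thread, which is what per-LWP stop/resume needs.  TGID and TID are
   placeholders supplied by the caller; the fallback branch is only for
   headers that lack SYS_tgkill.  */

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
example_kill_one_thread (pid_t tgid, pid_t tid, int signo)
{
#ifdef SYS_tgkill
  /* tgkill also verifies that TID still belongs to TGID, avoiding the
     tid-reuse race that plain tkill is subject to.  */
  return syscall (SYS_tgkill, tgid, tid, signo);
#else
  return kill (tgid, signo);	/* Process-wide fallback.  */
#endif
}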
1864
ca2163eb
PA
1865/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1866 event, check if the core is interested in it: if not, ignore the
1867 event, and keep waiting; otherwise, we need to toggle the LWP's
1868 syscall entry/exit status, since the ptrace event itself doesn't
1869 indicate it, and report the trap to higher layers. */
1870
1871static int
1872linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1873{
1874 struct target_waitstatus *ourstatus = &lp->waitstatus;
1875 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1876 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1877
1878 if (stopping)
1879 {
1880 /* If we're stopping threads, there's a SIGSTOP pending, which
1881 makes it so that the LWP reports an immediate syscall return,
1882 followed by the SIGSTOP. Skip seeing that "return" using
1883 PTRACE_CONT directly, and let stop_wait_callback collect the
 1884 SIGSTOP. Later, when the thread is resumed, a new syscall
 1885 entry event is reported. If we didn't do this (and returned 0), we'd
1886 leave a syscall entry pending, and our caller, by using
1887 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1888 itself. Later, when the user re-resumes this LWP, we'd see
1889 another syscall entry event and we'd mistake it for a return.
1890
1891 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1892 (leaving immediately with LWP->signalled set, without issuing
1893 a PTRACE_CONT), it would still be problematic to leave this
1894 syscall enter pending, as later when the thread is resumed,
1895 it would then see the same syscall exit mentioned above,
1896 followed by the delayed SIGSTOP, while the syscall didn't
1897 actually get to execute. It seems it would be even more
1898 confusing to the user. */
1899
1900 if (debug_linux_nat)
1901 fprintf_unfiltered (gdb_stdlog,
1902 "LHST: ignoring syscall %d "
1903 "for LWP %ld (stopping threads), "
1904 "resuming with PTRACE_CONT for SIGSTOP\n",
1905 syscall_number,
dfd4cc63 1906 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1907
1908 lp->syscall_state = TARGET_WAITKIND_IGNORE;
dfd4cc63 1909 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 1910 lp->stopped = 0;
ca2163eb
PA
1911 return 1;
1912 }
1913
1914 if (catch_syscall_enabled ())
1915 {
1916 /* Always update the entry/return state, even if this particular
1917 syscall isn't interesting to the core now. In async mode,
1918 the user could install a new catchpoint for this syscall
1919 between syscall enter/return, and we'll need to know to
1920 report a syscall return if that happens. */
1921 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1922 ? TARGET_WAITKIND_SYSCALL_RETURN
1923 : TARGET_WAITKIND_SYSCALL_ENTRY);
1924
1925 if (catching_syscall_number (syscall_number))
1926 {
1927 /* Alright, an event to report. */
1928 ourstatus->kind = lp->syscall_state;
1929 ourstatus->value.syscall_number = syscall_number;
1930
1931 if (debug_linux_nat)
1932 fprintf_unfiltered (gdb_stdlog,
1933 "LHST: stopping for %s of syscall %d"
1934 " for LWP %ld\n",
3e43a32a
MS
1935 lp->syscall_state
1936 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
1937 ? "entry" : "return",
1938 syscall_number,
dfd4cc63 1939 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1940 return 0;
1941 }
1942
1943 if (debug_linux_nat)
1944 fprintf_unfiltered (gdb_stdlog,
1945 "LHST: ignoring %s of syscall %d "
1946 "for LWP %ld\n",
1947 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1948 ? "entry" : "return",
1949 syscall_number,
dfd4cc63 1950 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1951 }
1952 else
1953 {
1954 /* If we had been syscall tracing, and hence used PT_SYSCALL
1955 before on this LWP, it could happen that the user removes all
1956 syscall catchpoints before we get to process this event.
1957 There are two noteworthy issues here:
1958
1959 - When stopped at a syscall entry event, resuming with
1960 PT_STEP still resumes executing the syscall and reports a
1961 syscall return.
1962
 1963 - Only PT_SYSCALL catches syscall enters. If we last
 1964 single-stepped this thread, then this event can't be a
 1965 syscall enter; given that the last resume was a single-step,
 1966 it has to be a syscall exit.
1967
1968 The points above mean that the next resume, be it PT_STEP or
1969 PT_CONTINUE, can not trigger a syscall trace event. */
1970 if (debug_linux_nat)
1971 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
1972 "LHST: caught syscall event "
1973 "with no syscall catchpoints."
ca2163eb
PA
1974 " %d for LWP %ld, ignoring\n",
1975 syscall_number,
dfd4cc63 1976 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1977 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1978 }
1979
1980 /* The core isn't interested in this event. For efficiency, avoid
1981 stopping all threads only to have the core resume them all again.
1982 Since we're not stopping threads, if we're still syscall tracing
1983 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1984 subsequent syscall. Simply resume using the inf-ptrace layer,
1985 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1986
8a99810d 1987 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1988 return 1;
1989}
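/* A self-contained illustration (not GDB code) of why a PTRACE_SYSCALL
   tracer has to keep the entry/exit state itself, as the function
   above does with lp->syscall_state: with PTRACE_O_TRACESYSGOOD the
   kernel reports the syscall-entry and the syscall-exit stop with the
   same SIGTRAP|0x80 status, so the tracer can only tell them apart by
   toggling a flag.  The fallback define is only for old headers.  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef PTRACE_O_TRACESYSGOOD
# define PTRACE_O_TRACESYSGOOD 0x00000001
#endif

int
main (void)
{
  pid_t child;
  int status, in_syscall = 0;

  child = fork ();
  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);			/* Let the parent set options.  */
      write (1, "hello\n", 6);		/* Traced syscalls follow.  */
      _exit (0);
    }

  waitpid (child, &status, 0);		/* Collect the initial SIGSTOP.  */
  ptrace (PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACESYSGOOD);

  for (;;)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);
      if (waitpid (child, &status, 0) == -1 || WIFEXITED (status))
	break;
      if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
	{
	  /* Entry and exit stops look identical; only the toggled
	     flag says which one this is.  */
	  in_syscall = !in_syscall;
	  printf ("syscall %s\n", in_syscall ? "entry" : "exit");
	}
    }
  return 0;
}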
1990
3d799a95
DJ
1991/* Handle a GNU/Linux extended wait response. If we see a clone
1992 event, we need to add the new LWP to our list (and not report the
1993 trap to higher layers). This function returns non-zero if the
1994 event should be ignored and we should wait again. If STOPPING is
1995 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1996
1997static int
3d799a95
DJ
1998linux_handle_extended_wait (struct lwp_info *lp, int status,
1999 int stopping)
d6b0e80f 2000{
dfd4cc63 2001 int pid = ptid_get_lwp (lp->ptid);
3d799a95 2002 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 2003 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 2004
3d799a95
DJ
2005 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2006 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2007 {
3d799a95
DJ
2008 unsigned long new_pid;
2009 int ret;
2010
2011 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2012
3d799a95
DJ
2013 /* If we haven't already seen the new PID stop, wait for it now. */
2014 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2015 {
2016 /* The new child has a pending SIGSTOP. We can't affect it until it
2017 hits the SIGSTOP, but we're already attached. */
2018 ret = my_waitpid (new_pid, &status,
2019 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2020 if (ret == -1)
2021 perror_with_name (_("waiting for new child"));
2022 else if (ret != new_pid)
2023 internal_error (__FILE__, __LINE__,
2024 _("wait returned unexpected PID %d"), ret);
2025 else if (!WIFSTOPPED (status))
2026 internal_error (__FILE__, __LINE__,
2027 _("wait returned unexpected status 0x%x"), status);
2028 }
2029
3a3e9ee3 2030 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2031
26cb8b7c
PA
2032 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
2033 {
2034 /* The arch-specific native code may need to know about new
2035 forks even if those end up never mapped to an
2036 inferior. */
2037 if (linux_nat_new_fork != NULL)
2038 linux_nat_new_fork (lp, new_pid);
2039 }
2040
2277426b 2041 if (event == PTRACE_EVENT_FORK
dfd4cc63 2042 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 2043 {
2277426b
PA
2044 /* Handle checkpointing by linux-fork.c here as a special
2045 case. We don't want the follow-fork-mode or 'catch fork'
2046 to interfere with this. */
2047
2048 /* This won't actually modify the breakpoint list, but will
2049 physically remove the breakpoints from the child. */
d80ee84f 2050 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
2051
2052 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2053 if (!find_fork_pid (new_pid))
2054 add_fork (new_pid);
2277426b
PA
2055
2056 /* Report as spurious, so that infrun doesn't want to follow
2057 this fork. We're actually doing an infcall in
2058 linux-fork.c. */
2059 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
2060
2061 /* Report the stop to the core. */
2062 return 0;
2063 }
2064
3d799a95
DJ
2065 if (event == PTRACE_EVENT_FORK)
2066 ourstatus->kind = TARGET_WAITKIND_FORKED;
2067 else if (event == PTRACE_EVENT_VFORK)
2068 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2069 else
3d799a95 2070 {
78768c4a
JK
2071 struct lwp_info *new_lp;
2072
3d799a95 2073 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2074
3c4d7e12
PA
2075 if (debug_linux_nat)
2076 fprintf_unfiltered (gdb_stdlog,
2077 "LHEW: Got clone event "
2078 "from LWP %d, new child is LWP %ld\n",
2079 pid, new_pid);
2080
dfd4cc63 2081 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
3d799a95 2082 new_lp->cloned = 1;
4c28f408 2083 new_lp->stopped = 1;
d6b0e80f 2084
3d799a95
DJ
2085 if (WSTOPSIG (status) != SIGSTOP)
2086 {
2087 /* This can happen if someone starts sending signals to
2088 the new thread before it gets a chance to run, which
2089 have a lower number than SIGSTOP (e.g. SIGUSR1).
2090 This is an unlikely case, and harder to handle for
2091 fork / vfork than for clone, so we do not try - but
2092 we handle it for clone events here. We'll send
2093 the other signal on to the thread below. */
2094
2095 new_lp->signalled = 1;
2096 }
2097 else
79395f92
PA
2098 {
2099 struct thread_info *tp;
2100
2101 /* When we stop for an event in some other thread, and
2102 pull the thread list just as this thread has cloned,
2103 we'll have seen the new thread in the thread_db list
2104 before handling the CLONE event (glibc's
2105 pthread_create adds the new thread to the thread list
2106 before clone'ing, and has the kernel fill in the
2107 thread's tid on the clone call with
2108 CLONE_PARENT_SETTID). If that happened, and the core
2109 had requested the new thread to stop, we'll have
2110 killed it with SIGSTOP. But since SIGSTOP is not an
2111 RT signal, it can only be queued once. We need to be
2112 careful to not resume the LWP if we wanted it to
2113 stop. In that case, we'll leave the SIGSTOP pending.
a493e3e2 2114 It will later be reported as GDB_SIGNAL_0. */
79395f92
PA
2115 tp = find_thread_ptid (new_lp->ptid);
2116 if (tp != NULL && tp->stop_requested)
2117 new_lp->last_resume_kind = resume_stop;
2118 else
2119 status = 0;
2120 }
d6b0e80f 2121
2db9a427
PA
2122 /* If the thread_db layer is active, let it record the user
2123 level thread id and status, and add the thread to GDB's
2124 list. */
2125 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 2126 {
2db9a427
PA
2127 /* The process is not using thread_db. Add the LWP to
2128 GDB's list. */
2129 target_post_attach (ptid_get_lwp (new_lp->ptid));
2130 add_thread (new_lp->ptid);
2131 }
4c28f408 2132
2ee52aa4
PA
2133 /* Even if we're stopping the thread for some reason
2134 internal to this module, from the user/frontend's
2135 perspective, this new thread is running. */
2136 set_running (new_lp->ptid, 1);
2db9a427
PA
2137 if (!stopping)
2138 {
2db9a427
PA
2139 set_executing (new_lp->ptid, 1);
2140 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2141 resume_stop. */
2142 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2143 }
2144
79395f92
PA
2145 if (status != 0)
2146 {
2147 /* We created NEW_LP so it cannot yet contain STATUS. */
2148 gdb_assert (new_lp->status == 0);
2149
2150 /* Save the wait status to report later. */
2151 if (debug_linux_nat)
2152 fprintf_unfiltered (gdb_stdlog,
2153 "LHEW: waitpid of new LWP %ld, "
2154 "saving status %s\n",
dfd4cc63 2155 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
2156 status_to_str (status));
2157 new_lp->status = status;
2158 }
2159
20ba1ce6 2160 new_lp->resumed = !stopping;
3d799a95
DJ
2161 return 1;
2162 }
2163
2164 return 0;
d6b0e80f
AC
2165 }
2166
3d799a95
DJ
2167 if (event == PTRACE_EVENT_EXEC)
2168 {
a75724bc
PA
2169 if (debug_linux_nat)
2170 fprintf_unfiltered (gdb_stdlog,
2171 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 2172 ptid_get_lwp (lp->ptid));
a75724bc 2173
3d799a95
DJ
2174 ourstatus->kind = TARGET_WAITKIND_EXECD;
2175 ourstatus->value.execd_pathname
8dd27370 2176 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
3d799a95 2177
8af756ef
PA
2178 /* The thread that execed must have been resumed, but, when a
2179 thread execs, it changes its tid to the tgid, and the old
2180 tgid thread might have not been resumed. */
2181 lp->resumed = 1;
6c95b8df
PA
2182 return 0;
2183 }
2184
2185 if (event == PTRACE_EVENT_VFORK_DONE)
2186 {
2187 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2188 {
6c95b8df 2189 if (debug_linux_nat)
3e43a32a
MS
2190 fprintf_unfiltered (gdb_stdlog,
2191 "LHEW: Got expected PTRACE_EVENT_"
2192 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 2193 ptid_get_lwp (lp->ptid));
3d799a95 2194
6c95b8df
PA
2195 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2196 return 0;
3d799a95
DJ
2197 }
2198
6c95b8df 2199 if (debug_linux_nat)
3e43a32a
MS
2200 fprintf_unfiltered (gdb_stdlog,
2201 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
20ba1ce6 2202 "from LWP %ld: ignoring\n",
dfd4cc63 2203 ptid_get_lwp (lp->ptid));
6c95b8df 2204 return 1;
3d799a95
DJ
2205 }
2206
2207 internal_error (__FILE__, __LINE__,
2208 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2209}
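/* An illustrative standalone sketch (not GDB code) of the clone-event
   protocol handled above: with PTRACE_O_TRACECLONE set on the tracee,
   a clone() stops the parent LWP with a PTRACE_EVENT_CLONE status,
   PTRACE_GETEVENTMSG yields the new kernel tid, and the new LWP then
   reports its own initial stop, which must be collected with waitpid
   before it can safely be resumed.  The fallback defines are only for
   old headers; example_handle_clone_stop is a made-up name.  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_EVENT_CLONE
# define PTRACE_EVENT_CLONE 3
#endif
#ifndef __WALL
# define __WALL 0x40000000
#endif

static void
example_handle_clone_stop (pid_t parent_tid, int status)
{
  int event = status >> 16;		/* PTRACE_EVENT_* number.  */

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_tid = 0;
      int new_status = 0;

      ptrace (PTRACE_GETEVENTMSG, parent_tid, 0, &new_tid);

      /* The new LWP starts out with a pending SIGSTOP; __WALL is
	 needed because it was created with CLONE_THREAD.  */
      if (waitpid ((pid_t) new_tid, &new_status, __WALL) == (pid_t) new_tid
	  && WIFSTOPPED (new_status))
	printf ("new LWP %lu is stopped and can be tracked\n", new_tid);
    }
}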
2210
2211/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2212 exited. */
2213
2214static int
2215wait_lwp (struct lwp_info *lp)
2216{
2217 pid_t pid;
432b4d03 2218 int status = 0;
d6b0e80f 2219 int thread_dead = 0;
432b4d03 2220 sigset_t prev_mask;
d6b0e80f
AC
2221
2222 gdb_assert (!lp->stopped);
2223 gdb_assert (lp->status == 0);
2224
432b4d03
JK
2225 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2226 block_child_signals (&prev_mask);
2227
2228 for (;;)
d6b0e80f 2229 {
432b4d03
JK
2230 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2231 was right and we should just call sigsuspend. */
2232
dfd4cc63 2233 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
d6b0e80f 2234 if (pid == -1 && errno == ECHILD)
dfd4cc63 2235 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2236 if (pid == -1 && errno == ECHILD)
2237 {
2238 /* The thread has previously exited. We need to delete it
2239 now because, for some vendor 2.4 kernels with NPTL
2240 support backported, there won't be an exit event unless
2241 it is the main thread. 2.6 kernels will report an exit
2242 event for each thread that exits, as expected. */
2243 thread_dead = 1;
2244 if (debug_linux_nat)
2245 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2246 target_pid_to_str (lp->ptid));
2247 }
432b4d03
JK
2248 if (pid != 0)
2249 break;
2250
2251 /* Bugs 10970, 12702.
2252 Thread group leader may have exited in which case we'll lock up in
2253 waitpid if there are other threads, even if they are all zombies too.
2254 Basically, we're not supposed to use waitpid this way.
2255 __WCLONE is not applicable for the leader so we can't use that.
2256 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2257 process; it gets ESRCH both for the zombie and for running processes.
2258
2259 As a workaround, check if we're waiting for the thread group leader and
2260 if it's a zombie, and avoid calling waitpid if it is.
2261
 2262 This is racy - what if the tgl becomes a zombie right after we check?
 2263 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2264 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2265
dfd4cc63
LM
2266 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2267 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2268 {
d6b0e80f
AC
2269 thread_dead = 1;
2270 if (debug_linux_nat)
432b4d03
JK
2271 fprintf_unfiltered (gdb_stdlog,
2272 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2273 target_pid_to_str (lp->ptid));
432b4d03 2274 break;
d6b0e80f 2275 }
432b4d03
JK
2276
2277 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2278 get invoked despite our caller had them intentionally blocked by
2279 block_child_signals. This is sensitive only to the loop of
2280 linux_nat_wait_1 and there if we get called my_waitpid gets called
2281 again before it gets to sigsuspend so we can safely let the handlers
2282 get executed here. */
2283
d36bf488
DE
2284 if (debug_linux_nat)
2285 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
432b4d03
JK
2286 sigsuspend (&suspend_mask);
2287 }
2288
2289 restore_child_signals_mask (&prev_mask);
2290
d6b0e80f
AC
2291 if (!thread_dead)
2292 {
dfd4cc63 2293 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2294
2295 if (debug_linux_nat)
2296 {
2297 fprintf_unfiltered (gdb_stdlog,
2298 "WL: waitpid %s received %s\n",
2299 target_pid_to_str (lp->ptid),
2300 status_to_str (status));
2301 }
d6b0e80f 2302
a9f4bb21
PA
2303 /* Check if the thread has exited. */
2304 if (WIFEXITED (status) || WIFSIGNALED (status))
2305 {
2306 thread_dead = 1;
2307 if (debug_linux_nat)
2308 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2309 target_pid_to_str (lp->ptid));
2310 }
d6b0e80f
AC
2311 }
2312
2313 if (thread_dead)
2314 {
e26af52f 2315 exit_lwp (lp);
d6b0e80f
AC
2316 return 0;
2317 }
2318
2319 gdb_assert (WIFSTOPPED (status));
8817a6f2 2320 lp->stopped = 1;
d6b0e80f 2321
8784d563
PA
2322 if (lp->must_set_ptrace_flags)
2323 {
2324 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 2325 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2326
de0d863e 2327 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
2328 lp->must_set_ptrace_flags = 0;
2329 }
2330
ca2163eb
PA
2331 /* Handle GNU/Linux's syscall SIGTRAPs. */
2332 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2333 {
2334 /* No longer need the sysgood bit. The ptrace event ends up
2335 recorded in lp->waitstatus if we care for it. We can carry
2336 on handling the event like a regular SIGTRAP from here
2337 on. */
2338 status = W_STOPCODE (SIGTRAP);
2339 if (linux_handle_syscall_trap (lp, 1))
2340 return wait_lwp (lp);
2341 }
2342
d6b0e80f 2343 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2344 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2345 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2346 {
2347 if (debug_linux_nat)
2348 fprintf_unfiltered (gdb_stdlog,
2349 "WL: Handling extended status 0x%06x\n",
2350 status);
20ba1ce6
PA
2351 linux_handle_extended_wait (lp, status, 1);
2352 return 0;
d6b0e80f
AC
2353 }
2354
2355 return status;
2356}
2357
2358/* Send a SIGSTOP to LP. */
2359
2360static int
2361stop_callback (struct lwp_info *lp, void *data)
2362{
2363 if (!lp->stopped && !lp->signalled)
2364 {
2365 int ret;
2366
2367 if (debug_linux_nat)
2368 {
2369 fprintf_unfiltered (gdb_stdlog,
2370 "SC: kill %s **<SIGSTOP>**\n",
2371 target_pid_to_str (lp->ptid));
2372 }
2373 errno = 0;
dfd4cc63 2374 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2375 if (debug_linux_nat)
2376 {
2377 fprintf_unfiltered (gdb_stdlog,
2378 "SC: lwp kill %d %s\n",
2379 ret,
2380 errno ? safe_strerror (errno) : "ERRNO-OK");
2381 }
2382
2383 lp->signalled = 1;
2384 gdb_assert (lp->status == 0);
2385 }
2386
2387 return 0;
2388}
2389
7b50312a
PA
2390/* Request a stop on LWP. */
2391
2392void
2393linux_stop_lwp (struct lwp_info *lwp)
2394{
2395 stop_callback (lwp, NULL);
2396}
2397
2db9a427
PA
2398/* See linux-nat.h */
2399
2400void
2401linux_stop_and_wait_all_lwps (void)
2402{
2403 /* Stop all LWP's ... */
2404 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2405
2406 /* ... and wait until all of them have reported back that
2407 they're no longer running. */
2408 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2409}
2410
2411/* See linux-nat.h */
2412
2413void
2414linux_unstop_all_lwps (void)
2415{
2416 iterate_over_lwps (minus_one_ptid,
2417 resume_stopped_resumed_lwps, &minus_one_ptid);
2418}
2419
57380f4e 2420/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2421
2422static int
57380f4e
DJ
2423linux_nat_has_pending_sigint (int pid)
2424{
2425 sigset_t pending, blocked, ignored;
57380f4e
DJ
2426
2427 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2428
2429 if (sigismember (&pending, SIGINT)
2430 && !sigismember (&ignored, SIGINT))
2431 return 1;
2432
2433 return 0;
2434}
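/* A standalone sketch (an assumption about what
   linux_proc_pending_signals amounts to, not the real implementation)
   of reading the per-thread signal masks: /proc/<pid>/status exposes
   pending, blocked and ignored signals as hex masks in the SigPnd:,
   SigBlk: and SigIgn: lines, with bit N-1 standing for signal N.  For
   brevity this ignores the shared-pending mask (ShdPnd:) that signals
   sent to the whole process end up in.  */

#include <signal.h>
#include <stdio.h>
#include <string.h>

static int
example_sigint_pending (long lwp)
{
  char path[64], line[256];
  unsigned long long pending = 0, ignored = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%ld/status", lwp);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    {
      if (strncmp (line, "SigPnd:", 7) == 0)
	sscanf (line + 7, "%llx", &pending);
      else if (strncmp (line, "SigIgn:", 7) == 0)
	sscanf (line + 7, "%llx", &ignored);
    }
  fclose (f);

  /* SIGINT is pending for this LWP and not ignored.  */
  return ((pending >> (SIGINT - 1)) & 1) && !((ignored >> (SIGINT - 1)) & 1);
}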
2435
2436/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2437
2438static int
2439set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2440{
57380f4e
DJ
2441 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2442 flag to consume the next one. */
2443 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2444 && WSTOPSIG (lp->status) == SIGINT)
2445 lp->status = 0;
2446 else
2447 lp->ignore_sigint = 1;
2448
2449 return 0;
2450}
2451
2452/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2453 This function is called after we know the LWP has stopped; if the LWP
2454 stopped before the expected SIGINT was delivered, then it will never have
2455 arrived. Also, if the signal was delivered to a shared queue and consumed
2456 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2457
57380f4e
DJ
2458static void
2459maybe_clear_ignore_sigint (struct lwp_info *lp)
2460{
2461 if (!lp->ignore_sigint)
2462 return;
2463
dfd4cc63 2464 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2465 {
2466 if (debug_linux_nat)
2467 fprintf_unfiltered (gdb_stdlog,
2468 "MCIS: Clearing bogus flag for %s\n",
2469 target_pid_to_str (lp->ptid));
2470 lp->ignore_sigint = 0;
2471 }
2472}
2473
ebec9a0f
PA
2474/* Fetch the possible triggered data watchpoint info and store it in
2475 LP.
2476
2477 On some archs, like x86, that use debug registers to set
2478 watchpoints, it's possible that the way to know which watched
2479 address trapped, is to check the register that is used to select
2480 which address to watch. Problem is, between setting the watchpoint
2481 and reading back which data address trapped, the user may change
2482 the set of watchpoints, and, as a consequence, GDB changes the
2483 debug registers in the inferior. To avoid reading back a stale
2484 stopped-data-address when that happens, we cache in LP the fact
2485 that a watchpoint trapped, and the corresponding data address, as
2486 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2487 registers meanwhile, we have the cached data we can rely on. */
2488
9c02b525
PA
2489static int
2490check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f
PA
2491{
2492 struct cleanup *old_chain;
2493
2494 if (linux_ops->to_stopped_by_watchpoint == NULL)
9c02b525 2495 return 0;
ebec9a0f
PA
2496
2497 old_chain = save_inferior_ptid ();
2498 inferior_ptid = lp->ptid;
2499
9c02b525 2500 if (linux_ops->to_stopped_by_watchpoint (linux_ops))
ebec9a0f 2501 {
15c66dd6 2502 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
9c02b525 2503
ebec9a0f
PA
2504 if (linux_ops->to_stopped_data_address != NULL)
2505 lp->stopped_data_address_p =
2506 linux_ops->to_stopped_data_address (&current_target,
2507 &lp->stopped_data_address);
2508 else
2509 lp->stopped_data_address_p = 0;
2510 }
2511
2512 do_cleanups (old_chain);
9c02b525 2513
15c66dd6 2514 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2515}
2516
2517/* Called when the LWP stopped for a trap that could be explained by a
2518 watchpoint or a breakpoint. */
2519
2520static void
2521save_sigtrap (struct lwp_info *lp)
2522{
15c66dd6 2523 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
9c02b525
PA
2524 gdb_assert (lp->status != 0);
2525
faf09f01
PA
2526 /* Check first if this was a SW/HW breakpoint before checking
2527 watchpoints, because at least s390 can't tell the data address of
2528 hardware watchpoint hits, and the kernel returns
2529 stopped-by-watchpoint as long as there's a watchpoint set. */
9c02b525
PA
2530 if (linux_nat_status_is_event (lp->status))
2531 check_stopped_by_breakpoint (lp);
faf09f01
PA
2532
2533 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2534 or hardware watchpoint. Check which is which if we got
2535 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2536 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON
2537 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2538 check_stopped_by_watchpoint (lp);
ebec9a0f
PA
2539}
2540
9c02b525 2541/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f
PA
2542
2543static int
6a109b6b 2544linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2545{
2546 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2547
2548 gdb_assert (lp != NULL);
2549
15c66dd6 2550 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2551}
2552
2553static int
2554linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2555{
2556 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2557
2558 gdb_assert (lp != NULL);
2559
2560 *addr_p = lp->stopped_data_address;
2561
2562 return lp->stopped_data_address_p;
2563}
2564
26ab7092
JK
2565/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2566
2567static int
2568sigtrap_is_event (int status)
2569{
2570 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2571}
2572
26ab7092
JK
2573/* Set alternative SIGTRAP-like events recognizer. If
2574 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2575 applied. */
2576
2577void
2578linux_nat_set_status_is_event (struct target_ops *t,
2579 int (*status_is_event) (int status))
2580{
2581 linux_nat_status_is_event = status_is_event;
2582}
2583
57380f4e
DJ
2584/* Wait until LP is stopped. */
2585
2586static int
2587stop_wait_callback (struct lwp_info *lp, void *data)
2588{
c9657e70 2589 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2590
2591 /* If this is a vfork parent, bail out, it is not going to report
2592 any SIGSTOP until the vfork is done with. */
2593 if (inf->vfork_child != NULL)
2594 return 0;
2595
d6b0e80f
AC
2596 if (!lp->stopped)
2597 {
2598 int status;
2599
2600 status = wait_lwp (lp);
2601 if (status == 0)
2602 return 0;
2603
57380f4e
DJ
2604 if (lp->ignore_sigint && WIFSTOPPED (status)
2605 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2606 {
57380f4e 2607 lp->ignore_sigint = 0;
d6b0e80f
AC
2608
2609 errno = 0;
dfd4cc63 2610 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2611 lp->stopped = 0;
d6b0e80f
AC
2612 if (debug_linux_nat)
2613 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2614 "PTRACE_CONT %s, 0, 0 (%s) "
2615 "(discarding SIGINT)\n",
d6b0e80f
AC
2616 target_pid_to_str (lp->ptid),
2617 errno ? safe_strerror (errno) : "OK");
2618
57380f4e 2619 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2620 }
2621
57380f4e
DJ
2622 maybe_clear_ignore_sigint (lp);
2623
d6b0e80f
AC
2624 if (WSTOPSIG (status) != SIGSTOP)
2625 {
e5ef252a 2626 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2627
e5ef252a
PA
2628 if (debug_linux_nat)
2629 fprintf_unfiltered (gdb_stdlog,
2630 "SWC: Pending event %s in %s\n",
2631 status_to_str ((int) status),
2632 target_pid_to_str (lp->ptid));
2633
2634 /* Save the sigtrap event. */
2635 lp->status = status;
e5ef252a 2636 gdb_assert (lp->signalled);
9c02b525 2637 save_sigtrap (lp);
d6b0e80f
AC
2638 }
2639 else
2640 {
2641 /* We caught the SIGSTOP that we intended to catch, so
2642 there's no SIGSTOP pending. */
e5ef252a
PA
2643
2644 if (debug_linux_nat)
2645 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2646 "SWC: Expected SIGSTOP caught for %s.\n",
e5ef252a
PA
2647 target_pid_to_str (lp->ptid));
2648
e5ef252a
PA
2649 /* Reset SIGNALLED only after the stop_wait_callback call
2650 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2651 lp->signalled = 0;
2652 }
2653 }
2654
2655 return 0;
2656}
2657
9c02b525
PA
2658/* Return non-zero if LP has a wait status pending. Discard the
2659 pending event and resume the LWP if the event that originally
2660 caused the stop became uninteresting. */
d6b0e80f
AC
2661
2662static int
2663status_callback (struct lwp_info *lp, void *data)
2664{
2665 /* Only report a pending wait status if we pretend that this has
2666 indeed been resumed. */
ca2163eb
PA
2667 if (!lp->resumed)
2668 return 0;
2669
eb54c8bf
PA
2670 if (!lwp_status_pending_p (lp))
2671 return 0;
2672
15c66dd6
PA
2673 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2674 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2675 {
2676 struct regcache *regcache = get_thread_regcache (lp->ptid);
2677 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2678 CORE_ADDR pc;
2679 int discard = 0;
2680
9c02b525
PA
2681 pc = regcache_read_pc (regcache);
2682
2683 if (pc != lp->stop_pc)
2684 {
2685 if (debug_linux_nat)
2686 fprintf_unfiltered (gdb_stdlog,
2687 "SC: PC of %s changed. was=%s, now=%s\n",
2688 target_pid_to_str (lp->ptid),
2689 paddress (target_gdbarch (), lp->stop_pc),
2690 paddress (target_gdbarch (), pc));
2691 discard = 1;
2692 }
faf09f01
PA
2693
2694#if !USE_SIGTRAP_SIGINFO
9c02b525
PA
2695 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2696 {
2697 if (debug_linux_nat)
2698 fprintf_unfiltered (gdb_stdlog,
2699 "SC: previous breakpoint of %s, at %s gone\n",
2700 target_pid_to_str (lp->ptid),
2701 paddress (target_gdbarch (), lp->stop_pc));
2702
2703 discard = 1;
2704 }
faf09f01 2705#endif
9c02b525
PA
2706
2707 if (discard)
2708 {
2709 if (debug_linux_nat)
2710 fprintf_unfiltered (gdb_stdlog,
2711 "SC: pending event of %s cancelled.\n",
2712 target_pid_to_str (lp->ptid));
2713
2714 lp->status = 0;
2715 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2716 return 0;
2717 }
9c02b525
PA
2718 }
2719
eb54c8bf 2720 return 1;
d6b0e80f
AC
2721}
2722
2723/* Return non-zero if LP isn't stopped. */
2724
2725static int
2726running_callback (struct lwp_info *lp, void *data)
2727{
25289eb2 2728 return (!lp->stopped
8a99810d 2729 || (lwp_status_pending_p (lp) && lp->resumed));
d6b0e80f
AC
2730}
2731
2732/* Count the LWP's that have had events. */
2733
2734static int
2735count_events_callback (struct lwp_info *lp, void *data)
2736{
2737 int *count = data;
2738
2739 gdb_assert (count != NULL);
2740
9c02b525
PA
2741 /* Select only resumed LWPs that have an event pending. */
2742 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2743 (*count)++;
2744
2745 return 0;
2746}
2747
2748/* Select the LWP (if any) that is currently being single-stepped. */
2749
2750static int
2751select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2752{
25289eb2
PA
2753 if (lp->last_resume_kind == resume_step
2754 && lp->status != 0)
d6b0e80f
AC
2755 return 1;
2756 else
2757 return 0;
2758}
2759
8a99810d
PA
2760/* Returns true if LP has a status pending. */
2761
2762static int
2763lwp_status_pending_p (struct lwp_info *lp)
2764{
 2765 /* We check for lp->waitstatus in addition to lp->status, because
 2766 pending process exits are recorded in lp->waitstatus, and an exit
 2767 with code 0 encodes as W_EXITCODE(0,0), which happens to be 0. */
2768 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2769}
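/* A tiny worked illustration of the point above, assuming the usual
   ((ret) << 8 | (sig)) encoding behind W_EXITCODE (provided via
   gdb_wait.h when the system headers lack it): an exit with code 0
   encodes to the integer 0, so lp->status alone cannot distinguish it
   from "no event pending".  */

#ifndef W_EXITCODE
# define W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#endif

static int
example_exit_status_zero_is_ambiguous (void)
{
  int status = W_EXITCODE (0, 0);	/* "exited with code 0".  */

  /* Evaluates to 1, which is why such exits are kept in
     lp->waitstatus instead of lp->status.  */
  return status == 0;
}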
2770
b90fc188 2771/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2772
2773static int
2774select_event_lwp_callback (struct lwp_info *lp, void *data)
2775{
2776 int *selector = data;
2777
2778 gdb_assert (selector != NULL);
2779
9c02b525
PA
2780 /* Select only resumed LWPs that have an event pending. */
2781 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2782 if ((*selector)-- == 0)
2783 return 1;
2784
2785 return 0;
2786}
2787
9c02b525
PA
2788/* Called when the LWP got a signal/trap that could be explained by a
2789 software or hardware breakpoint. */
2790
710151dd 2791static int
9c02b525 2792check_stopped_by_breakpoint (struct lwp_info *lp)
710151dd
PA
2793{
2794 /* Arrange for a breakpoint to be hit again later. We don't keep
2795 the SIGTRAP status and don't forward the SIGTRAP signal to the
2796 LWP. We will handle the current event, eventually we will resume
2797 this LWP, and this breakpoint will trap again.
2798
2799 If we do not do this, then we run the risk that the user will
2800 delete or disable the breakpoint, but the LWP will have already
2801 tripped on it. */
2802
515630c5
UW
2803 struct regcache *regcache = get_thread_regcache (lp->ptid);
2804 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2805 CORE_ADDR pc;
9c02b525 2806 CORE_ADDR sw_bp_pc;
faf09f01
PA
2807#if USE_SIGTRAP_SIGINFO
2808 siginfo_t siginfo;
2809#endif
9c02b525
PA
2810
2811 pc = regcache_read_pc (regcache);
527a273a 2812 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2813
faf09f01
PA
2814#if USE_SIGTRAP_SIGINFO
2815 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2816 {
2817 if (siginfo.si_signo == SIGTRAP)
2818 {
2819 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
2820 {
2821 if (debug_linux_nat)
2822 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d
PA
2823 "CSBB: %s stopped by software "
2824 "breakpoint\n",
faf09f01
PA
2825 target_pid_to_str (lp->ptid));
2826
2827 /* Back up the PC if necessary. */
2828 if (pc != sw_bp_pc)
2829 regcache_write_pc (regcache, sw_bp_pc);
2830
2831 lp->stop_pc = sw_bp_pc;
2832 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2833 return 1;
2834 }
2835 else if (siginfo.si_code == TRAP_HWBKPT)
2836 {
2837 if (debug_linux_nat)
2838 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d
PA
2839 "CSBB: %s stopped by hardware "
2840 "breakpoint/watchpoint\n",
faf09f01
PA
2841 target_pid_to_str (lp->ptid));
2842
2843 lp->stop_pc = pc;
2844 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2845 return 1;
2846 }
2bf6fb9d
PA
2847 else if (siginfo.si_code == TRAP_TRACE)
2848 {
2849 if (debug_linux_nat)
2850 fprintf_unfiltered (gdb_stdlog,
2851 "CSBB: %s stopped by trace\n",
2852 target_pid_to_str (lp->ptid));
2853 }
faf09f01
PA
2854 }
2855 }
2856#else
9c02b525
PA
2857 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2858 && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2859 sw_bp_pc))
710151dd 2860 {
9c02b525
PA
2861 /* The LWP was either continued, or stepped a software
2862 breakpoint instruction. */
710151dd
PA
2863 if (debug_linux_nat)
2864 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2865 "CSBB: %s stopped by software breakpoint\n",
710151dd
PA
2866 target_pid_to_str (lp->ptid));
2867
2868 /* Back up the PC if necessary. */
9c02b525
PA
2869 if (pc != sw_bp_pc)
2870 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2871
9c02b525 2872 lp->stop_pc = sw_bp_pc;
15c66dd6 2873 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
710151dd
PA
2874 return 1;
2875 }
710151dd 2876
9c02b525
PA
2877 if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2878 {
2879 if (debug_linux_nat)
2880 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2881 "CSBB: stopped by hardware breakpoint %s\n",
9c02b525 2882 target_pid_to_str (lp->ptid));
d6b0e80f 2883
9c02b525 2884 lp->stop_pc = pc;
15c66dd6 2885 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
9c02b525
PA
2886 return 1;
2887 }
faf09f01 2888#endif
d6b0e80f
AC
2889
2890 return 0;
2891}
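/* A self-contained sketch (not GDB code) of the siginfo-based
   classification used above on USE_SIGTRAP_SIGINFO hosts:
   PTRACE_GETSIGINFO fills a siginfo_t whose si_code separates software
   breakpoints (TRAP_BRKPT), hardware breakpoint/watchpoint hits
   (TRAP_HWBKPT) and single-step traps (TRAP_TRACE).  The fallback
   defines are only for headers that lack these constants.  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef TRAP_BRKPT
# define TRAP_BRKPT 1
#endif
#ifndef TRAP_TRACE
# define TRAP_TRACE 2
#endif
#ifndef TRAP_HWBKPT
# define TRAP_HWBKPT 4
#endif

static void
example_classify_sigtrap (pid_t lwp)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, lwp, 0, &si) != 0 || si.si_signo != SIGTRAP)
    return;

  switch (si.si_code)
    {
    case TRAP_BRKPT:
      printf ("LWP %d: software breakpoint\n", (int) lwp);
      break;
    case TRAP_HWBKPT:
      printf ("LWP %d: hardware breakpoint or watchpoint\n", (int) lwp);
      break;
    case TRAP_TRACE:
      printf ("LWP %d: single-step trap\n", (int) lwp);
      break;
    default:
      printf ("LWP %d: other SIGTRAP, si_code %d\n", (int) lwp, si.si_code);
      break;
    }
}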
2892
faf09f01
PA
2893
2894/* Returns true if the LWP had stopped for a software breakpoint. */
2895
2896static int
2897linux_nat_stopped_by_sw_breakpoint (struct target_ops *ops)
2898{
2899 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2900
2901 gdb_assert (lp != NULL);
2902
2903 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2904}
2905
2906/* Implement the supports_stopped_by_sw_breakpoint method. */
2907
2908static int
2909linux_nat_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2910{
2911 return USE_SIGTRAP_SIGINFO;
2912}
2913
2914/* Returns true if the LWP had stopped for a hardware
2915 breakpoint/watchpoint. */
2916
2917static int
2918linux_nat_stopped_by_hw_breakpoint (struct target_ops *ops)
2919{
2920 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2921
2922 gdb_assert (lp != NULL);
2923
2924 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2925}
2926
2927/* Implement the supports_stopped_by_hw_breakpoint method. */
2928
2929static int
2930linux_nat_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2931{
2932 return USE_SIGTRAP_SIGINFO;
2933}
2934
d6b0e80f
AC
2935/* Select one LWP out of those that have events pending. */
2936
2937static void
d90e17a7 2938select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2939{
2940 int num_events = 0;
2941 int random_selector;
9c02b525 2942 struct lwp_info *event_lp = NULL;
d6b0e80f 2943
ac264b3b 2944 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2945 (*orig_lp)->status = *status;
2946
9c02b525
PA
2947 /* In all-stop, give preference to the LWP that is being
2948 single-stepped. There will be at most one, and it will be the
2949 LWP that the core is most interested in. If we didn't do this,
2950 then we'd have to handle pending step SIGTRAPs somehow in case
2951 the core later continues the previously-stepped thread, as
2952 otherwise we'd report the pending SIGTRAP then, and the core, not
2953 having stepped the thread, wouldn't understand what the trap was
2954 for, and therefore would report it to the user as a random
2955 signal. */
2956 if (!non_stop)
d6b0e80f 2957 {
9c02b525
PA
2958 event_lp = iterate_over_lwps (filter,
2959 select_singlestep_lwp_callback, NULL);
2960 if (event_lp != NULL)
2961 {
2962 if (debug_linux_nat)
2963 fprintf_unfiltered (gdb_stdlog,
2964 "SEL: Select single-step %s\n",
2965 target_pid_to_str (event_lp->ptid));
2966 }
d6b0e80f 2967 }
9c02b525
PA
2968
2969 if (event_lp == NULL)
d6b0e80f 2970 {
9c02b525 2971 /* Pick one at random, out of those which have had events. */
d6b0e80f 2972
9c02b525 2973 /* First see how many events we have. */
d90e17a7 2974 iterate_over_lwps (filter, count_events_callback, &num_events);
8bf3b159 2975 gdb_assert (num_events > 0);
d6b0e80f 2976
9c02b525
PA
2977 /* Now randomly pick a LWP out of those that have had
2978 events. */
d6b0e80f
AC
2979 random_selector = (int)
2980 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2981
2982 if (debug_linux_nat && num_events > 1)
2983 fprintf_unfiltered (gdb_stdlog,
9c02b525 2984 "SEL: Found %d events, selecting #%d\n",
d6b0e80f
AC
2985 num_events, random_selector);
2986
d90e17a7
PA
2987 event_lp = iterate_over_lwps (filter,
2988 select_event_lwp_callback,
d6b0e80f
AC
2989 &random_selector);
2990 }
2991
2992 if (event_lp != NULL)
2993 {
2994 /* Switch the event LWP. */
2995 *orig_lp = event_lp;
2996 *status = event_lp->status;
2997 }
2998
2999 /* Flush the wait status for the event LWP. */
3000 (*orig_lp)->status = 0;
3001}
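/* A small standalone illustration of the scaling idiom used above:
   (int) ((n * (double) rand ()) / (RAND_MAX + 1.0)) maps rand()'s
   range [0, RAND_MAX] onto [0, n), giving each of the N pending events
   an (approximately) equal chance of being selected.  */

#include <stdlib.h>

static int
example_uniform_pick (int num_events)
{
  /* rand () / (RAND_MAX + 1.0) lies in [0, 1), so the product lies in
     [0, num_events) and truncation yields an index 0 .. num_events-1,
     each about equally likely.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}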
3002
3003/* Return non-zero if LP has been resumed. */
3004
3005static int
3006resumed_callback (struct lwp_info *lp, void *data)
3007{
3008 return lp->resumed;
3009}
3010
12d9289a
PA
3011/* Stop an active thread, verify it still exists, then resume it. If
3012 the thread ends up with a pending status, then it is not resumed,
3013 and *DATA (really a pointer to int), is set. */
d6b0e80f
AC
3014
3015static int
3016stop_and_resume_callback (struct lwp_info *lp, void *data)
3017{
25289eb2 3018 if (!lp->stopped)
d6b0e80f 3019 {
25289eb2
PA
3020 ptid_t ptid = lp->ptid;
3021
d6b0e80f
AC
3022 stop_callback (lp, NULL);
3023 stop_wait_callback (lp, NULL);
25289eb2
PA
3024
3025 /* Resume if the lwp still exists, and the core wanted it
3026 running. */
12d9289a
PA
3027 lp = find_lwp_pid (ptid);
3028 if (lp != NULL)
25289eb2 3029 {
12d9289a 3030 if (lp->last_resume_kind == resume_stop
8a99810d 3031 && !lwp_status_pending_p (lp))
12d9289a
PA
3032 {
3033 /* The core wanted the LWP to stop. Even if it stopped
3034 cleanly (with SIGSTOP), leave the event pending. */
3035 if (debug_linux_nat)
3036 fprintf_unfiltered (gdb_stdlog,
3037 "SARC: core wanted LWP %ld stopped "
3038 "(leaving SIGSTOP pending)\n",
dfd4cc63 3039 ptid_get_lwp (lp->ptid));
12d9289a
PA
3040 lp->status = W_STOPCODE (SIGSTOP);
3041 }
3042
8a99810d 3043 if (!lwp_status_pending_p (lp))
12d9289a
PA
3044 {
3045 if (debug_linux_nat)
3046 fprintf_unfiltered (gdb_stdlog,
3047 "SARC: re-resuming LWP %ld\n",
dfd4cc63 3048 ptid_get_lwp (lp->ptid));
e5ef252a 3049 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
12d9289a
PA
3050 }
3051 else
3052 {
3053 if (debug_linux_nat)
3054 fprintf_unfiltered (gdb_stdlog,
3055 "SARC: not re-resuming LWP %ld "
3056 "(has pending)\n",
dfd4cc63 3057 ptid_get_lwp (lp->ptid));
12d9289a 3058 }
25289eb2 3059 }
d6b0e80f
AC
3060 }
3061 return 0;
3062}
3063
02f3fc28 3064/* Check if we should go on and pass this event to common code.
9c02b525 3065 Return the affected lwp if we are, or NULL otherwise. */
12d9289a 3066
02f3fc28 3067static struct lwp_info *
9c02b525 3068linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
3069{
3070 struct lwp_info *lp;
89a5711c 3071 int event = linux_ptrace_get_extended_event (status);
02f3fc28
PA
3072
3073 lp = find_lwp_pid (pid_to_ptid (lwpid));
3074
3075 /* Check for stop events reported by a process we didn't already
3076 know about - anything not already in our LWP list.
3077
3078 If we're expecting to receive stopped processes after
3079 fork, vfork, and clone events, then we'll just add the
3080 new one to our list and go back to waiting for the event
3081 to be reported - the stopped process might be returned
0e5bf2a8
PA
3082 from waitpid before or after the event is.
3083
3084 But note the case of a non-leader thread exec'ing after the
3085 leader having exited, and gone from our lists. The non-leader
3086 thread changes its tid to the tgid. */
3087
3088 if (WIFSTOPPED (status) && lp == NULL
89a5711c 3089 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
3090 {
3091 /* A multi-thread exec after we had seen the leader exiting. */
3092 if (debug_linux_nat)
3093 fprintf_unfiltered (gdb_stdlog,
3094 "LLW: Re-adding thread group leader LWP %d.\n",
3095 lwpid);
3096
dfd4cc63 3097 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
0e5bf2a8
PA
3098 lp->stopped = 1;
3099 lp->resumed = 1;
3100 add_thread (lp->ptid);
3101 }
3102
02f3fc28
PA
3103 if (WIFSTOPPED (status) && !lp)
3104 {
3b27ef47
PA
3105 if (debug_linux_nat)
3106 fprintf_unfiltered (gdb_stdlog,
3107 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
3108 (long) lwpid, status_to_str (status));
84636d28 3109 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3110 return NULL;
3111 }
3112
3113 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3114 our list, i.e. not part of the current process. This can happen
fd62cb89 3115 if we detach from a program we originally forked and then it
02f3fc28
PA
3116 exits. */
3117 if (!WIFSTOPPED (status) && !lp)
3118 return NULL;
3119
8817a6f2
PA
3120 /* This LWP is stopped now. (And if dead, this prevents it from
3121 ever being continued.) */
3122 lp->stopped = 1;
3123
8784d563
PA
3124 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3125 {
3126 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 3127 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 3128
de0d863e 3129 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
3130 lp->must_set_ptrace_flags = 0;
3131 }
3132
ca2163eb
PA
3133 /* Handle GNU/Linux's syscall SIGTRAPs. */
3134 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3135 {
3136 /* No longer need the sysgood bit. The ptrace event ends up
3137 recorded in lp->waitstatus if we care for it. We can carry
3138 on handling the event like a regular SIGTRAP from here
3139 on. */
3140 status = W_STOPCODE (SIGTRAP);
3141 if (linux_handle_syscall_trap (lp, 0))
3142 return NULL;
3143 }
02f3fc28 3144
ca2163eb 3145 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
3146 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3147 && linux_is_extended_waitstatus (status))
02f3fc28
PA
3148 {
3149 if (debug_linux_nat)
3150 fprintf_unfiltered (gdb_stdlog,
3151 "LLW: Handling extended status 0x%06x\n",
3152 status);
3153 if (linux_handle_extended_wait (lp, status, 0))
3154 return NULL;
3155 }
3156
3157 /* Check if the thread has exited. */
9c02b525
PA
3158 if (WIFEXITED (status) || WIFSIGNALED (status))
3159 {
3160 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
02f3fc28 3161 {
9c02b525
PA
3162 /* If this is the main thread, we must stop all threads and
3163 verify if they are still alive. This is because in the
3164 nptl thread model on Linux 2.4, there is no signal issued
3165 for exiting LWPs other than the main thread. We only get
3166 the main thread exit signal once all child threads have
3167 already exited. If we stop all the threads and use the
3168 stop_wait_callback to check if they have exited we can
3169 determine whether this signal should be ignored or
3170 whether it means the end of the debugged application,
3171 regardless of which threading model is being used. */
3172 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
3173 {
3174 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3175 stop_and_resume_callback, NULL);
3176 }
3177
3178 if (debug_linux_nat)
3179 fprintf_unfiltered (gdb_stdlog,
3180 "LLW: %s exited.\n",
3181 target_pid_to_str (lp->ptid));
3182
3183 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
3184 {
3185 /* If there is at least one more LWP, then the exit signal
3186 was not the end of the debugged application and should be
3187 ignored. */
3188 exit_lwp (lp);
3189 return NULL;
3190 }
02f3fc28
PA
3191 }
3192
9c02b525
PA
3193 gdb_assert (lp->resumed);
3194
02f3fc28
PA
3195 if (debug_linux_nat)
3196 fprintf_unfiltered (gdb_stdlog,
9c02b525
PA
3197 "Process %ld exited\n",
3198 ptid_get_lwp (lp->ptid));
02f3fc28 3199
9c02b525
PA
3200 /* This was the last lwp in the process. Since events are
 3201 serialized to GDB core, we may not be able to report this one
 3202 right now, but GDB core and the other target layers will want
 3203 to be notified about the exit code/signal, so leave the status
3204 pending for the next time we're able to report it. */
3205
 3206 /* Dead LWPs aren't expected to report a pending sigstop. */
3207 lp->signalled = 0;
3208
3209 /* Store the pending event in the waitstatus, because
3210 W_EXITCODE(0,0) == 0. */
3211 store_waitstatus (&lp->waitstatus, status);
3212 return lp;
02f3fc28
PA
3213 }
3214
3215 /* Check if the current LWP has previously exited. In the nptl
3216 thread model, LWPs other than the main thread do not issue
3217 signals when they exit so we must check whenever the thread has
3218 stopped. A similar check is made in stop_wait_callback(). */
dfd4cc63 3219 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3220 {
dfd4cc63 3221 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
d90e17a7 3222
02f3fc28
PA
3223 if (debug_linux_nat)
3224 fprintf_unfiltered (gdb_stdlog,
3225 "LLW: %s exited.\n",
3226 target_pid_to_str (lp->ptid));
3227
3228 exit_lwp (lp);
3229
3230 /* Make sure there is at least one thread running. */
d90e17a7 3231 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3232
3233 /* Discard the event. */
3234 return NULL;
3235 }
3236
3237 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3238 an attempt to stop an LWP. */
3239 if (lp->signalled
3240 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3241 {
02f3fc28
PA
3242 lp->signalled = 0;
3243
2bf6fb9d 3244 if (lp->last_resume_kind == resume_stop)
25289eb2 3245 {
2bf6fb9d
PA
3246 if (debug_linux_nat)
3247 fprintf_unfiltered (gdb_stdlog,
3248 "LLW: resume_stop SIGSTOP caught for %s.\n",
3249 target_pid_to_str (lp->ptid));
3250 }
3251 else
3252 {
3253 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 3254
25289eb2
PA
3255 if (debug_linux_nat)
3256 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 3257 "LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
25289eb2
PA
3258 lp->step ?
3259 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3260 target_pid_to_str (lp->ptid));
02f3fc28 3261
2bf6fb9d 3262 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 3263 gdb_assert (lp->resumed);
25289eb2
PA
3264 return NULL;
3265 }
02f3fc28
PA
3266 }
3267
57380f4e
DJ
3268 /* Make sure we don't report a SIGINT that we have already displayed
3269 for another thread. */
3270 if (lp->ignore_sigint
3271 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3272 {
3273 if (debug_linux_nat)
3274 fprintf_unfiltered (gdb_stdlog,
3275 "LLW: Delayed SIGINT caught for %s.\n",
3276 target_pid_to_str (lp->ptid));
3277
3278 /* This is a delayed SIGINT. */
3279 lp->ignore_sigint = 0;
3280
8a99810d 3281 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
57380f4e
DJ
3282 if (debug_linux_nat)
3283 fprintf_unfiltered (gdb_stdlog,
3284 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3285 lp->step ?
3286 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3287 target_pid_to_str (lp->ptid));
57380f4e
DJ
3288 gdb_assert (lp->resumed);
3289
3290 /* Discard the event. */
3291 return NULL;
3292 }
3293
9c02b525
PA
3294 /* Don't report signals that GDB isn't interested in, such as
3295 signals that are neither printed nor stopped upon. Stopping all
3296 threads can be a bit time-consuming so if we want decent
3297 performance with heavily multi-threaded programs, especially when
3298 they're using a high frequency timer, we'd better avoid it if we
3299 can. */
3300 if (WIFSTOPPED (status))
3301 {
3302 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3303
3304 if (!non_stop)
3305 {
3306 /* Only do the below in all-stop, as we currently use SIGSTOP
3307 to implement target_stop (see linux_nat_stop) in
3308 non-stop. */
3309 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3310 {
3311 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3312 forwarded to the entire process group, that is, all LWPs
3313 will receive it - unless they're using CLONE_THREAD to
3314 share signals. Since we only want to report it once, we
3315 mark it as ignored for all LWPs except this one. */
3316 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3317 set_ignore_sigint, NULL);
3318 lp->ignore_sigint = 0;
3319 }
3320 else
3321 maybe_clear_ignore_sigint (lp);
3322 }
3323
3324 /* When using hardware single-step, we need to report every signal.
c9587f88
AT
3325 Otherwise, signals in pass_mask may be short-circuited
 3326 except for signals that might be caused by a breakpoint. */
9c02b525 3327 if (!lp->step
c9587f88
AT
3328 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3329 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3330 {
3331 linux_resume_one_lwp (lp, lp->step, signo);
3332 if (debug_linux_nat)
3333 fprintf_unfiltered (gdb_stdlog,
3334 "LLW: %s %s, %s (preempt 'handle')\n",
3335 lp->step ?
3336 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3337 target_pid_to_str (lp->ptid),
3338 (signo != GDB_SIGNAL_0
3339 ? strsignal (gdb_signal_to_host (signo))
3340 : "0"));
3341 return NULL;
3342 }
3343 }
3344
02f3fc28
PA
3345 /* An interesting event. */
3346 gdb_assert (lp);
ca2163eb 3347 lp->status = status;
9c02b525 3348 save_sigtrap (lp);
02f3fc28
PA
3349 return lp;
3350}
3351
0e5bf2a8
PA
3352/* Detect zombie thread group leaders, and "exit" them. We can't reap
3353 their exits until all other threads in the group have exited. */
3354
3355static void
3356check_zombie_leaders (void)
3357{
3358 struct inferior *inf;
3359
3360 ALL_INFERIORS (inf)
3361 {
3362 struct lwp_info *leader_lp;
3363
3364 if (inf->pid == 0)
3365 continue;
3366
3367 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3368 if (leader_lp != NULL
3369 /* Check if there are other threads in the group, as we may
3370 have raced with the inferior simply exiting. */
3371 && num_lwps (inf->pid) > 1
5f572dec 3372 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3373 {
3374 if (debug_linux_nat)
3375 fprintf_unfiltered (gdb_stdlog,
3376 "CZL: Thread group leader %d zombie "
3377 "(it exited, or another thread execd).\n",
3378 inf->pid);
3379
3380 /* A leader zombie can mean one of two things:
3381
3382 - It exited, and there's an exit status pending, or
3383 only the leader exited (not the whole
3384 program). In the latter case, we can't waitpid the
3385 leader's exit status until all other threads are gone.
3386
3387 - There are 3 or more threads in the group, and a thread
3388 other than the leader exec'd. On an exec, the Linux
3389 kernel destroys all other threads (except the execing
3390 one) in the thread group, and resets the execing thread's
3391 tid to the tgid. No exit notification is sent for the
3392 execing thread -- from the ptracer's perspective, it
3393 appears as though the execing thread just vanishes.
3394 Until we reap all other threads except the leader and the
3395 execing thread, the leader will be zombie, and the
3396 execing thread will be in `D (disc sleep)'. As soon as
3397 all other threads are reaped, the execing thread changes
3398 its tid to the tgid, and the previous (zombie) leader
3399 vanishes, giving place to the "new" leader. We could try
3400 distinguishing the exit and exec cases, by waiting once
3401 more, and seeing if something comes out, but it doesn't
3402 sound useful. The previous leader _does_ go away, and
3403 we'll re-add the new one once we see the exec event
3404 (which is just the same as what would happen if the
3405 previous leader did exit voluntarily before some other
3406 thread execs). */
3407
3408 if (debug_linux_nat)
3409 fprintf_unfiltered (gdb_stdlog,
3410 "CZL: Thread group leader %d vanished.\n",
3411 inf->pid);
3412 exit_lwp (leader_lp);
3413 }
3414 }
3415}
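/* Illustration only: the zombie check above relies on
   linux_proc_pid_is_zombie, which is defined elsewhere (in
   nat/linux-procfs.c in this tree).  A hedged sketch of the
   underlying idea -- parse the "State:" line of /proc/<pid>/status
   and look for the 'Z' (zombie) state -- follows; the real function
   differs in detail and error handling.  */
#if 0
static int
pid_is_zombie_sketch (pid_t pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (startswith (line, "State:"))
      {
        /* The field looks like "State:\tZ (zombie)".  */
        zombie = (strchr (line, 'Z') != NULL);
        break;
      }
  fclose (f);
  return zombie;
}
#endif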
3416
d6b0e80f 3417static ptid_t
7feb7d06 3418linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3419 ptid_t ptid, struct target_waitstatus *ourstatus,
3420 int target_options)
d6b0e80f 3421{
fc9b8e47 3422 sigset_t prev_mask;
4b60df3d 3423 enum resume_kind last_resume_kind;
12d9289a 3424 struct lwp_info *lp;
12d9289a 3425 int status;
d6b0e80f 3426
01124a23 3427 if (debug_linux_nat)
b84876c2
PA
3428 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3429
f973ed9c
DJ
3430 /* The first time we get here after starting a new inferior, we may
3431 not have added it to the LWP list yet - this is the earliest
3432 moment at which we know its PID. */
d90e17a7 3433 if (ptid_is_pid (inferior_ptid))
f973ed9c 3434 {
27c9d204
PA
3435 /* Upgrade the main thread's ptid. */
3436 thread_change_ptid (inferior_ptid,
dfd4cc63
LM
3437 ptid_build (ptid_get_pid (inferior_ptid),
3438 ptid_get_pid (inferior_ptid), 0));
27c9d204 3439
26cb8b7c 3440 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3441 lp->resumed = 1;
3442 }
3443
12696c10 3444 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3445 block_child_signals (&prev_mask);
d6b0e80f 3446
d6b0e80f 3447 /* First check if there is a LWP with a wait status pending. */
8a99810d
PA
3448 lp = iterate_over_lwps (ptid, status_callback, NULL);
3449 if (lp != NULL)
d6b0e80f
AC
3450 {
3451 if (debug_linux_nat)
d6b0e80f
AC
3452 fprintf_unfiltered (gdb_stdlog,
3453 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3454 status_to_str (lp->status),
d6b0e80f 3455 target_pid_to_str (lp->ptid));
d6b0e80f
AC
3456 }
3457
d9d41e78 3458 if (!target_is_async_p ())
b84876c2
PA
3459 {
3460 /* Causes SIGINT to be passed on to the attached process. */
3461 set_sigint_trap ();
b84876c2 3462 }
d6b0e80f 3463
9c02b525
PA
3464 /* But if we don't find a pending event, we'll have to wait. Always
3465 pull all events out of the kernel. We'll randomly select an
3466 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3467
d90e17a7 3468 while (lp == NULL)
d6b0e80f
AC
3469 {
3470 pid_t lwpid;
3471
0e5bf2a8
PA
3472 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3473 quirks:
3474
3475 - If the thread group leader exits while other threads in the
3476 thread group still exist, waitpid(TGID, ...) hangs. That
3477 waitpid won't return an exit status until the other threads
3478 in the group are reaped.
3479
3480 - When a non-leader thread execs, that thread just vanishes
3481 without reporting an exit (so we'd hang if we waited for it
3482 explicitly in that case). The exec event is reported to
3483 the TGID pid. */
3484
3485 errno = 0;
3486 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3487 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3488 lwpid = my_waitpid (-1, &status, WNOHANG);
3489
3490 if (debug_linux_nat)
3491 fprintf_unfiltered (gdb_stdlog,
3492 "LNW: waitpid(-1, ...) returned %d, %s\n",
3493 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3494
d6b0e80f
AC
3495 if (lwpid > 0)
3496 {
d6b0e80f
AC
3497 if (debug_linux_nat)
3498 {
3499 fprintf_unfiltered (gdb_stdlog,
3500 "LLW: waitpid %ld received %s\n",
3501 (long) lwpid, status_to_str (status));
3502 }
3503
9c02b525 3504 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3505 /* Retry until nothing comes out of waitpid. A single
3506 SIGCHLD can indicate more than one child stopped. */
3507 continue;
d6b0e80f
AC
3508 }
3509
20ba1ce6
PA
3510 /* Now that we've pulled all events out of the kernel, resume
3511 LWPs that don't have an interesting event to report. */
3512 iterate_over_lwps (minus_one_ptid,
3513 resume_stopped_resumed_lwps, &minus_one_ptid);
3514
3515 /* ... and find an LWP with a status to report to the core, if
3516 any. */
9c02b525
PA
3517 lp = iterate_over_lwps (ptid, status_callback, NULL);
3518 if (lp != NULL)
3519 break;
3520
0e5bf2a8
PA
3521 /* Check for zombie thread group leaders. Those can't be reaped
3522 until all other threads in the thread group are. */
3523 check_zombie_leaders ();
d6b0e80f 3524
0e5bf2a8
PA
3525 /* If there are no resumed children left, bail. We'd be stuck
3526 forever in the sigsuspend call below otherwise. */
3527 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3528 {
3529 if (debug_linux_nat)
3530 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3531
0e5bf2a8 3532 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3533
d9d41e78 3534 if (!target_is_async_p ())
0e5bf2a8 3535 clear_sigint_trap ();
b84876c2 3536
0e5bf2a8
PA
3537 restore_child_signals_mask (&prev_mask);
3538 return minus_one_ptid;
d6b0e80f 3539 }
28736962 3540
0e5bf2a8
PA
3541 /* No interesting event to report to the core. */
3542
3543 if (target_options & TARGET_WNOHANG)
3544 {
01124a23 3545 if (debug_linux_nat)
28736962
PA
3546 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3547
0e5bf2a8 3548 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3549 restore_child_signals_mask (&prev_mask);
3550 return minus_one_ptid;
3551 }
d6b0e80f
AC
3552
3553 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3554 gdb_assert (lp == NULL);
0e5bf2a8
PA
3555
3556 /* Block until we get an event reported with SIGCHLD. */
d36bf488
DE
3557 if (debug_linux_nat)
3558 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
0e5bf2a8 3559 sigsuspend (&suspend_mask);
d6b0e80f
AC
3560 }
3561
d9d41e78 3562 if (!target_is_async_p ())
d26b5354 3563 clear_sigint_trap ();
d6b0e80f
AC
3564
3565 gdb_assert (lp);
3566
ca2163eb
PA
3567 status = lp->status;
3568 lp->status = 0;
3569
4c28f408
PA
3570 if (!non_stop)
3571 {
3572 /* Now stop all other LWP's ... */
d90e17a7 3573 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3574
3575 /* ... and wait until all of them have reported back that
3576 they're no longer running. */
d90e17a7 3577 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
9c02b525
PA
3578 }
3579
3580 /* If we're not waiting for a specific LWP, choose an event LWP from
3581 among those that have had events. Giving equal priority to all
3582 LWPs that have had events helps prevent starvation. */
3583 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3584 select_event_lwp (ptid, &lp, &status);
3585
3586 gdb_assert (lp != NULL);
3587
3588 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3589 it was a software breakpoint, and we can't reliably support the
3590 "stopped by software breakpoint" stop reason. */
3591 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3592 && !USE_SIGTRAP_SIGINFO)
9c02b525
PA
3593 {
3594 struct regcache *regcache = get_thread_regcache (lp->ptid);
3595 struct gdbarch *gdbarch = get_regcache_arch (regcache);
527a273a 3596 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3597
9c02b525
PA
3598 if (decr_pc != 0)
3599 {
3600 CORE_ADDR pc;
d6b0e80f 3601
9c02b525
PA
3602 pc = regcache_read_pc (regcache);
3603 regcache_write_pc (regcache, pc + decr_pc);
3604 }
3605 }
e3e9f5a2 3606
9c02b525
PA
3607 /* We'll need this to determine whether to report a SIGSTOP as
3608 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3609 clears it. */
3610 last_resume_kind = lp->last_resume_kind;
4b60df3d 3611
9c02b525
PA
3612 if (!non_stop)
3613 {
e3e9f5a2
PA
3614 /* In all-stop, from the core's perspective, all LWPs are now
3615 stopped until a new resume action is sent over. */
3616 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3617 }
3618 else
25289eb2 3619 {
4b60df3d 3620 resume_clear_callback (lp, NULL);
25289eb2 3621 }
d6b0e80f 3622
26ab7092 3623 if (linux_nat_status_is_event (status))
d6b0e80f 3624 {
d6b0e80f
AC
3625 if (debug_linux_nat)
3626 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3627 "LLW: trap ptid is %s.\n",
3628 target_pid_to_str (lp->ptid));
d6b0e80f 3629 }
d6b0e80f
AC
3630
3631 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3632 {
3633 *ourstatus = lp->waitstatus;
3634 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3635 }
3636 else
3637 store_waitstatus (ourstatus, status);
3638
01124a23 3639 if (debug_linux_nat)
b84876c2
PA
3640 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3641
7feb7d06 3642 restore_child_signals_mask (&prev_mask);
1e225492 3643
4b60df3d 3644 if (last_resume_kind == resume_stop
25289eb2
PA
3645 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3646 && WSTOPSIG (status) == SIGSTOP)
3647 {
3648 /* A thread that has been requested to stop by GDB with
3649 target_stop, and it stopped cleanly, so report as SIG0. The
3650 use of SIGSTOP is an implementation detail. */
a493e3e2 3651 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3652 }
3653
1e225492
JK
3654 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3655 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3656 lp->core = -1;
3657 else
2e794194 3658 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3659
f973ed9c 3660 return lp->ptid;
d6b0e80f
AC
3661}
3662
e3e9f5a2
PA
3663/* Resume LWPs that are currently stopped without any pending status
3664 to report, but are resumed from the core's perspective. */
3665
3666static int
3667resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3668{
3669 ptid_t *wait_ptid_p = data;
3670
3671 if (lp->stopped
3672 && lp->resumed
8a99810d 3673 && !lwp_status_pending_p (lp))
e3e9f5a2 3674 {
336060f3
PA
3675 struct regcache *regcache = get_thread_regcache (lp->ptid);
3676 struct gdbarch *gdbarch = get_regcache_arch (regcache);
336060f3 3677
23f238d3 3678 TRY
e3e9f5a2 3679 {
23f238d3
PA
3680 CORE_ADDR pc = regcache_read_pc (regcache);
3681 int leave_stopped = 0;
e3e9f5a2 3682
23f238d3
PA
3683 /* Don't bother if there's a breakpoint at PC that we'd hit
3684 immediately, and we're not waiting for this LWP. */
3685 if (!ptid_match (lp->ptid, *wait_ptid_p))
3686 {
3687 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3688 leave_stopped = 1;
3689 }
e3e9f5a2 3690
23f238d3
PA
3691 if (!leave_stopped)
3692 {
3693 if (debug_linux_nat)
3694 fprintf_unfiltered (gdb_stdlog,
3695 "RSRL: resuming stopped-resumed LWP %s at "
3696 "%s: step=%d\n",
3697 target_pid_to_str (lp->ptid),
3698 paddress (gdbarch, pc),
3699 lp->step);
3700
3701 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3702 }
3703 }
3704 CATCH (ex, RETURN_MASK_ERROR)
3705 {
3706 if (!check_ptrace_stopped_lwp_gone (lp))
3707 throw_exception (ex);
3708 }
3709 END_CATCH
e3e9f5a2
PA
3710 }
3711
3712 return 0;
3713}
3714
7feb7d06
PA
3715static ptid_t
3716linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3717 ptid_t ptid, struct target_waitstatus *ourstatus,
3718 int target_options)
7feb7d06
PA
3719{
3720 ptid_t event_ptid;
3721
3722 if (debug_linux_nat)
09826ec5
PA
3723 {
3724 char *options_string;
3725
3726 options_string = target_options_to_string (target_options);
3727 fprintf_unfiltered (gdb_stdlog,
3728 "linux_nat_wait: [%s], [%s]\n",
3729 target_pid_to_str (ptid),
3730 options_string);
3731 xfree (options_string);
3732 }
7feb7d06
PA
3733
3734 /* Flush the async file first. */
d9d41e78 3735 if (target_is_async_p ())
7feb7d06
PA
3736 async_file_flush ();
3737
e3e9f5a2
PA
3738 /* Resume LWPs that are currently stopped without any pending status
3739 to report, but are resumed from the core's perspective. LWPs get
3740 in this state if we find them stopping at a time we're not
3741 interested in reporting the event (target_wait on a
3742 specific_process, for example, see linux_nat_wait_1), and
3743 meanwhile the event became uninteresting. Don't bother resuming
3744 LWPs we're not going to wait for if they'd stop immediately. */
3745 if (non_stop)
3746 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3747
47608cb1 3748 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3749
3750 /* If we requested any event, and something came out, assume there
3751 may be more. If we requested a specific lwp or process, also
3752 assume there may be more. */
d9d41e78 3753 if (target_is_async_p ()
6953d224
PA
3754 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3755 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3756 || !ptid_equal (ptid, minus_one_ptid)))
3757 async_file_mark ();
3758
7feb7d06
PA
3759 return event_ptid;
3760}
3761
d6b0e80f
AC
3762static int
3763kill_callback (struct lwp_info *lp, void *data)
3764{
ed731959
JK
3765 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3766
3767 errno = 0;
69ff6be5 3768 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
ed731959 3769 if (debug_linux_nat)
57745c90
PA
3770 {
3771 int save_errno = errno;
3772
3773 fprintf_unfiltered (gdb_stdlog,
3774 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3775 target_pid_to_str (lp->ptid),
3776 save_errno ? safe_strerror (save_errno) : "OK");
3777 }
ed731959
JK
3778
3779 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3780
d6b0e80f 3781 errno = 0;
dfd4cc63 3782 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
d6b0e80f 3783 if (debug_linux_nat)
57745c90
PA
3784 {
3785 int save_errno = errno;
3786
3787 fprintf_unfiltered (gdb_stdlog,
3788 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3789 target_pid_to_str (lp->ptid),
3790 save_errno ? safe_strerror (save_errno) : "OK");
3791 }
d6b0e80f
AC
3792
3793 return 0;
3794}
3795
3796static int
3797kill_wait_callback (struct lwp_info *lp, void *data)
3798{
3799 pid_t pid;
3800
3801 /* We must make sure that there are no pending events (delayed
3802 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3803 program doesn't interfere with any following debugging session. */
3804
3805 /* For cloned processes we must check both with __WCLONE and
3806 without, since the exit status of a cloned process isn't reported
3807 with __WCLONE. */
3808 if (lp->cloned)
3809 {
3810 do
3811 {
dfd4cc63 3812 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
e85a822c 3813 if (pid != (pid_t) -1)
d6b0e80f 3814 {
e85a822c
DJ
3815 if (debug_linux_nat)
3816 fprintf_unfiltered (gdb_stdlog,
3817 "KWC: wait %s received unknown.\n",
3818 target_pid_to_str (lp->ptid));
3819 /* The Linux kernel sometimes fails to kill a thread
3820 completely after PTRACE_KILL; that goes from the stop
3821 point in do_fork out to the one in
3822 get_signal_to_deliver and waits again. So kill it
3823 again. */
3824 kill_callback (lp, NULL);
d6b0e80f
AC
3825 }
3826 }
dfd4cc63 3827 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3828
3829 gdb_assert (pid == -1 && errno == ECHILD);
3830 }
3831
3832 do
3833 {
dfd4cc63 3834 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
e85a822c 3835 if (pid != (pid_t) -1)
d6b0e80f 3836 {
e85a822c
DJ
3837 if (debug_linux_nat)
3838 fprintf_unfiltered (gdb_stdlog,
3839 "KWC: wait %s received unk.\n",
3840 target_pid_to_str (lp->ptid));
3841 /* See the call to kill_callback above. */
3842 kill_callback (lp, NULL);
d6b0e80f
AC
3843 }
3844 }
dfd4cc63 3845 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3846
3847 gdb_assert (pid == -1 && errno == ECHILD);
3848 return 0;
3849}
3850
3851static void
7d85a9c0 3852linux_nat_kill (struct target_ops *ops)
d6b0e80f 3853{
f973ed9c
DJ
3854 struct target_waitstatus last;
3855 ptid_t last_ptid;
3856 int status;
d6b0e80f 3857
f973ed9c
DJ
3858 /* If we're stopped while forking and we haven't followed yet,
3859 kill the other task. We need to do this first because the
3860 parent will be sleeping if this is a vfork. */
d6b0e80f 3861
f973ed9c 3862 get_last_target_status (&last_ptid, &last);
d6b0e80f 3863
f973ed9c
DJ
3864 if (last.kind == TARGET_WAITKIND_FORKED
3865 || last.kind == TARGET_WAITKIND_VFORKED)
3866 {
dfd4cc63 3867 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
f973ed9c 3868 wait (&status);
26cb8b7c
PA
3869
3870 /* Let the arch-specific native code know this process is
3871 gone. */
dfd4cc63 3872 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
f973ed9c
DJ
3873 }
3874
3875 if (forks_exist_p ())
7feb7d06 3876 linux_fork_killall ();
f973ed9c
DJ
3877 else
3878 {
d90e17a7 3879 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3880
4c28f408
PA
3881 /* Stop all threads before killing them, since ptrace requires
3882 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3883 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3884 /* ... and wait until all of them have reported back that
3885 they're no longer running. */
d90e17a7 3886 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3887
f973ed9c 3888 /* Kill all LWP's ... */
d90e17a7 3889 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3890
3891 /* ... and wait until we've flushed all events. */
d90e17a7 3892 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3893 }
3894
3895 target_mourn_inferior ();
d6b0e80f
AC
3896}
3897
3898static void
136d6dae 3899linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3900{
26cb8b7c
PA
3901 int pid = ptid_get_pid (inferior_ptid);
3902
3903 purge_lwp_list (pid);
d6b0e80f 3904
f973ed9c 3905 if (! forks_exist_p ())
d90e17a7
PA
3906 /* Normal case, no other forks available. */
3907 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3908 else
3909 /* Multi-fork case. The current inferior_ptid has exited, but
3910 there are other viable forks to debug. Delete the exiting
3911 one and context-switch to the first available. */
3912 linux_fork_mourn_inferior ();
26cb8b7c
PA
3913
3914 /* Let the arch-specific native code know this process is gone. */
3915 linux_nat_forget_process (pid);
d6b0e80f
AC
3916}
3917
5b009018
PA
3918/* Convert a native/host siginfo object, into/from the siginfo in the
3919 layout of the inferiors' architecture. */
3920
3921static void
a5362b9a 3922siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3923{
3924 int done = 0;
3925
3926 if (linux_nat_siginfo_fixup != NULL)
3927 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3928
3929 /* If there was no callback, or the callback didn't do anything,
3930 then just do a straight memcpy. */
3931 if (!done)
3932 {
3933 if (direction == 1)
a5362b9a 3934 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3935 else
a5362b9a 3936 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3937 }
3938}
3939
9b409511 3940static enum target_xfer_status
4aa995e1
PA
3941linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3942 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3943 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3944 ULONGEST *xfered_len)
4aa995e1 3945{
4aa995e1 3946 int pid;
a5362b9a
TS
3947 siginfo_t siginfo;
3948 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3949
3950 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3951 gdb_assert (readbuf || writebuf);
3952
dfd4cc63 3953 pid = ptid_get_lwp (inferior_ptid);
4aa995e1 3954 if (pid == 0)
dfd4cc63 3955 pid = ptid_get_pid (inferior_ptid);
4aa995e1
PA
3956
3957 if (offset > sizeof (siginfo))
2ed4b548 3958 return TARGET_XFER_E_IO;
4aa995e1
PA
3959
3960 errno = 0;
3961 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3962 if (errno != 0)
2ed4b548 3963 return TARGET_XFER_E_IO;
4aa995e1 3964
5b009018
PA
3965 /* When GDB is built as a 64-bit application, ptrace writes into
3966 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3967 inferior with a 64-bit GDB should look the same as debugging it
3968 with a 32-bit GDB, we need to convert it. GDB core always sees
3969 the converted layout, so any read/write will have to be done
3970 post-conversion. */
3971 siginfo_fixup (&siginfo, inf_siginfo, 0);
3972
4aa995e1
PA
3973 if (offset + len > sizeof (siginfo))
3974 len = sizeof (siginfo) - offset;
3975
3976 if (readbuf != NULL)
5b009018 3977 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3978 else
3979 {
5b009018
PA
3980 memcpy (inf_siginfo + offset, writebuf, len);
3981
3982 /* Convert back to ptrace layout before flushing it out. */
3983 siginfo_fixup (&siginfo, inf_siginfo, 1);
3984
4aa995e1
PA
3985 errno = 0;
3986 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3987 if (errno != 0)
2ed4b548 3988 return TARGET_XFER_E_IO;
4aa995e1
PA
3989 }
3990
9b409511
YQ
3991 *xfered_len = len;
3992 return TARGET_XFER_OK;
4aa995e1
PA
3993}
3994
9b409511 3995static enum target_xfer_status
10d6c8cd
DJ
3996linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3997 const char *annex, gdb_byte *readbuf,
3998 const gdb_byte *writebuf,
9b409511 3999 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 4000{
4aa995e1 4001 struct cleanup *old_chain;
9b409511 4002 enum target_xfer_status xfer;
d6b0e80f 4003
4aa995e1
PA
4004 if (object == TARGET_OBJECT_SIGNAL_INFO)
4005 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
9b409511 4006 offset, len, xfered_len);
4aa995e1 4007
c35b1492
PA
4008 /* The target is connected but no live inferior is selected. Pass
4009 this request down to a lower stratum (e.g., the executable
4010 file). */
4011 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 4012 return TARGET_XFER_EOF;
c35b1492 4013
4aa995e1
PA
4014 old_chain = save_inferior_ptid ();
4015
dfd4cc63
LM
4016 if (ptid_lwp_p (inferior_ptid))
4017 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
d6b0e80f 4018
10d6c8cd 4019 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4020 offset, len, xfered_len);
d6b0e80f
AC
4021
4022 do_cleanups (old_chain);
4023 return xfer;
4024}
4025
4026static int
28439f5e 4027linux_thread_alive (ptid_t ptid)
d6b0e80f 4028{
8c6a60d1 4029 int err, tmp_errno;
4c28f408 4030
dfd4cc63 4031 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 4032
4c28f408
PA
4033 /* Send signal 0 instead of anything ptrace, because ptracing a
4034 running thread errors out claiming that the thread doesn't
4035 exist. */
dfd4cc63 4036 err = kill_lwp (ptid_get_lwp (ptid), 0);
8c6a60d1 4037 tmp_errno = errno;
d6b0e80f
AC
4038 if (debug_linux_nat)
4039 fprintf_unfiltered (gdb_stdlog,
4c28f408 4040 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4041 target_pid_to_str (ptid),
8c6a60d1 4042 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4043
4c28f408 4044 if (err != 0)
d6b0e80f
AC
4045 return 0;
4046
4047 return 1;
4048}
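/* The signal-0 probe above is the standard POSIX liveness test:
   sending signal 0 performs the existence and permission checks
   without delivering anything.  A minimal stand-alone sketch (the
   kill_lwp wrapper used above prefers tkill when available):  */
#if 0
static int
lwp_alive_sketch (pid_t lwp)
{
  if (kill (lwp, 0) == 0)
    return 1;
  /* EPERM still means the LWP exists; ESRCH means it is gone.  */
  return errno == EPERM;
}
#endif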
4049
28439f5e
PA
4050static int
4051linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4052{
4053 return linux_thread_alive (ptid);
4054}
4055
8a06aea7
PA
4056/* Implement the to_update_thread_list target method for this
4057 target. */
4058
4059static void
4060linux_nat_update_thread_list (struct target_ops *ops)
4061{
4062 if (linux_supports_traceclone ())
4063 {
4064 /* With support for clone events, we add/delete threads from the
4065 list as clone/exit events are processed, so just try deleting
4066 exited threads still in the thread list. */
4067 delete_exited_threads ();
4068 }
4069 else
4070 prune_threads ();
4071}
4072
d6b0e80f 4073static char *
117de6a9 4074linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4075{
4076 static char buf[64];
4077
dfd4cc63
LM
4078 if (ptid_lwp_p (ptid)
4079 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
4080 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 4081 {
dfd4cc63 4082 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
4083 return buf;
4084 }
4085
4086 return normal_pid_to_str (ptid);
4087}
4088
4694da01 4089static char *
503a628d 4090linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4694da01
TT
4091{
4092 int pid = ptid_get_pid (thr->ptid);
4093 long lwp = ptid_get_lwp (thr->ptid);
4094#define FORMAT "/proc/%d/task/%ld/comm"
4095 char buf[sizeof (FORMAT) + 30];
4096 FILE *comm_file;
4097 char *result = NULL;
4098
4099 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
614c279d 4100 comm_file = gdb_fopen_cloexec (buf, "r");
4694da01
TT
4101 if (comm_file)
4102 {
4103 /* Not exported by the kernel, so we define it here. */
4104#define COMM_LEN 16
4105 static char line[COMM_LEN + 1];
4106
4107 if (fgets (line, sizeof (line), comm_file))
4108 {
4109 char *nl = strchr (line, '\n');
4110
4111 if (nl)
4112 *nl = '\0';
4113 if (*line != '\0')
4114 result = line;
4115 }
4116
4117 fclose (comm_file);
4118 }
4119
4120#undef COMM_LEN
4121#undef FORMAT
4122
4123 return result;
4124}
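/* For reference: /proc/<pid>/task/<tid>/comm holds the thread name
   set via prctl (PR_SET_NAME) or pthread_setname_np; the kernel
   limits it to TASK_COMM_LEN (16) bytes counting the terminating
   NUL, hence COMM_LEN above, and the file ends with a newline.
   E.g., with hypothetical IDs:

       $ cat /proc/1234/task/1235/comm
       worker-thread

   would make the function above report "worker-thread".  */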
4125
dba24537
AC
4126/* Accepts an integer PID; Returns a string representing a file that
4127 can be opened to get the symbols for the child process. */
4128
6d8fd2b7 4129static char *
8dd27370 4130linux_child_pid_to_exec_file (struct target_ops *self, int pid)
dba24537 4131{
e0d86d2c 4132 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
4133}
4134
10d6c8cd
DJ
4135/* Implement the to_xfer_partial interface for memory reads using the /proc
4136 filesystem. Because we can use a single read() call for /proc, this
4137 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4138 but it doesn't support writes. */
4139
9b409511 4140static enum target_xfer_status
10d6c8cd
DJ
4141linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4142 const char *annex, gdb_byte *readbuf,
4143 const gdb_byte *writebuf,
9b409511 4144 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 4145{
10d6c8cd
DJ
4146 LONGEST ret;
4147 int fd;
dba24537
AC
4148 char filename[64];
4149
10d6c8cd 4150 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4151 return 0;
4152
4153 /* Don't bother for one word. */
4154 if (len < 3 * sizeof (long))
9b409511 4155 return TARGET_XFER_EOF;
dba24537
AC
4156
4157 /* We could keep this file open and cache it - possibly one per
4158 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
4159 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
4160 ptid_get_pid (inferior_ptid));
614c279d 4161 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 4162 if (fd == -1)
9b409511 4163 return TARGET_XFER_EOF;
dba24537
AC
4164
4165 /* If pread64 is available, use it. It's faster if the kernel
4166 supports it (only one syscall), and it's 64-bit safe even on
4167 32-bit platforms (for instance, SPARC debugging a SPARC64
4168 application). */
4169#ifdef HAVE_PREAD64
10d6c8cd 4170 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4171#else
10d6c8cd 4172 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4173#endif
4174 ret = 0;
4175 else
4176 ret = len;
4177
4178 close (fd);
9b409511
YQ
4179
4180 if (ret == 0)
4181 return TARGET_XFER_EOF;
4182 else
4183 {
4184 *xfered_len = ret;
4185 return TARGET_XFER_OK;
4186 }
dba24537
AC
4187}
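/* Note: the kernel gates /proc/<pid>/mem behind ptrace-style access
   checks (and on older kernels required the reader to be
   ptrace-attached), which GDB satisfies here.  A hedged sketch of the
   core read, stripped of the target_xfer plumbing and assuming
   pread64 is available (HAVE_PREAD64, as in the code above):  */
#if 0
static LONGEST
proc_mem_read_sketch (int pid, ULONGEST addr, gdb_byte *myaddr, size_t len)
{
  char filename[64];
  int fd;
  LONGEST n;

  xsnprintf (filename, sizeof filename, "/proc/%d/mem", pid);
  fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
  if (fd == -1)
    return -1;
  /* Read LEN bytes of the inferior's memory starting at ADDR.  */
  n = pread64 (fd, myaddr, len, addr);
  close (fd);
  return n;
}
#endif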
4188
efcbbd14
UW
4189
4190/* Enumerate spufs IDs for process PID. */
4191static LONGEST
b55e14c7 4192spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 4193{
f5656ead 4194 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
4195 LONGEST pos = 0;
4196 LONGEST written = 0;
4197 char path[128];
4198 DIR *dir;
4199 struct dirent *entry;
4200
4201 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4202 dir = opendir (path);
4203 if (!dir)
4204 return -1;
4205
4206 rewinddir (dir);
4207 while ((entry = readdir (dir)) != NULL)
4208 {
4209 struct stat st;
4210 struct statfs stfs;
4211 int fd;
4212
4213 fd = atoi (entry->d_name);
4214 if (!fd)
4215 continue;
4216
4217 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4218 if (stat (path, &st) != 0)
4219 continue;
4220 if (!S_ISDIR (st.st_mode))
4221 continue;
4222
4223 if (statfs (path, &stfs) != 0)
4224 continue;
4225 if (stfs.f_type != SPUFS_MAGIC)
4226 continue;
4227
4228 if (pos >= offset && pos + 4 <= offset + len)
4229 {
4230 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4231 written += 4;
4232 }
4233 pos += 4;
4234 }
4235
4236 closedir (dir);
4237 return written;
4238}
4239
4240/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4241 object type, using the /proc file system. */
9b409511
YQ
4242
4243static enum target_xfer_status
efcbbd14
UW
4244linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4245 const char *annex, gdb_byte *readbuf,
4246 const gdb_byte *writebuf,
9b409511 4247 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
4248{
4249 char buf[128];
4250 int fd = 0;
4251 int ret = -1;
dfd4cc63 4252 int pid = ptid_get_pid (inferior_ptid);
efcbbd14
UW
4253
4254 if (!annex)
4255 {
4256 if (!readbuf)
2ed4b548 4257 return TARGET_XFER_E_IO;
efcbbd14 4258 else
9b409511
YQ
4259 {
4260 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4261
4262 if (l < 0)
4263 return TARGET_XFER_E_IO;
4264 else if (l == 0)
4265 return TARGET_XFER_EOF;
4266 else
4267 {
4268 *xfered_len = (ULONGEST) l;
4269 return TARGET_XFER_OK;
4270 }
4271 }
efcbbd14
UW
4272 }
4273
4274 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 4275 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
efcbbd14 4276 if (fd <= 0)
2ed4b548 4277 return TARGET_XFER_E_IO;
efcbbd14
UW
4278
4279 if (offset != 0
4280 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4281 {
4282 close (fd);
9b409511 4283 return TARGET_XFER_EOF;
efcbbd14
UW
4284 }
4285
4286 if (writebuf)
4287 ret = write (fd, writebuf, (size_t) len);
4288 else if (readbuf)
4289 ret = read (fd, readbuf, (size_t) len);
4290
4291 close (fd);
9b409511
YQ
4292
4293 if (ret < 0)
4294 return TARGET_XFER_E_IO;
4295 else if (ret == 0)
4296 return TARGET_XFER_EOF;
4297 else
4298 {
4299 *xfered_len = (ULONGEST) ret;
4300 return TARGET_XFER_OK;
4301 }
efcbbd14
UW
4302}
4303
4304
dba24537
AC
4305/* Parse LINE as a signal set and add its set bits to SIGS. */
4306
4307static void
4308add_line_to_sigset (const char *line, sigset_t *sigs)
4309{
4310 int len = strlen (line) - 1;
4311 const char *p;
4312 int signum;
4313
4314 if (line[len] != '\n')
8a3fe4f8 4315 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4316
4317 p = line;
4318 signum = len * 4;
4319 while (len-- > 0)
4320 {
4321 int digit;
4322
4323 if (*p >= '0' && *p <= '9')
4324 digit = *p - '0';
4325 else if (*p >= 'a' && *p <= 'f')
4326 digit = *p - 'a' + 10;
4327 else
8a3fe4f8 4328 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4329
4330 signum -= 4;
4331
4332 if (digit & 1)
4333 sigaddset (sigs, signum + 1);
4334 if (digit & 2)
4335 sigaddset (sigs, signum + 2);
4336 if (digit & 4)
4337 sigaddset (sigs, signum + 3);
4338 if (digit & 8)
4339 sigaddset (sigs, signum + 4);
4340
4341 p++;
4342 }
4343}
4344
4345/* Find process PID's pending signals from /proc/pid/status and set
4346 SIGS to match. */
4347
4348void
3e43a32a
MS
4349linux_proc_pending_signals (int pid, sigset_t *pending,
4350 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4351{
4352 FILE *procfile;
d8d2a3ee 4353 char buffer[PATH_MAX], fname[PATH_MAX];
7c8a8b04 4354 struct cleanup *cleanup;
dba24537
AC
4355
4356 sigemptyset (pending);
4357 sigemptyset (blocked);
4358 sigemptyset (ignored);
cde33bf1 4359 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
614c279d 4360 procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4361 if (procfile == NULL)
8a3fe4f8 4362 error (_("Could not open %s"), fname);
7c8a8b04 4363 cleanup = make_cleanup_fclose (procfile);
dba24537 4364
d8d2a3ee 4365 while (fgets (buffer, PATH_MAX, procfile) != NULL)
dba24537
AC
4366 {
4367 /* Normal queued signals are on the SigPnd line in the status
4368 file. However, 2.6 kernels also have a "shared" pending
4369 queue for delivering signals to a thread group, so check for
4370 a ShdPnd line also.
4371
4372 Unfortunately some Red Hat kernels include the shared pending
4373 queue but not the ShdPnd status field. */
4374
61012eef 4375 if (startswith (buffer, "SigPnd:\t"))
dba24537 4376 add_line_to_sigset (buffer + 8, pending);
61012eef 4377 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4378 add_line_to_sigset (buffer + 8, pending);
61012eef 4379 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4380 add_line_to_sigset (buffer + 8, blocked);
61012eef 4381 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4382 add_line_to_sigset (buffer + 8, ignored);
4383 }
4384
7c8a8b04 4385 do_cleanups (cleanup);
dba24537
AC
4386}
4387
9b409511 4388static enum target_xfer_status
07e059b5 4389linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4390 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4391 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4392 ULONGEST *xfered_len)
07e059b5 4393{
07e059b5
VP
4394 gdb_assert (object == TARGET_OBJECT_OSDATA);
4395
9b409511
YQ
4396 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4397 if (*xfered_len == 0)
4398 return TARGET_XFER_EOF;
4399 else
4400 return TARGET_XFER_OK;
07e059b5
VP
4401}
4402
9b409511 4403static enum target_xfer_status
10d6c8cd
DJ
4404linux_xfer_partial (struct target_ops *ops, enum target_object object,
4405 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4406 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4407 ULONGEST *xfered_len)
10d6c8cd 4408{
9b409511 4409 enum target_xfer_status xfer;
10d6c8cd
DJ
4410
4411 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4412 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
9b409511 4413 offset, len, xfered_len);
10d6c8cd 4414
07e059b5
VP
4415 if (object == TARGET_OBJECT_OSDATA)
4416 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
9b409511 4417 offset, len, xfered_len);
07e059b5 4418
efcbbd14
UW
4419 if (object == TARGET_OBJECT_SPU)
4420 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
9b409511 4421 offset, len, xfered_len);
efcbbd14 4422
8f313923
JK
4423 /* GDB calculates all the addresses in the possibly larger width of the address.
4424 Address width needs to be masked before its final use - either by
4425 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4426
4427 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4428
4429 if (object == TARGET_OBJECT_MEMORY)
4430 {
f5656ead 4431 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4432
4433 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4434 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4435 }
4436
10d6c8cd 4437 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511
YQ
4438 offset, len, xfered_len);
4439 if (xfer != TARGET_XFER_EOF)
10d6c8cd
DJ
4440 return xfer;
4441
4442 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4443 offset, len, xfered_len);
10d6c8cd
DJ
4444}
4445
5808517f
YQ
4446static void
4447cleanup_target_stop (void *arg)
4448{
4449 ptid_t *ptid = (ptid_t *) arg;
4450
4451 gdb_assert (arg != NULL);
4452
4453 /* Unpause all */
a493e3e2 4454 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4455}
4456
4457static VEC(static_tracepoint_marker_p) *
c686c57f
TT
4458linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4459 const char *strid)
5808517f
YQ
4460{
4461 char s[IPA_CMD_BUF_SIZE];
4462 struct cleanup *old_chain;
4463 int pid = ptid_get_pid (inferior_ptid);
4464 VEC(static_tracepoint_marker_p) *markers = NULL;
4465 struct static_tracepoint_marker *marker = NULL;
4466 char *p = s;
4467 ptid_t ptid = ptid_build (pid, 0, 0);
4468
4469 /* Pause all */
4470 target_stop (ptid);
4471
4472 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4473 s[sizeof ("qTfSTM")] = 0;
4474
42476b70 4475 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4476
4477 old_chain = make_cleanup (free_current_marker, &marker);
4478 make_cleanup (cleanup_target_stop, &ptid);
4479
4480 while (*p++ == 'm')
4481 {
4482 if (marker == NULL)
4483 marker = XCNEW (struct static_tracepoint_marker);
4484
4485 do
4486 {
4487 parse_static_tracepoint_marker_definition (p, &p, marker);
4488
4489 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4490 {
4491 VEC_safe_push (static_tracepoint_marker_p,
4492 markers, marker);
4493 marker = NULL;
4494 }
4495 else
4496 {
4497 release_static_tracepoint_marker (marker);
4498 memset (marker, 0, sizeof (*marker));
4499 }
4500 }
4501 while (*p++ == ','); /* comma-separated list */
4502
4503 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4504 s[sizeof ("qTsSTM")] = 0;
42476b70 4505 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4506 p = s;
4507 }
4508
4509 do_cleanups (old_chain);
4510
4511 return markers;
4512}
4513
e9efe249 4514/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4515 it with local methods. */
4516
910122bf
UW
4517static void
4518linux_target_install_ops (struct target_ops *t)
10d6c8cd 4519{
6d8fd2b7 4520 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4521 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4522 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4523 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4524 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4525 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4526 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4527 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4528 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4529 t->to_post_attach = linux_child_post_attach;
4530 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4531
4532 super_xfer_partial = t->to_xfer_partial;
4533 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4534
4535 t->to_static_tracepoint_markers_by_strid
4536 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4537}
4538
4539struct target_ops *
4540linux_target (void)
4541{
4542 struct target_ops *t;
4543
4544 t = inf_ptrace_target ();
4545 linux_target_install_ops (t);
4546
4547 return t;
4548}
4549
4550struct target_ops *
7714d83a 4551linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4552{
4553 struct target_ops *t;
4554
4555 t = inf_ptrace_trad_target (register_u_offset);
4556 linux_target_install_ops (t);
10d6c8cd 4557
10d6c8cd
DJ
4558 return t;
4559}
4560
b84876c2
PA
4561/* target_is_async_p implementation. */
4562
4563static int
6a109b6b 4564linux_nat_is_async_p (struct target_ops *ops)
b84876c2 4565{
198297aa 4566 return linux_is_async_p ();
b84876c2
PA
4567}
4568
4569/* target_can_async_p implementation. */
4570
4571static int
6a109b6b 4572linux_nat_can_async_p (struct target_ops *ops)
b84876c2
PA
4573{
4574 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4575 it explicitly with the "set target-async" command.
b84876c2 4576 Someday, linux will always be async. */
3dd5b83d 4577 return target_async_permitted;
b84876c2
PA
4578}
4579
9908b566 4580static int
2a9a2795 4581linux_nat_supports_non_stop (struct target_ops *self)
9908b566
VP
4582{
4583 return 1;
4584}
4585
d90e17a7
PA
4586/* True if we want to support multi-process. To be removed when GDB
4587 supports multi-exec. */
4588
2277426b 4589int linux_multi_process = 1;
d90e17a7
PA
4590
4591static int
86ce2668 4592linux_nat_supports_multi_process (struct target_ops *self)
d90e17a7
PA
4593{
4594 return linux_multi_process;
4595}
4596
03583c20 4597static int
2bfc0540 4598linux_nat_supports_disable_randomization (struct target_ops *self)
03583c20
UW
4599{
4600#ifdef HAVE_PERSONALITY
4601 return 1;
4602#else
4603 return 0;
4604#endif
4605}
4606
b84876c2
PA
4607static int async_terminal_is_ours = 1;
4608
4d4ca2a1
DE
4609/* target_terminal_inferior implementation.
4610
4611 This is a wrapper around child_terminal_inferior to add async support. */
b84876c2
PA
4612
4613static void
d2f640d4 4614linux_nat_terminal_inferior (struct target_ops *self)
b84876c2 4615{
198297aa
PA
4616 /* Like target_terminal_inferior, use target_can_async_p, not
4617 target_is_async_p, since at this point the target is not async
4618 yet. If it can async, then we know it will become async prior to
4619 resume. */
4620 if (!target_can_async_p ())
b84876c2
PA
4621 {
4622 /* Async mode is disabled. */
d6b64346 4623 child_terminal_inferior (self);
b84876c2
PA
4624 return;
4625 }
4626
d6b64346 4627 child_terminal_inferior (self);
b84876c2 4628
d9d2d8b6 4629 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4630 if (!async_terminal_is_ours)
4631 return;
4632
4633 delete_file_handler (input_fd);
4634 async_terminal_is_ours = 0;
4635 set_sigint_trap ();
4636}
4637
4d4ca2a1
DE
4638/* target_terminal_ours implementation.
4639
4640 This is a wrapper around child_terminal_ours to add async support (and
4641 implement the target_terminal_ours vs target_terminal_ours_for_output
4642 distinction). child_terminal_ours is currently no different than
4643 child_terminal_ours_for_output.
4644 We leave target_terminal_ours_for_output alone, leaving it to
4645 child_terminal_ours_for_output. */
b84876c2 4646
2c0b251b 4647static void
e3594fd1 4648linux_nat_terminal_ours (struct target_ops *self)
b84876c2 4649{
b84876c2
PA
4650 /* GDB should never give the terminal to the inferior if the
4651 inferior is running in the background (run&, continue&, etc.),
4652 but claiming it sure should. */
d6b64346 4653 child_terminal_ours (self);
b84876c2 4654
b84876c2
PA
4655 if (async_terminal_is_ours)
4656 return;
4657
4658 clear_sigint_trap ();
4659 add_file_handler (input_fd, stdin_event_handler, 0);
4660 async_terminal_is_ours = 1;
4661}
4662
7feb7d06
PA
4663/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4664 so we notice when any child changes state, and notify the
4665 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4666 above to wait for the arrival of a SIGCHLD. */
4667
b84876c2 4668static void
7feb7d06 4669sigchld_handler (int signo)
b84876c2 4670{
7feb7d06
PA
4671 int old_errno = errno;
4672
01124a23
DE
4673 if (debug_linux_nat)
4674 ui_file_write_async_safe (gdb_stdlog,
4675 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4676
4677 if (signo == SIGCHLD
4678 && linux_nat_event_pipe[0] != -1)
4679 async_file_mark (); /* Let the event loop know that there are
4680 events to handle. */
4681
4682 errno = old_errno;
4683}
4684
4685/* Callback registered with the target events file descriptor. */
4686
4687static void
4688handle_target_event (int error, gdb_client_data client_data)
4689{
6a3753b3 4690 inferior_event_handler (INF_REG_EVENT, NULL);
7feb7d06
PA
4691}
4692
4693/* Create/destroy the target events pipe. Returns previous state. */
4694
4695static int
4696linux_async_pipe (int enable)
4697{
198297aa 4698 int previous = linux_is_async_p ();
7feb7d06
PA
4699
4700 if (previous != enable)
4701 {
4702 sigset_t prev_mask;
4703
12696c10
PA
4704 /* Block child signals while we create/destroy the pipe, as
4705 their handler writes to it. */
7feb7d06
PA
4706 block_child_signals (&prev_mask);
4707
4708 if (enable)
4709 {
614c279d 4710 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4711 internal_error (__FILE__, __LINE__,
4712 "creating event pipe failed.");
4713
4714 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4715 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4716 }
4717 else
4718 {
4719 close (linux_nat_event_pipe[0]);
4720 close (linux_nat_event_pipe[1]);
4721 linux_nat_event_pipe[0] = -1;
4722 linux_nat_event_pipe[1] = -1;
4723 }
4724
4725 restore_child_signals_mask (&prev_mask);
4726 }
4727
4728 return previous;
b84876c2
PA
4729}
4730
4731/* target_async implementation. */
4732
4733static void
6a3753b3 4734linux_nat_async (struct target_ops *ops, int enable)
b84876c2 4735{
6a3753b3 4736 if (enable)
b84876c2 4737 {
7feb7d06
PA
4738 if (!linux_async_pipe (1))
4739 {
4740 add_file_handler (linux_nat_event_pipe[0],
4741 handle_target_event, NULL);
4742 /* There may be pending events to handle. Tell the event loop
4743 to poll them. */
4744 async_file_mark ();
4745 }
b84876c2
PA
4746 }
4747 else
4748 {
b84876c2 4749 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4750 linux_async_pipe (0);
b84876c2
PA
4751 }
4752 return;
4753}
4754
a493e3e2 4755/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4756 event came out. */
4757
4c28f408 4758static int
252fbfc8 4759linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4760{
d90e17a7 4761 if (!lwp->stopped)
252fbfc8 4762 {
d90e17a7
PA
4763 if (debug_linux_nat)
4764 fprintf_unfiltered (gdb_stdlog,
4765 "LNSL: running -> suspending %s\n",
4766 target_pid_to_str (lwp->ptid));
252fbfc8 4767
252fbfc8 4768
25289eb2
PA
4769 if (lwp->last_resume_kind == resume_stop)
4770 {
4771 if (debug_linux_nat)
4772 fprintf_unfiltered (gdb_stdlog,
4773 "linux-nat: already stopping LWP %ld at "
4774 "GDB's request\n",
4775 ptid_get_lwp (lwp->ptid));
4776 return 0;
4777 }
252fbfc8 4778
25289eb2
PA
4779 stop_callback (lwp, NULL);
4780 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4781 }
4782 else
4783 {
4784 /* Already known to be stopped; do nothing. */
252fbfc8 4785
d90e17a7
PA
4786 if (debug_linux_nat)
4787 {
e09875d4 4788 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4789 fprintf_unfiltered (gdb_stdlog,
4790 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4791 target_pid_to_str (lwp->ptid));
4792 else
3e43a32a
MS
4793 fprintf_unfiltered (gdb_stdlog,
4794 "LNSL: already stopped/no "
4795 "stop_requested yet %s\n",
d90e17a7 4796 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4797 }
4798 }
4c28f408
PA
4799 return 0;
4800}
4801
4802static void
1eab8a48 4803linux_nat_stop (struct target_ops *self, ptid_t ptid)
4c28f408
PA
4804{
4805 if (non_stop)
d90e17a7 4806 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408 4807 else
1eab8a48 4808 linux_ops->to_stop (linux_ops, ptid);
4c28f408
PA
4809}
4810
d90e17a7 4811static void
de90e03d 4812linux_nat_close (struct target_ops *self)
d90e17a7
PA
4813{
4814 /* Unregister from the event loop. */
9debeba0 4815 if (linux_nat_is_async_p (self))
6a3753b3 4816 linux_nat_async (self, 0);
d90e17a7 4817
d90e17a7 4818 if (linux_ops->to_close)
de90e03d 4819 linux_ops->to_close (linux_ops);
6a3cb8e8
PA
4820
4821 super_close (self);
d90e17a7
PA
4822}
4823
c0694254
PA
4824/* When requests are passed down from the linux-nat layer to the
4825 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4826 used. The address space pointer is stored in the inferior object,
4827 but the common code that is passed such ptid can't tell whether
4828 lwpid is a "main" process id or not (it assumes so). We reverse
4829 look up the "main" process id from the lwp here. */
4830
70221824 4831static struct address_space *
c0694254
PA
4832linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4833{
4834 struct lwp_info *lwp;
4835 struct inferior *inf;
4836 int pid;
4837
dfd4cc63 4838 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4839 {
4840 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4841 tgid. */
4842 lwp = find_lwp_pid (ptid);
dfd4cc63 4843 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4844 }
4845 else
4846 {
4847 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4848 pid = ptid_get_pid (ptid);
c0694254
PA
4849 }
4850
4851 inf = find_inferior_pid (pid);
4852 gdb_assert (inf != NULL);
4853 return inf->aspace;
4854}
4855
dc146f7c
VP
4856/* Return the cached value of the processor core for thread PTID. */
4857
70221824 4858static int
dc146f7c
VP
4859linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4860{
4861 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4862
dc146f7c
VP
4863 if (info)
4864 return info->core;
4865 return -1;
4866}
4867
f973ed9c
DJ
4868void
4869linux_nat_add_target (struct target_ops *t)
4870{
f973ed9c
DJ
4871 /* Save the provided single-threaded target. We save this in a separate
4872 variable because another target we've inherited from (e.g. inf-ptrace)
4873 may have saved a pointer to T; we want to use it for the final
4874 process stratum target. */
4875 linux_ops_saved = *t;
4876 linux_ops = &linux_ops_saved;
4877
4878 /* Override some methods for multithreading. */
b84876c2 4879 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4880 t->to_attach = linux_nat_attach;
4881 t->to_detach = linux_nat_detach;
4882 t->to_resume = linux_nat_resume;
4883 t->to_wait = linux_nat_wait;
2455069d 4884 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
4885 t->to_xfer_partial = linux_nat_xfer_partial;
4886 t->to_kill = linux_nat_kill;
4887 t->to_mourn_inferior = linux_nat_mourn_inferior;
4888 t->to_thread_alive = linux_nat_thread_alive;
8a06aea7 4889 t->to_update_thread_list = linux_nat_update_thread_list;
f973ed9c 4890 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 4891 t->to_thread_name = linux_nat_thread_name;
f973ed9c 4892 t->to_has_thread_control = tc_schedlock;
c0694254 4893 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
4894 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4895 t->to_stopped_data_address = linux_nat_stopped_data_address;
faf09f01
PA
4896 t->to_stopped_by_sw_breakpoint = linux_nat_stopped_by_sw_breakpoint;
4897 t->to_supports_stopped_by_sw_breakpoint = linux_nat_supports_stopped_by_sw_breakpoint;
4898 t->to_stopped_by_hw_breakpoint = linux_nat_stopped_by_hw_breakpoint;
4899 t->to_supports_stopped_by_hw_breakpoint = linux_nat_supports_stopped_by_hw_breakpoint;
f973ed9c 4900
b84876c2
PA
4901 t->to_can_async_p = linux_nat_can_async_p;
4902 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4903 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2 4904 t->to_async = linux_nat_async;
b84876c2
PA
4905 t->to_terminal_inferior = linux_nat_terminal_inferior;
4906 t->to_terminal_ours = linux_nat_terminal_ours;
6a3cb8e8
PA
4907
4908 super_close = t->to_close;
d90e17a7 4909 t->to_close = linux_nat_close;
b84876c2 4910
4c28f408
PA
4911 /* Methods for non-stop support. */
4912 t->to_stop = linux_nat_stop;
4913
d90e17a7
PA
4914 t->to_supports_multi_process = linux_nat_supports_multi_process;
4915
03583c20
UW
4916 t->to_supports_disable_randomization
4917 = linux_nat_supports_disable_randomization;
4918
dc146f7c
VP
4919 t->to_core_of_thread = linux_nat_core_of_thread;
4920
f973ed9c
DJ
4921 /* We don't change the stratum; this target will sit at
4922 process_stratum and thread_db will set at thread_stratum. This
4923 is a little strange, since this is a multi-threaded-capable
4924 target, but we want to be on the stack below thread_db, and we
4925 also want to be used for single-threaded processes. */
4926
4927 add_target (t);
f973ed9c
DJ
4928}
4929
9f0bdab8
DJ
4930/* Register a method to call whenever a new thread is attached. */
4931void
7b50312a
PA
4932linux_nat_set_new_thread (struct target_ops *t,
4933 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4934{
4935 /* Save the pointer. We only support a single registered instance
4936 of the GNU/Linux native target, so we do not need to map this to
4937 T. */
4938 linux_nat_new_thread = new_thread;
4939}
4940
26cb8b7c
PA
4941/* See declaration in linux-nat.h. */
4942
4943void
4944linux_nat_set_new_fork (struct target_ops *t,
4945 linux_nat_new_fork_ftype *new_fork)
4946{
4947 /* Save the pointer. */
4948 linux_nat_new_fork = new_fork;
4949}
4950
4951/* See declaration in linux-nat.h. */
4952
4953void
4954linux_nat_set_forget_process (struct target_ops *t,
4955 linux_nat_forget_process_ftype *fn)
4956{
4957 /* Save the pointer. */
4958 linux_nat_forget_process_hook = fn;
4959}
4960
4961/* See declaration in linux-nat.h. */
4962
4963void
4964linux_nat_forget_process (pid_t pid)
4965{
4966 if (linux_nat_forget_process_hook != NULL)
4967 linux_nat_forget_process_hook (pid);
4968}
4969
5b009018
PA
4970/* Register a method that converts a siginfo object between the layout
4971 that ptrace returns, and the layout in the architecture of the
4972 inferior. */
4973void
4974linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4975 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4976 gdb_byte *,
4977 int))
4978{
4979 /* Save the pointer. */
4980 linux_nat_siginfo_fixup = siginfo_fixup;
4981}
4982
7b50312a
PA
4983/* Register a method to call prior to resuming a thread. */
4984
4985void
4986linux_nat_set_prepare_to_resume (struct target_ops *t,
4987 void (*prepare_to_resume) (struct lwp_info *))
4988{
4989 /* Save the pointer. */
4990 linux_nat_prepare_to_resume = prepare_to_resume;
4991}
4992
f865ee35
JK
4993/* See linux-nat.h. */
4994
4995int
4996linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4997{
da559b09 4998 int pid;
9f0bdab8 4999
dfd4cc63 5000 pid = ptid_get_lwp (ptid);
da559b09 5001 if (pid == 0)
dfd4cc63 5002 pid = ptid_get_pid (ptid);
f865ee35 5003
da559b09
JK
5004 errno = 0;
5005 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
5006 if (errno != 0)
5007 {
5008 memset (siginfo, 0, sizeof (*siginfo));
5009 return 0;
5010 }
f865ee35 5011 return 1;
9f0bdab8
DJ
5012}
5013
7b669087
GB
5014/* See nat/linux-nat.h. */
5015
5016ptid_t
5017current_lwp_ptid (void)
5018{
5019 gdb_assert (ptid_lwp_p (inferior_ptid));
5020 return inferior_ptid;
5021}
5022
2c0b251b
PA
5023/* Provide a prototype to silence -Wmissing-prototypes. */
5024extern initialize_file_ftype _initialize_linux_nat;
5025
d6b0e80f
AC
5026void
5027_initialize_linux_nat (void)
5028{
ccce17b0
YQ
5029 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
5030 &debug_linux_nat, _("\
b84876c2
PA
5031Set debugging of GNU/Linux lwp module."), _("\
5032Show debugging of GNU/Linux lwp module."), _("\
5033Enables printf debugging output."),
ccce17b0
YQ
5034 NULL,
5035 show_debug_linux_nat,
5036 &setdebuglist, &showdebuglist);
b84876c2 5037
b84876c2 5038 /* Save this mask as the default. */
d6b0e80f
AC
5039 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5040
7feb7d06
PA
5041 /* Install a SIGCHLD handler. */
5042 sigchld_action.sa_handler = sigchld_handler;
5043 sigemptyset (&sigchld_action.sa_mask);
5044 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5045
5046 /* Make it the default. */
7feb7d06 5047 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5048
5049 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5050 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5051 sigdelset (&suspend_mask, SIGCHLD);
5052
7feb7d06 5053 sigemptyset (&blocked_mask);
d6b0e80f
AC
5054}
5055\f
5056
5057/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5058 the GNU/Linux Threads library and therefore doesn't really belong
5059 here. */
5060
5061/* Read variable NAME in the target and return its value if found.
5062 Otherwise return zero. It is assumed that the type of the variable
5063 is `int'. */
5064
5065static int
5066get_signo (const char *name)
5067{
3b7344d5 5068 struct bound_minimal_symbol ms;
d6b0e80f
AC
5069 int signo;
5070
5071 ms = lookup_minimal_symbol (name, NULL, NULL);
3b7344d5 5072 if (ms.minsym == NULL)
d6b0e80f
AC
5073 return 0;
5074
77e371c0 5075 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5076 sizeof (signo)) != 0)
5077 return 0;
5078
5079 return signo;
5080}
5081
5082/* Return the set of signals used by the threads library in *SET. */
5083
5084void
5085lin_thread_get_thread_signals (sigset_t *set)
5086{
5087 struct sigaction action;
5088 int restart, cancel;
5089
b84876c2 5090 sigemptyset (&blocked_mask);
d6b0e80f
AC
5091 sigemptyset (set);
5092
5093 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5094 cancel = get_signo ("__pthread_sig_cancel");
5095
5096 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5097 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5098 not provide any way for the debugger to query the signal numbers -
5099 fortunately they don't change! */
5100
d6b0e80f 5101 if (restart == 0)
17fbb0bd 5102 restart = __SIGRTMIN;
d6b0e80f 5103
d6b0e80f 5104 if (cancel == 0)
17fbb0bd 5105 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5106
5107 sigaddset (set, restart);
5108 sigaddset (set, cancel);
5109
5110 /* The GNU/Linux Threads library makes terminating threads send a
5111 special "cancel" signal instead of SIGCHLD. Make sure we catch
5112 those (to prevent them from terminating GDB itself, which is
5113 likely to be their default action) and treat them the same way as
5114 SIGCHLD. */
5115
5116 action.sa_handler = sigchld_handler;
5117 sigemptyset (&action.sa_mask);
58aecb61 5118 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5119 sigaction (cancel, &action, NULL);
5120
5121 /* We block the "cancel" signal throughout this code ... */
5122 sigaddset (&blocked_mask, cancel);
5123 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5124
5125 /* ... except during a sigsuspend. */
5126 sigdelset (&suspend_mask, cancel);
5127}