/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21#include "defs.h"
22#include "inferior.h"
23#include "target.h"
d6b0e80f 24#include "gdb_string.h"
3993f6b1 25#include "gdb_wait.h"
d6b0e80f
AC
26#include "gdb_assert.h"
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
af96c192 33#include "linux-ptrace.h"
13da1c97 34#include "linux-procfs.h"
ac264b3b 35#include "linux-fork.h"
d6b0e80f
AC
36#include "gdbthread.h"
37#include "gdbcmd.h"
38#include "regcache.h"
4f844a66 39#include "regset.h"
10d6c8cd
DJ
40#include "inf-ptrace.h"
41#include "auxv.h"
dba24537 42#include <sys/param.h> /* for MAXPATHLEN */
1777feb0 43#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
44#include "elf-bfd.h" /* for elfcore_write_* */
45#include "gregset.h" /* for gregset */
46#include "gdbcore.h" /* for get_exec_file */
47#include <ctype.h> /* for isdigit */
1777feb0 48#include "gdbthread.h" /* for struct thread_info etc. */
dba24537
AC
49#include "gdb_stat.h" /* for struct stat */
50#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
51#include "inf-loop.h"
52#include "event-loop.h"
53#include "event-top.h"
07e059b5
VP
54#include <pwd.h>
55#include <sys/types.h>
56#include "gdb_dirent.h"
57#include "xml-support.h"
191c4426 58#include "terminal.h"
efcbbd14 59#include <sys/vfs.h>
6c95b8df 60#include "solib.h"
d26e3629 61#include "linux-osdata.h"
f179e162 62#include "cli/cli-utils.h"
efcbbd14
UW
63
64#ifndef SPUFS_MAGIC
65#define SPUFS_MAGIC 0x23c9b64e
66#endif
dba24537 67
10568435
JK
68#ifdef HAVE_PERSONALITY
69# include <sys/personality.h>
70# if !HAVE_DECL_ADDR_NO_RANDOMIZE
71# define ADDR_NO_RANDOMIZE 0x0040000
72# endif
73#endif /* HAVE_PERSONALITY */
74
/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.
Prior to version 2.4, Linux can either wait for events in the main
thread, or for events in secondary threads, but not for both at the
same time (2.4 has the __WALL flag).  So, if we use blocking waitpid,
we might miss an event.  The solution is to use non-blocking waitpid,
together with sigsuspend.  First, we use non-blocking waitpid to get
an event in the main process, if any.  Second, we use non-blocking
waitpid with the __WCLONE flag to check for events in cloned
processes.  If nothing is found, we use sigsuspend to wait for
SIGCHLD.  When SIGCHLD arrives, it means something happened to a child
process -- and SIGCHLD will be delivered both for events in the main
debugged process and in cloned processes.  As soon as we know there's
an event, we get back to calling non-blocking waitpid with and without
__WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to
be delivered, so that we can intercept it.  SIGSTOP's advantage is
that it cannot be blocked.  A disadvantage is that it is not a
real-time signal, so it can only be queued once; we do not keep track
of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated, not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support for
SIGSTOP, since PTRACE_ATTACH generates it, and there are races with
trying to find a signal that is not blocked.  */
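
/* Illustrative sketch only (not part of GDB): the self-pipe trick
   described above amounts to a SIGCHLD handler that writes one byte
   into a pipe whose read end the event loop watches with select/poll.
   The names below (example_pipe, example_sigchld_handler, ...) are
   hypothetical, and the pipe is assumed to have been created with
   pipe () and switched to O_NONBLOCK.  */
#if 0	/* Example only, not compiled.  */
static int example_pipe[2];

static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;

  /* Wake up the event loop; the byte's value is irrelevant.  */
  while (write (example_pipe[1], "+", 1) == -1 && errno == EINTR)
    ;
  errno = saved_errno;
}

static void
example_event_loop_iteration (void)
{
  fd_set readfds;

  FD_ZERO (&readfds);
  FD_SET (example_pipe[0], &readfds);	/* ... plus stdin, etc.  */

  if (select (example_pipe[0] + 1, &readfds, NULL, NULL, NULL) > 0
      && FD_ISSET (example_pipe[0], &readfds))
    {
      char buf;

      /* Drain the pipe (it is non-blocking), then go collect target
	 events with waitpid.  */
      while (read (example_pipe[0], &buf, 1) > 0)
	;
    }
}
#endif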

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type,
                              void *context),
                             void *context);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
3993f6b1
DJ
337\f
338/* A helper function for linux_test_for_tracefork, called after fork (). */
339
340static void
341linux_tracefork_child (void)
342{
3993f6b1
DJ
343 ptrace (PTRACE_TRACEME, 0, 0, 0);
344 kill (getpid (), SIGSTOP);
345 fork ();
48bb3cce 346 _exit (0);
3993f6b1
DJ
347}
348
7feb7d06 349/* Wrapper function for waitpid which handles EINTR. */
b957e937
DJ
350
351static int
46a96992 352my_waitpid (int pid, int *statusp, int flags)
b957e937
DJ
353{
354 int ret;
b84876c2 355
b957e937
DJ
356 do
357 {
46a96992 358 ret = waitpid (pid, statusp, flags);
b957e937
DJ
359 }
360 while (ret == -1 && errno == EINTR);
361
362 return ret;
363}
364
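
/* Note: waitpid can be interrupted by a signal and fail with EINTR
   (for instance by the SIGCHLD handling used for async mode), which
   is why the code below calls my_waitpid rather than raw waitpid.  */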
365/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
366
367 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
368 we know that the feature is not available. This may change the tracing
369 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
370
371 However, if it succeeds, we don't know for sure that the feature is
372 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
3993f6b1 373 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
b957e937
DJ
374 fork tracing, and let it fork. If the process exits, we assume that we
375 can't use TRACEFORK; if we get the fork notification, and we can extract
376 the new child's PID, then we assume that we can. */
3993f6b1
DJ
377
378static void
b957e937 379linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
380{
381 int child_pid, ret, status;
382 long second_pid;
7feb7d06 383 sigset_t prev_mask;
4c28f408 384
7feb7d06
PA
385 /* We don't want those ptrace calls to be interrupted. */
386 block_child_signals (&prev_mask);
3993f6b1 387
b957e937
DJ
388 linux_supports_tracefork_flag = 0;
389 linux_supports_tracevforkdone_flag = 0;
390
391 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
392 if (ret != 0)
7feb7d06
PA
393 {
394 restore_child_signals_mask (&prev_mask);
395 return;
396 }
b957e937 397
3993f6b1
DJ
398 child_pid = fork ();
399 if (child_pid == -1)
e2e0b3e5 400 perror_with_name (("fork"));
3993f6b1
DJ
401
402 if (child_pid == 0)
403 linux_tracefork_child ();
404
b957e937 405 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 406 if (ret == -1)
e2e0b3e5 407 perror_with_name (("waitpid"));
3993f6b1 408 else if (ret != child_pid)
8a3fe4f8 409 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 410 if (! WIFSTOPPED (status))
3e43a32a
MS
411 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
412 status);
3993f6b1 413
3993f6b1
DJ
414 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
415 if (ret != 0)
416 {
b957e937
DJ
417 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
418 if (ret != 0)
419 {
8a3fe4f8 420 warning (_("linux_test_for_tracefork: failed to kill child"));
7feb7d06 421 restore_child_signals_mask (&prev_mask);
b957e937
DJ
422 return;
423 }
424
425 ret = my_waitpid (child_pid, &status, 0);
426 if (ret != child_pid)
3e43a32a
MS
427 warning (_("linux_test_for_tracefork: failed "
428 "to wait for killed child"));
b957e937 429 else if (!WIFSIGNALED (status))
3e43a32a
MS
430 warning (_("linux_test_for_tracefork: unexpected "
431 "wait status 0x%x from killed child"), status);
b957e937 432
7feb7d06 433 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
434 return;
435 }
436
9016a515
DJ
437 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
438 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
439 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
440 linux_supports_tracevforkdone_flag = (ret == 0);
441
b957e937
DJ
442 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
443 if (ret != 0)
8a3fe4f8 444 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
445
446 ret = my_waitpid (child_pid, &status, 0);
447
3993f6b1
DJ
448 if (ret == child_pid && WIFSTOPPED (status)
449 && status >> 16 == PTRACE_EVENT_FORK)
450 {
451 second_pid = 0;
452 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
453 if (ret == 0 && second_pid != 0)
454 {
455 int second_status;
456
457 linux_supports_tracefork_flag = 1;
b957e937
DJ
458 my_waitpid (second_pid, &second_status, 0);
459 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
460 if (ret != 0)
3e43a32a
MS
461 warning (_("linux_test_for_tracefork: "
462 "failed to kill second child"));
97725dc4 463 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
464 }
465 }
b957e937 466 else
8a3fe4f8
AC
467 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
468 "(%d, status 0x%x)"), ret, status);
3993f6b1 469
b957e937
DJ
470 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
471 if (ret != 0)
8a3fe4f8 472 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 473 my_waitpid (child_pid, &status, 0);
4c28f408 474
7feb7d06 475 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
476}
477
a96d9b2e
SDJ
478/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
479
480 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
481 we know that the feature is not available. This may change the tracing
482 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
483
484static void
485linux_test_for_tracesysgood (int original_pid)
486{
487 int ret;
488 sigset_t prev_mask;
489
490 /* We don't want those ptrace calls to be interrupted. */
491 block_child_signals (&prev_mask);
492
493 linux_supports_tracesysgood_flag = 0;
494
495 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
496 if (ret != 0)
497 goto out;
498
499 linux_supports_tracesysgood_flag = 1;
500out:
501 restore_child_signals_mask (&prev_mask);
502}
503
/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */
506
507static int
508linux_supports_tracesysgood (int pid)
509{
510 if (linux_supports_tracesysgood_flag == -1)
511 linux_test_for_tracesysgood (pid);
512 return linux_supports_tracesysgood_flag;
513}
514
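
/* Illustrative sketch (not part of GDB): once PTRACE_O_TRACESYSGOOD
   is in effect, a stop at syscall entry/exit reports SIGTRAP with bit
   7 set (SYSCALL_SIGTRAP above), so it can be told apart from an
   ordinary SIGTRAP.  The function name is hypothetical.  */
#if 0	/* Example only, not compiled.  */
static int
example_is_syscall_stop (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP;
}
#endif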
3993f6b1
DJ
515/* Return non-zero iff we have tracefork functionality available.
516 This function also sets linux_supports_tracefork_flag. */
517
518static int
b957e937 519linux_supports_tracefork (int pid)
3993f6b1
DJ
520{
521 if (linux_supports_tracefork_flag == -1)
b957e937 522 linux_test_for_tracefork (pid);
3993f6b1
DJ
523 return linux_supports_tracefork_flag;
524}
525
9016a515 526static int
b957e937 527linux_supports_tracevforkdone (int pid)
9016a515
DJ
528{
529 if (linux_supports_tracefork_flag == -1)
b957e937 530 linux_test_for_tracefork (pid);
9016a515
DJ
531 return linux_supports_tracevforkdone_flag;
532}
533
a96d9b2e
SDJ
534static void
535linux_enable_tracesysgood (ptid_t ptid)
536{
537 int pid = ptid_get_lwp (ptid);
538
539 if (pid == 0)
540 pid = ptid_get_pid (ptid);
541
542 if (linux_supports_tracesysgood (pid) == 0)
543 return;
544
545 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
546
547 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
548}
549
3993f6b1 550\f
4de4c07c
DJ
551void
552linux_enable_event_reporting (ptid_t ptid)
553{
d3587048 554 int pid = ptid_get_lwp (ptid);
4de4c07c 555
d3587048
DJ
556 if (pid == 0)
557 pid = ptid_get_pid (ptid);
558
b957e937 559 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
560 return;
561
a96d9b2e
SDJ
562 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
563 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
564
b957e937 565 if (linux_supports_tracevforkdone (pid))
a96d9b2e 566 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
9016a515
DJ
567
568 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
569 read-only process state. */
4de4c07c 570
a96d9b2e 571 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
4de4c07c
DJ
572}
573
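
/* Illustrative sketch (not part of GDB): with PTRACE_O_TRACEFORK
   enabled as above, a fork shows up as a SIGTRAP stop whose high
   status bits carry PTRACE_EVENT_FORK, and PTRACE_GETEVENTMSG yields
   the new child's pid -- the same pattern linux_test_for_tracefork
   relies on.  The function name is hypothetical.  */
#if 0	/* Example only, not compiled.  */
static void
example_handle_fork_event (int pid, int status)
{
  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_FORK)
    {
      unsigned long new_child = 0;

      /* Retrieve the pid of the freshly forked child.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_child);
    }
}
#endif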
6d8fd2b7
UW
574static void
575linux_child_post_attach (int pid)
4de4c07c
DJ
576{
577 linux_enable_event_reporting (pid_to_ptid (pid));
a96d9b2e 578 linux_enable_tracesysgood (pid_to_ptid (pid));
4de4c07c
DJ
579}
580
10d6c8cd 581static void
4de4c07c
DJ
582linux_child_post_startup_inferior (ptid_t ptid)
583{
584 linux_enable_event_reporting (ptid);
a96d9b2e 585 linux_enable_tracesysgood (ptid);
4de4c07c
DJ
586}
587
6d8fd2b7
UW
588static int
589linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 590{
7feb7d06 591 sigset_t prev_mask;
9016a515 592 int has_vforked;
4de4c07c
DJ
593 int parent_pid, child_pid;
594
7feb7d06 595 block_child_signals (&prev_mask);
b84876c2 596
e58b0e63
PA
597 has_vforked = (inferior_thread ()->pending_follow.kind
598 == TARGET_WAITKIND_VFORKED);
599 parent_pid = ptid_get_lwp (inferior_ptid);
d3587048 600 if (parent_pid == 0)
e58b0e63
PA
601 parent_pid = ptid_get_pid (inferior_ptid);
602 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
4de4c07c 603
2277426b
PA
604 if (!detach_fork)
605 linux_enable_event_reporting (pid_to_ptid (child_pid));
606
6c95b8df
PA
607 if (has_vforked
608 && !non_stop /* Non-stop always resumes both branches. */
609 && (!target_is_async_p () || sync_execution)
610 && !(follow_child || detach_fork || sched_multi))
611 {
612 /* The parent stays blocked inside the vfork syscall until the
613 child execs or exits. If we don't let the child run, then
614 the parent stays blocked. If we're telling the parent to run
615 in the foreground, the user will not be able to ctrl-c to get
616 back the terminal, effectively hanging the debug session. */
ac74f770
MS
617 fprintf_filtered (gdb_stderr, _("\
618Can not resume the parent process over vfork in the foreground while\n\
619holding the child stopped. Try \"set detach-on-fork\" or \
620\"set schedule-multiple\".\n"));
621 /* FIXME output string > 80 columns. */
6c95b8df
PA
622 return 1;
623 }
624
4de4c07c
DJ
625 if (! follow_child)
626 {
6c95b8df 627 struct lwp_info *child_lp = NULL;
4de4c07c 628
1777feb0 629 /* We're already attached to the parent, by default. */
4de4c07c 630
ac264b3b
MS
631 /* Detach new forked process? */
632 if (detach_fork)
f75c00e4 633 {
6c95b8df
PA
634 /* Before detaching from the child, remove all breakpoints
635 from it. If we forked, then this has already been taken
636 care of by infrun.c. If we vforked however, any
637 breakpoint inserted in the parent is visible in the
638 child, even those added while stopped in a vfork
639 catchpoint. This will remove the breakpoints from the
640 parent also, but they'll be reinserted below. */
641 if (has_vforked)
642 {
643 /* keep breakpoints list in sync. */
644 remove_breakpoints_pid (GET_PID (inferior_ptid));
645 }
646
e85a822c 647 if (info_verbose || debug_linux_nat)
ac264b3b
MS
648 {
649 target_terminal_ours ();
650 fprintf_filtered (gdb_stdlog,
3e43a32a
MS
651 "Detaching after fork from "
652 "child process %d.\n",
ac264b3b
MS
653 child_pid);
654 }
4de4c07c 655
ac264b3b
MS
656 ptrace (PTRACE_DETACH, child_pid, 0, 0);
657 }
658 else
659 {
77435e4c 660 struct inferior *parent_inf, *child_inf;
2277426b 661 struct cleanup *old_chain;
7f9f62ba
PA
662
663 /* Add process to GDB's tables. */
77435e4c
PA
664 child_inf = add_inferior (child_pid);
665
e58b0e63 666 parent_inf = current_inferior ();
77435e4c 667 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 668 copy_terminal_info (child_inf, parent_inf);
7f9f62ba 669
2277426b 670 old_chain = save_inferior_ptid ();
6c95b8df 671 save_current_program_space ();
2277426b
PA
672
673 inferior_ptid = ptid_build (child_pid, child_pid, 0);
674 add_thread (inferior_ptid);
6c95b8df
PA
675 child_lp = add_lwp (inferior_ptid);
676 child_lp->stopped = 1;
25289eb2 677 child_lp->last_resume_kind = resume_stop;
2277426b 678
6c95b8df
PA
679 /* If this is a vfork child, then the address-space is
680 shared with the parent. */
681 if (has_vforked)
682 {
683 child_inf->pspace = parent_inf->pspace;
684 child_inf->aspace = parent_inf->aspace;
685
686 /* The parent will be frozen until the child is done
687 with the shared region. Keep track of the
688 parent. */
689 child_inf->vfork_parent = parent_inf;
690 child_inf->pending_detach = 0;
691 parent_inf->vfork_child = child_inf;
692 parent_inf->pending_detach = 0;
693 }
694 else
695 {
696 child_inf->aspace = new_address_space ();
697 child_inf->pspace = add_program_space (child_inf->aspace);
698 child_inf->removable = 1;
699 set_current_program_space (child_inf->pspace);
700 clone_program_space (child_inf->pspace, parent_inf->pspace);
701
702 /* Let the shared library layer (solib-svr4) learn about
703 this new process, relocate the cloned exec, pull in
704 shared libraries, and install the solib event
705 breakpoint. If a "cloned-VM" event was propagated
706 better throughout the core, this wouldn't be
707 required. */
268a4a75 708 solib_create_inferior_hook (0);
6c95b8df
PA
709 }
710
711 /* Let the thread_db layer learn about this new process. */
2277426b
PA
712 check_for_thread_db ();
713
714 do_cleanups (old_chain);
ac264b3b 715 }
9016a515
DJ
716
717 if (has_vforked)
718 {
3ced3da4 719 struct lwp_info *parent_lp;
6c95b8df
PA
720 struct inferior *parent_inf;
721
722 parent_inf = current_inferior ();
723
724 /* If we detached from the child, then we have to be careful
725 to not insert breakpoints in the parent until the child
726 is done with the shared memory region. However, if we're
727 staying attached to the child, then we can and should
728 insert breakpoints, so that we can debug it. A
	 subsequent child exec or exit is enough to know when the
	 child stops using the parent's address space.  */
731 parent_inf->waiting_for_vfork_done = detach_fork;
56710373 732 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
6c95b8df 733
3ced3da4 734 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
b957e937 735 gdb_assert (linux_supports_tracefork_flag >= 0);
3ced3da4 736
b957e937 737 if (linux_supports_tracevforkdone (0))
9016a515 738 {
6c95b8df
PA
739 if (debug_linux_nat)
740 fprintf_unfiltered (gdb_stdlog,
741 "LCFF: waiting for VFORK_DONE on %d\n",
742 parent_pid);
3ced3da4 743 parent_lp->stopped = 1;
9016a515 744
6c95b8df
PA
745 /* We'll handle the VFORK_DONE event like any other
746 event, in target_wait. */
9016a515
DJ
747 }
748 else
749 {
750 /* We can't insert breakpoints until the child has
751 finished with the shared memory region. We need to
752 wait until that happens. Ideal would be to just
753 call:
754 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
755 - waitpid (parent_pid, &status, __WALL);
756 However, most architectures can't handle a syscall
757 being traced on the way out if it wasn't traced on
758 the way in.
759
760 We might also think to loop, continuing the child
761 until it exits or gets a SIGTRAP. One problem is
762 that the child might call ptrace with PTRACE_TRACEME.
763
764 There's no simple and reliable way to figure out when
765 the vforked child will be done with its copy of the
766 shared memory. We could step it out of the syscall,
767 two instructions, let it go, and then single-step the
768 parent once. When we have hardware single-step, this
769 would work; with software single-step it could still
770 be made to work but we'd have to be able to insert
771 single-step breakpoints in the child, and we'd have
772 to insert -just- the single-step breakpoint in the
773 parent. Very awkward.
774
775 In the end, the best we can do is to make sure it
776 runs for a little while. Hopefully it will be out of
777 range of any breakpoints we reinsert. Usually this
778 is only the single-step breakpoint at vfork's return
779 point. */
780
6c95b8df
PA
781 if (debug_linux_nat)
782 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
783 "LCFF: no VFORK_DONE "
784 "support, sleeping a bit\n");
6c95b8df 785
9016a515 786 usleep (10000);
9016a515 787
6c95b8df
PA
788 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
789 and leave it pending. The next linux_nat_resume call
790 will notice a pending event, and bypasses actually
791 resuming the inferior. */
3ced3da4
PA
792 parent_lp->status = 0;
793 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
794 parent_lp->stopped = 1;
6c95b8df
PA
795
796 /* If we're in async mode, need to tell the event loop
797 there's something here to process. */
798 if (target_can_async_p ())
799 async_file_mark ();
800 }
9016a515 801 }
4de4c07c 802 }
3993f6b1 803 else
4de4c07c 804 {
77435e4c 805 struct inferior *parent_inf, *child_inf;
3ced3da4 806 struct lwp_info *child_lp;
6c95b8df 807 struct program_space *parent_pspace;
4de4c07c 808
e85a822c 809 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
810 {
811 target_terminal_ours ();
6c95b8df 812 if (has_vforked)
3e43a32a
MS
813 fprintf_filtered (gdb_stdlog,
814 _("Attaching after process %d "
815 "vfork to child process %d.\n"),
6c95b8df
PA
816 parent_pid, child_pid);
817 else
3e43a32a
MS
818 fprintf_filtered (gdb_stdlog,
819 _("Attaching after process %d "
820 "fork to child process %d.\n"),
6c95b8df 821 parent_pid, child_pid);
f75c00e4 822 }
4de4c07c 823
7a7d3353
PA
824 /* Add the new inferior first, so that the target_detach below
825 doesn't unpush the target. */
826
77435e4c
PA
827 child_inf = add_inferior (child_pid);
828
e58b0e63 829 parent_inf = current_inferior ();
77435e4c 830 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 831 copy_terminal_info (child_inf, parent_inf);
7a7d3353 832
6c95b8df 833 parent_pspace = parent_inf->pspace;
9016a515 834
6c95b8df
PA
835 /* If we're vforking, we want to hold on to the parent until the
836 child exits or execs. At child exec or exit time we can
837 remove the old breakpoints from the parent and detach or
838 resume debugging it. Otherwise, detach the parent now; we'll
	 want to reuse its program/address spaces, but we can't set
840 them to the child before removing breakpoints from the
841 parent, otherwise, the breakpoints module could decide to
842 remove breakpoints from the wrong process (since they'd be
843 assigned to the same address space). */
9016a515
DJ
844
845 if (has_vforked)
7f9f62ba 846 {
6c95b8df
PA
847 gdb_assert (child_inf->vfork_parent == NULL);
848 gdb_assert (parent_inf->vfork_child == NULL);
849 child_inf->vfork_parent = parent_inf;
850 child_inf->pending_detach = 0;
851 parent_inf->vfork_child = child_inf;
852 parent_inf->pending_detach = detach_fork;
853 parent_inf->waiting_for_vfork_done = 0;
ac264b3b 854 }
2277426b 855 else if (detach_fork)
b84876c2 856 target_detach (NULL, 0);
4de4c07c 857
6c95b8df
PA
858 /* Note that the detach above makes PARENT_INF dangling. */
859
860 /* Add the child thread to the appropriate lists, and switch to
861 this new thread, before cloning the program space, and
862 informing the solib layer about this new process. */
863
9f0bdab8 864 inferior_ptid = ptid_build (child_pid, child_pid, 0);
2277426b 865 add_thread (inferior_ptid);
3ced3da4
PA
866 child_lp = add_lwp (inferior_ptid);
867 child_lp->stopped = 1;
25289eb2 868 child_lp->last_resume_kind = resume_stop;
6c95b8df
PA
869
870 /* If this is a vfork child, then the address-space is shared
871 with the parent. If we detached from the parent, then we can
872 reuse the parent's program/address spaces. */
873 if (has_vforked || detach_fork)
874 {
875 child_inf->pspace = parent_pspace;
876 child_inf->aspace = child_inf->pspace->aspace;
877 }
878 else
879 {
880 child_inf->aspace = new_address_space ();
881 child_inf->pspace = add_program_space (child_inf->aspace);
882 child_inf->removable = 1;
883 set_current_program_space (child_inf->pspace);
884 clone_program_space (child_inf->pspace, parent_pspace);
885
886 /* Let the shared library layer (solib-svr4) learn about
887 this new process, relocate the cloned exec, pull in
888 shared libraries, and install the solib event breakpoint.
889 If a "cloned-VM" event was propagated better throughout
890 the core, this wouldn't be required. */
268a4a75 891 solib_create_inferior_hook (0);
6c95b8df 892 }
ac264b3b 893
6c95b8df 894 /* Let the thread_db layer learn about this new process. */
ef29ce1a 895 check_for_thread_db ();
4de4c07c
DJ
896 }
897
7feb7d06 898 restore_child_signals_mask (&prev_mask);
4de4c07c
DJ
899 return 0;
900}
901
4de4c07c 902\f
77b06cd7 903static int
6d8fd2b7 904linux_child_insert_fork_catchpoint (int pid)
4de4c07c 905{
77b06cd7 906 return !linux_supports_tracefork (pid);
3993f6b1
DJ
907}
908
eb73ad13
PA
909static int
910linux_child_remove_fork_catchpoint (int pid)
911{
912 return 0;
913}
914
77b06cd7 915static int
6d8fd2b7 916linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 917{
77b06cd7 918 return !linux_supports_tracefork (pid);
3993f6b1
DJ
919}
920
eb73ad13
PA
921static int
922linux_child_remove_vfork_catchpoint (int pid)
923{
924 return 0;
925}
926
77b06cd7 927static int
6d8fd2b7 928linux_child_insert_exec_catchpoint (int pid)
3993f6b1 929{
77b06cd7 930 return !linux_supports_tracefork (pid);
3993f6b1
DJ
931}
932
eb73ad13
PA
933static int
934linux_child_remove_exec_catchpoint (int pid)
935{
936 return 0;
937}
938
a96d9b2e
SDJ
939static int
940linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
941 int table_size, int *table)
942{
77b06cd7
TJB
943 if (!linux_supports_tracesysgood (pid))
944 return 1;
945
a96d9b2e
SDJ
946 /* On GNU/Linux, we ignore the arguments. It means that we only
947 enable the syscall catchpoints, but do not disable them.
77b06cd7 948
a96d9b2e
SDJ
949 Also, we do not use the `table' information because we do not
950 filter system calls here. We let GDB do the logic for us. */
951 return 0;
952}
953
d6b0e80f
AC
954/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
955 are processes sharing the same VM space. A multi-threaded process
956 is basically a group of such processes. However, such a grouping
957 is almost entirely a user-space issue; the kernel doesn't enforce
958 such a grouping at all (this might change in the future). In
959 general, we'll rely on the threads library (i.e. the GNU/Linux
960 Threads library) to provide such a grouping.
961
962 It is perfectly well possible to write a multi-threaded application
963 without the assistance of a threads library, by using the clone
964 system call directly. This module should be able to give some
965 rudimentary support for debugging such applications if developers
966 specify the CLONE_PTRACE flag in the clone system call, and are
967 using the Linux kernel 2.4 or above.
968
969 Note that there are some peculiarities in GNU/Linux that affect
970 this code:
971
972 - In general one should specify the __WCLONE flag to waitpid in
973 order to make it report events for any of the cloned processes
974 (and leave it out for the initial process). However, if a cloned
975 process has exited the exit status is only reported if the
976 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
977 we cannot use it since GDB must work on older systems too.
978
979 - When a traced, cloned process exits and is waited for by the
980 debugger, the kernel reassigns it to the original parent and
981 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
982 library doesn't notice this, which leads to the "zombie problem":
   When debugged, a multi-threaded process that spawns a lot of
984 threads will run out of processes, even if the threads exit,
985 because the "zombies" stay around. */
986
987/* List of known LWPs. */
9f0bdab8 988struct lwp_info *lwp_list;
d6b0e80f
AC
989\f
990
d6b0e80f
AC
991/* Original signal mask. */
992static sigset_t normal_mask;
993
994/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
995 _initialize_linux_nat. */
996static sigset_t suspend_mask;
997
7feb7d06
PA
998/* Signals to block to make that sigsuspend work. */
999static sigset_t blocked_mask;
1000
1001/* SIGCHLD action. */
1002struct sigaction sigchld_action;
b84876c2 1003
7feb7d06
PA
1004/* Block child signals (SIGCHLD and linux threads signals), and store
1005 the previous mask in PREV_MASK. */
84e46146 1006
7feb7d06
PA
1007static void
1008block_child_signals (sigset_t *prev_mask)
1009{
1010 /* Make sure SIGCHLD is blocked. */
1011 if (!sigismember (&blocked_mask, SIGCHLD))
1012 sigaddset (&blocked_mask, SIGCHLD);
1013
1014 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1015}
1016
1017/* Restore child signals mask, previously returned by
1018 block_child_signals. */
1019
1020static void
1021restore_child_signals_mask (sigset_t *prev_mask)
1022{
1023 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1024}
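
/* Illustrative sketch (not part of GDB): callers in this file bracket
   ptrace/waitpid critical sections with the two helpers above so that
   SIGCHLD cannot be delivered halfway through.  The function name is
   hypothetical.  */
#if 0	/* Example only, not compiled.  */
static void
example_critical_section (void)
{
  sigset_t prev_mask;

  block_child_signals (&prev_mask);

  /* ... ptrace/waitpid work that must not race with SIGCHLD
     handling goes here ...  */

  restore_child_signals_mask (&prev_mask);
}
#endif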
2455069d
UW
1025
1026/* Mask of signals to pass directly to the inferior. */
1027static sigset_t pass_mask;
1028
1029/* Update signals to pass to the inferior. */
1030static void
1031linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1032{
1033 int signo;
1034
1035 sigemptyset (&pass_mask);
1036
1037 for (signo = 1; signo < NSIG; signo++)
1038 {
1039 int target_signo = target_signal_from_host (signo);
1040 if (target_signo < numsigs && pass_signals[target_signo])
1041 sigaddset (&pass_mask, signo);
1042 }
1043}
1044
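
/* For illustration: calling linux_nat_pass_signals (0, NULL) leaves
   PASS_MASK empty, so every signal is intercepted and reported to GDB
   instead of being passed straight to the inferior; this is what the
   startup and attach paths below do while the inferior is being set
   up.  */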
d6b0e80f
AC
1045\f
1046
1047/* Prototypes for local functions. */
1048static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 1049static int linux_thread_alive (ptid_t ptid);
6d8fd2b7 1050static char *linux_child_pid_to_exec_file (int pid);
710151dd 1051
d6b0e80f
AC
1052\f
1053/* Convert wait status STATUS to a string. Used for printing debug
1054 messages only. */
1055
1056static char *
1057status_to_str (int status)
1058{
1059 static char buf[64];
1060
1061 if (WIFSTOPPED (status))
206aa767 1062 {
ca2163eb 1063 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
206aa767
DE
1064 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1065 strsignal (SIGTRAP));
1066 else
1067 snprintf (buf, sizeof (buf), "%s (stopped)",
1068 strsignal (WSTOPSIG (status)));
1069 }
d6b0e80f
AC
1070 else if (WIFSIGNALED (status))
1071 snprintf (buf, sizeof (buf), "%s (terminated)",
ba9b2ec3 1072 strsignal (WTERMSIG (status)));
d6b0e80f
AC
1073 else
1074 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1075
1076 return buf;
1077}
1078
7b50312a
PA
1079/* Destroy and free LP. */
1080
1081static void
1082lwp_free (struct lwp_info *lp)
1083{
1084 xfree (lp->arch_private);
1085 xfree (lp);
1086}
1087
d90e17a7
PA
/* Remove all LWPs belonging to PID from the lwp list.  */
1089
1090static void
1091purge_lwp_list (int pid)
1092{
1093 struct lwp_info *lp, *lpprev, *lpnext;
1094
1095 lpprev = NULL;
1096
1097 for (lp = lwp_list; lp; lp = lpnext)
1098 {
1099 lpnext = lp->next;
1100
1101 if (ptid_get_pid (lp->ptid) == pid)
1102 {
1103 if (lp == lwp_list)
1104 lwp_list = lp->next;
1105 else
1106 lpprev->next = lp->next;
1107
7b50312a 1108 lwp_free (lp);
d90e17a7
PA
1109 }
1110 else
1111 lpprev = lp;
1112 }
1113}
1114
1115/* Return the number of known LWPs in the tgid given by PID. */
1116
1117static int
1118num_lwps (int pid)
1119{
1120 int count = 0;
1121 struct lwp_info *lp;
1122
1123 for (lp = lwp_list; lp; lp = lp->next)
1124 if (ptid_get_pid (lp->ptid) == pid)
1125 count++;
1126
1127 return count;
d6b0e80f
AC
1128}
1129
f973ed9c 1130/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
1131 structure describing the new LWP. The LWP should already be stopped
1132 (with an exception for the very first LWP). */
d6b0e80f
AC
1133
1134static struct lwp_info *
1135add_lwp (ptid_t ptid)
1136{
1137 struct lwp_info *lp;
1138
1139 gdb_assert (is_lwp (ptid));
1140
1141 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1142
1143 memset (lp, 0, sizeof (struct lwp_info));
1144
25289eb2 1145 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1146 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1147
1148 lp->ptid = ptid;
dc146f7c 1149 lp->core = -1;
d6b0e80f
AC
1150
1151 lp->next = lwp_list;
1152 lwp_list = lp;
d6b0e80f 1153
7b50312a
PA
1154 if (linux_nat_new_thread != NULL)
1155 linux_nat_new_thread (lp);
9f0bdab8 1156
d6b0e80f
AC
1157 return lp;
1158}
1159
1160/* Remove the LWP specified by PID from the list. */
1161
1162static void
1163delete_lwp (ptid_t ptid)
1164{
1165 struct lwp_info *lp, *lpprev;
1166
1167 lpprev = NULL;
1168
1169 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1170 if (ptid_equal (lp->ptid, ptid))
1171 break;
1172
1173 if (!lp)
1174 return;
1175
d6b0e80f
AC
1176 if (lpprev)
1177 lpprev->next = lp->next;
1178 else
1179 lwp_list = lp->next;
1180
7b50312a 1181 lwp_free (lp);
d6b0e80f
AC
1182}
1183
1184/* Return a pointer to the structure describing the LWP corresponding
1185 to PID. If no corresponding LWP could be found, return NULL. */
1186
1187static struct lwp_info *
1188find_lwp_pid (ptid_t ptid)
1189{
1190 struct lwp_info *lp;
1191 int lwp;
1192
1193 if (is_lwp (ptid))
1194 lwp = GET_LWP (ptid);
1195 else
1196 lwp = GET_PID (ptid);
1197
1198 for (lp = lwp_list; lp; lp = lp->next)
1199 if (lwp == GET_LWP (lp->ptid))
1200 return lp;
1201
1202 return NULL;
1203}
1204
1205/* Call CALLBACK with its second argument set to DATA for every LWP in
1206 the list. If CALLBACK returns 1 for a particular LWP, return a
1207 pointer to the structure describing that LWP immediately.
1208 Otherwise return NULL. */
1209
1210struct lwp_info *
d90e17a7
PA
1211iterate_over_lwps (ptid_t filter,
1212 int (*callback) (struct lwp_info *, void *),
1213 void *data)
d6b0e80f
AC
1214{
1215 struct lwp_info *lp, *lpnext;
1216
1217 for (lp = lwp_list; lp; lp = lpnext)
1218 {
1219 lpnext = lp->next;
d90e17a7
PA
1220
1221 if (ptid_match (lp->ptid, filter))
1222 {
1223 if ((*callback) (lp, data))
1224 return lp;
1225 }
d6b0e80f
AC
1226 }
1227
1228 return NULL;
1229}
1230
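
/* Illustrative sketch (not part of GDB): a typical caller passes a
   ptid filter plus a callback; the hypothetical callback below counts
   the stopped LWPs of one inferior.  A pid-only ptid built with
   pid_to_ptid matches every LWP of that inferior, per ptid_match.  */
#if 0	/* Example only, not compiled.  */
static int
example_count_stopped_callback (struct lwp_info *lp, void *data)
{
  int *count = data;

  if (lp->stopped)
    (*count)++;

  return 0;	/* Keep iterating; non-zero would stop the walk.  */
}

static int
example_count_stopped (int pid)
{
  int count = 0;

  iterate_over_lwps (pid_to_ptid (pid),
		     example_count_stopped_callback, &count);
  return count;
}
#endif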
2277426b
PA
1231/* Update our internal state when changing from one checkpoint to
1232 another indicated by NEW_PTID. We can only switch single-threaded
1233 applications, so we only create one new LWP, and the previous list
1234 is discarded. */
f973ed9c
DJ
1235
1236void
1237linux_nat_switch_fork (ptid_t new_ptid)
1238{
1239 struct lwp_info *lp;
1240
2277426b
PA
1241 purge_lwp_list (GET_PID (inferior_ptid));
1242
f973ed9c
DJ
1243 lp = add_lwp (new_ptid);
1244 lp->stopped = 1;
e26af52f 1245
2277426b
PA
1246 /* This changes the thread's ptid while preserving the gdb thread
1247 num. Also changes the inferior pid, while preserving the
1248 inferior num. */
1249 thread_change_ptid (inferior_ptid, new_ptid);
1250
1251 /* We've just told GDB core that the thread changed target id, but,
1252 in fact, it really is a different thread, with different register
1253 contents. */
1254 registers_changed ();
e26af52f
DJ
1255}
1256
e26af52f
DJ
1257/* Handle the exit of a single thread LP. */
1258
1259static void
1260exit_lwp (struct lwp_info *lp)
1261{
e09875d4 1262 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
1263
1264 if (th)
e26af52f 1265 {
17faa917
DJ
1266 if (print_thread_events)
1267 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1268
4f8d22e3 1269 delete_thread (lp->ptid);
e26af52f
DJ
1270 }
1271
1272 delete_lwp (lp->ptid);
1273}
1274
a0ef4274
DJ
1275/* Detect `T (stopped)' in `/proc/PID/status'.
1276 Other states including `T (tracing stop)' are reported as false. */
1277
1278static int
1279pid_is_stopped (pid_t pid)
1280{
1281 FILE *status_file;
1282 char buf[100];
1283 int retval = 0;
1284
1285 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1286 status_file = fopen (buf, "r");
1287 if (status_file != NULL)
1288 {
1289 int have_state = 0;
1290
1291 while (fgets (buf, sizeof (buf), status_file))
1292 {
1293 if (strncmp (buf, "State:", 6) == 0)
1294 {
1295 have_state = 1;
1296 break;
1297 }
1298 }
1299 if (have_state && strstr (buf, "T (stopped)") != NULL)
1300 retval = 1;
1301 fclose (status_file);
1302 }
1303 return retval;
1304}
1305
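
/* For reference, the relevant /proc/PID/status lines read e.g.:

	Name:	cat
	State:	T (stopped)

   while a ptrace-stopped task instead shows "T (tracing stop)" (a
   lowercase "t" on some newer kernels), which pid_is_stopped above
   deliberately reports as false.  */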
1306/* Wait for the LWP specified by LP, which we have just attached to.
1307 Returns a wait status for that LWP, to cache. */
1308
1309static int
1310linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1311 int *signalled)
1312{
1313 pid_t new_pid, pid = GET_LWP (ptid);
1314 int status;
1315
1316 if (pid_is_stopped (pid))
1317 {
1318 if (debug_linux_nat)
1319 fprintf_unfiltered (gdb_stdlog,
1320 "LNPAW: Attaching to a stopped process\n");
1321
1322 /* The process is definitely stopped. It is in a job control
1323 stop, unless the kernel predates the TASK_STOPPED /
1324 TASK_TRACED distinction, in which case it might be in a
1325 ptrace stop. Make sure it is in a ptrace stop; from there we
1326 can kill it, signal it, et cetera.
1327
1328 First make sure there is a pending SIGSTOP. Since we are
1329 already attached, the process can not transition from stopped
1330 to running without a PTRACE_CONT; so we know this signal will
1331 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1332 probably already in the queue (unless this kernel is old
1333 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1334 is not an RT signal, it can only be queued once. */
1335 kill_lwp (pid, SIGSTOP);
1336
1337 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1338 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1339 ptrace (PTRACE_CONT, pid, 0, 0);
1340 }
1341
1342 /* Make sure the initial process is stopped. The user-level threads
1343 layer might want to poke around in the inferior, and that won't
1344 work if things haven't stabilized yet. */
1345 new_pid = my_waitpid (pid, &status, 0);
1346 if (new_pid == -1 && errno == ECHILD)
1347 {
1348 if (first)
1349 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1350
1351 /* Try again with __WCLONE to check cloned processes. */
1352 new_pid = my_waitpid (pid, &status, __WCLONE);
1353 *cloned = 1;
1354 }
1355
dacc9cb2
PP
1356 gdb_assert (pid == new_pid);
1357
1358 if (!WIFSTOPPED (status))
1359 {
1360 /* The pid we tried to attach has apparently just exited. */
1361 if (debug_linux_nat)
1362 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1363 pid, status_to_str (status));
1364 return status;
1365 }
a0ef4274
DJ
1366
1367 if (WSTOPSIG (status) != SIGSTOP)
1368 {
1369 *signalled = 1;
1370 if (debug_linux_nat)
1371 fprintf_unfiltered (gdb_stdlog,
1372 "LNPAW: Received %s after attaching\n",
1373 status_to_str (status));
1374 }
1375
1376 return status;
1377}
1378
84636d28
PA
1379/* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1380 the new LWP could not be attached, or 1 if we're already auto
1381 attached to this thread, but haven't processed the
1382 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */
d6b0e80f 1384
9ee57c33 1385int
93815fbf 1386lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1387{
9ee57c33 1388 struct lwp_info *lp;
7feb7d06 1389 sigset_t prev_mask;
84636d28 1390 int lwpid;
d6b0e80f
AC
1391
1392 gdb_assert (is_lwp (ptid));
1393
7feb7d06 1394 block_child_signals (&prev_mask);
d6b0e80f 1395
9ee57c33 1396 lp = find_lwp_pid (ptid);
84636d28 1397 lwpid = GET_LWP (ptid);
d6b0e80f
AC
1398
1399 /* We assume that we're already attached to any LWP that has an id
1400 equal to the overall process id, and to any LWP that is already
1401 in our list of LWPs. If we're not seeing exit events from threads
1402 and we've had PID wraparound since we last tried to stop all threads,
1403 this assumption might be wrong; fortunately, this is very unlikely
1404 to happen. */
84636d28 1405 if (lwpid != GET_PID (ptid) && lp == NULL)
d6b0e80f 1406 {
a0ef4274 1407 int status, cloned = 0, signalled = 0;
d6b0e80f 1408
84636d28 1409 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
9ee57c33 1410 {
84636d28
PA
1411 if (linux_supports_tracefork_flag)
1412 {
1413 /* If we haven't stopped all threads when we get here,
1414 we may have seen a thread listed in thread_db's list,
1415 but not processed the PTRACE_EVENT_CLONE yet. If
1416 that's the case, ignore this new thread, and let
1417 normal event handling discover it later. */
1418 if (in_pid_list_p (stopped_pids, lwpid))
1419 {
1420 /* We've already seen this thread stop, but we
1421 haven't seen the PTRACE_EVENT_CLONE extended
1422 event yet. */
1423 restore_child_signals_mask (&prev_mask);
1424 return 0;
1425 }
1426 else
1427 {
1428 int new_pid;
1429 int status;
1430
1431 /* See if we've got a stop for this new child
1432 pending. If so, we're already attached. */
1433 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1434 if (new_pid == -1 && errno == ECHILD)
1435 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1436 if (new_pid != -1)
1437 {
1438 if (WIFSTOPPED (status))
1439 add_to_pid_list (&stopped_pids, lwpid, status);
1440
1441 restore_child_signals_mask (&prev_mask);
1442 return 1;
1443 }
1444 }
1445 }
1446
9ee57c33
DJ
1447 /* If we fail to attach to the thread, issue a warning,
1448 but continue. One way this can happen is if thread
e9efe249 1449 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1450 bug may place threads in the thread list and then fail
1451 to create them. */
1452 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1453 safe_strerror (errno));
7feb7d06 1454 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1455 return -1;
1456 }
1457
d6b0e80f
AC
1458 if (debug_linux_nat)
1459 fprintf_unfiltered (gdb_stdlog,
1460 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1461 target_pid_to_str (ptid));
1462
a0ef4274 1463 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2 1464 if (!WIFSTOPPED (status))
673c2bbe
DE
1465 {
1466 restore_child_signals_mask (&prev_mask);
f687d035 1467 return 1;
673c2bbe 1468 }
dacc9cb2 1469
a0ef4274
DJ
1470 lp = add_lwp (ptid);
1471 lp->stopped = 1;
1472 lp->cloned = cloned;
1473 lp->signalled = signalled;
1474 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1475 {
a0ef4274
DJ
1476 lp->resumed = 1;
1477 lp->status = status;
d6b0e80f
AC
1478 }
1479
a0ef4274 1480 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1481
1482 if (debug_linux_nat)
1483 {
1484 fprintf_unfiltered (gdb_stdlog,
1485 "LLAL: waitpid %s received %s\n",
1486 target_pid_to_str (ptid),
1487 status_to_str (status));
1488 }
1489 }
1490 else
1491 {
1492 /* We assume that the LWP representing the original process is
1493 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1494 that the GNU/linux ptrace layer uses to keep track of
1495 threads. Note that this won't have already been done since
1496 the main thread will have, we assume, been stopped by an
1497 attach from a different layer. */
9ee57c33
DJ
1498 if (lp == NULL)
1499 lp = add_lwp (ptid);
d6b0e80f
AC
1500 lp->stopped = 1;
1501 }
9ee57c33 1502
25289eb2 1503 lp->last_resume_kind = resume_stop;
7feb7d06 1504 restore_child_signals_mask (&prev_mask);
9ee57c33 1505 return 0;
d6b0e80f
AC
1506}
1507
b84876c2 1508static void
136d6dae
VP
1509linux_nat_create_inferior (struct target_ops *ops,
1510 char *exec_file, char *allargs, char **env,
b84876c2
PA
1511 int from_tty)
1512{
10568435
JK
1513#ifdef HAVE_PERSONALITY
1514 int personality_orig = 0, personality_set = 0;
1515#endif /* HAVE_PERSONALITY */
b84876c2
PA
1516
1517 /* The fork_child mechanism is synchronous and calls target_wait, so
1518 we have to mask the async mode. */
1519
10568435
JK
1520#ifdef HAVE_PERSONALITY
1521 if (disable_randomization)
1522 {
1523 errno = 0;
1524 personality_orig = personality (0xffffffff);
1525 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1526 {
1527 personality_set = 1;
1528 personality (personality_orig | ADDR_NO_RANDOMIZE);
1529 }
1530 if (errno != 0 || (personality_set
1531 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1532 warning (_("Error disabling address space randomization: %s"),
1533 safe_strerror (errno));
1534 }
1535#endif /* HAVE_PERSONALITY */
1536
2455069d
UW
1537 /* Make sure we report all signals during startup. */
1538 linux_nat_pass_signals (0, NULL);
1539
136d6dae 1540 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1541
10568435
JK
1542#ifdef HAVE_PERSONALITY
1543 if (personality_set)
1544 {
1545 errno = 0;
1546 personality (personality_orig);
1547 if (errno != 0)
1548 warning (_("Error restoring address space randomization: %s"),
1549 safe_strerror (errno));
1550 }
1551#endif /* HAVE_PERSONALITY */
b84876c2
PA
1552}
1553
d6b0e80f 1554static void
136d6dae 1555linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1556{
1557 struct lwp_info *lp;
d6b0e80f 1558 int status;
af990527 1559 ptid_t ptid;
d6b0e80f 1560
2455069d
UW
1561 /* Make sure we report all signals during attach. */
1562 linux_nat_pass_signals (0, NULL);
1563
136d6dae 1564 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1565
af990527
PA
1566 /* The ptrace base target adds the main thread with (pid,0,0)
1567 format. Decorate it with lwp info. */
1568 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1569 thread_change_ptid (inferior_ptid, ptid);
1570
9f0bdab8 1571 /* Add the initial process as the first LWP to the list. */
af990527 1572 lp = add_lwp (ptid);
a0ef4274
DJ
1573
1574 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1575 &lp->signalled);
dacc9cb2
PP
1576 if (!WIFSTOPPED (status))
1577 {
1578 if (WIFEXITED (status))
1579 {
1580 int exit_code = WEXITSTATUS (status);
1581
1582 target_terminal_ours ();
1583 target_mourn_inferior ();
1584 if (exit_code == 0)
1585 error (_("Unable to attach: program exited normally."));
1586 else
1587 error (_("Unable to attach: program exited with code %d."),
1588 exit_code);
1589 }
1590 else if (WIFSIGNALED (status))
1591 {
1592 enum target_signal signo;
1593
1594 target_terminal_ours ();
1595 target_mourn_inferior ();
1596
1597 signo = target_signal_from_host (WTERMSIG (status));
1598 error (_("Unable to attach: program terminated with signal "
1599 "%s, %s."),
1600 target_signal_to_name (signo),
1601 target_signal_to_string (signo));
1602 }
1603
1604 internal_error (__FILE__, __LINE__,
1605 _("unexpected status %d for PID %ld"),
1606 status, (long) GET_LWP (ptid));
1607 }
1608
a0ef4274 1609 lp->stopped = 1;
9f0bdab8 1610
a0ef4274 1611 /* Save the wait status to report later. */
d6b0e80f 1612 lp->resumed = 1;
a0ef4274
DJ
1613 if (debug_linux_nat)
1614 fprintf_unfiltered (gdb_stdlog,
1615 "LNA: waitpid %ld, saving status %s\n",
1616 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1617
7feb7d06
PA
1618 lp->status = status;
1619
1620 if (target_can_async_p ())
1621 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1622}
1623
a0ef4274
DJ
1624/* Get pending status of LP. */
1625static int
1626get_pending_status (struct lwp_info *lp, int *status)
1627{
ca2163eb
PA
1628 enum target_signal signo = TARGET_SIGNAL_0;
1629
1630 /* If we paused threads momentarily, we may have stored pending
1631 events in lp->status or lp->waitstatus (see stop_wait_callback),
1632 and GDB core hasn't seen any signal for those threads.
1633 Otherwise, the last signal reported to the core is found in the
1634 thread object's stop_signal.
1635
1636 There's a corner case that isn't handled here at present. Only
1637 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1638 stop_signal make sense as a real signal to pass to the inferior.
1639 Some catchpoint related events, like
1640 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1641 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1642 those traps are debug API (ptrace in our case) related and
1643 induced; the inferior wouldn't see them if it wasn't being
1644 traced. Hence, we should never pass them to the inferior, even
1645 when set to pass state. Since this corner case isn't handled by
1646 infrun.c when proceeding with a signal, for consistency, neither
1647 do we handle it here (or elsewhere in the file we check for
1648 signal pass state). Normally SIGTRAP isn't set to pass state, so
1649 this is really a corner case. */
1650
1651 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
 1652	    signo = TARGET_SIGNAL_0;	/* A pending ptrace event, not a real signal.  */
1653 else if (lp->status)
1654 signo = target_signal_from_host (WSTOPSIG (lp->status));
1655 else if (non_stop && !is_executing (lp->ptid))
1656 {
1657 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1658
16c381f0 1659 signo = tp->suspend.stop_signal;
ca2163eb
PA
1660 }
1661 else if (!non_stop)
a0ef4274 1662 {
ca2163eb
PA
1663 struct target_waitstatus last;
1664 ptid_t last_ptid;
4c28f408 1665
ca2163eb 1666 get_last_target_status (&last_ptid, &last);
4c28f408 1667
ca2163eb
PA
1668 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1669 {
e09875d4 1670 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1671
16c381f0 1672 signo = tp->suspend.stop_signal;
4c28f408 1673 }
ca2163eb 1674 }
4c28f408 1675
ca2163eb 1676 *status = 0;
4c28f408 1677
ca2163eb
PA
1678 if (signo == TARGET_SIGNAL_0)
1679 {
1680 if (debug_linux_nat)
1681 fprintf_unfiltered (gdb_stdlog,
1682 "GPT: lwp %s has no pending signal\n",
1683 target_pid_to_str (lp->ptid));
1684 }
1685 else if (!signal_pass_state (signo))
1686 {
1687 if (debug_linux_nat)
3e43a32a
MS
1688 fprintf_unfiltered (gdb_stdlog,
1689 "GPT: lwp %s had signal %s, "
1690 "but it is in no pass state\n",
ca2163eb
PA
1691 target_pid_to_str (lp->ptid),
1692 target_signal_to_string (signo));
a0ef4274 1693 }
a0ef4274 1694 else
4c28f408 1695 {
ca2163eb
PA
1696 *status = W_STOPCODE (target_signal_to_host (signo));
1697
1698 if (debug_linux_nat)
1699 fprintf_unfiltered (gdb_stdlog,
1700 "GPT: lwp %s has pending signal %s\n",
1701 target_pid_to_str (lp->ptid),
1702 target_signal_to_string (signo));
4c28f408 1703 }
a0ef4274
DJ
1704
1705 return 0;
1706}
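
/* Illustrative aside, not part of the original file: on typical glibc
   targets W_STOPCODE(sig) expands to ((sig) << 8 | 0x7f), so a status
   synthesized by get_pending_status round-trips through the standard
   wait macros:

     int status = W_STOPCODE (SIGINT);
     gdb_assert (WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT);

   detach_callback below relies on exactly this when it forwards the
   pending signal along with PTRACE_DETACH.  */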
1707
d6b0e80f
AC
1708static int
1709detach_callback (struct lwp_info *lp, void *data)
1710{
1711 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1712
1713 if (debug_linux_nat && lp->status)
1714 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1715 strsignal (WSTOPSIG (lp->status)),
1716 target_pid_to_str (lp->ptid));
1717
a0ef4274
DJ
1718 /* If there is a pending SIGSTOP, get rid of it. */
1719 if (lp->signalled)
d6b0e80f 1720 {
d6b0e80f
AC
1721 if (debug_linux_nat)
1722 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1723 "DC: Sending SIGCONT to %s\n",
1724 target_pid_to_str (lp->ptid));
d6b0e80f 1725
a0ef4274 1726 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1727 lp->signalled = 0;
d6b0e80f
AC
1728 }
1729
1730 /* We don't actually detach from the LWP that has an id equal to the
1731 overall process id just yet. */
1732 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1733 {
a0ef4274
DJ
1734 int status = 0;
1735
1736 /* Pass on any pending signal for this LWP. */
1737 get_pending_status (lp, &status);
1738
7b50312a
PA
1739 if (linux_nat_prepare_to_resume != NULL)
1740 linux_nat_prepare_to_resume (lp);
d6b0e80f
AC
1741 errno = 0;
1742 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1743 WSTOPSIG (status)) < 0)
8a3fe4f8 1744 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1745 safe_strerror (errno));
1746
1747 if (debug_linux_nat)
1748 fprintf_unfiltered (gdb_stdlog,
1749 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1750 target_pid_to_str (lp->ptid),
7feb7d06 1751 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1752
1753 delete_lwp (lp->ptid);
1754 }
1755
1756 return 0;
1757}
1758
1759static void
136d6dae 1760linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1761{
b84876c2 1762 int pid;
a0ef4274 1763 int status;
d90e17a7
PA
1764 struct lwp_info *main_lwp;
1765
1766 pid = GET_PID (inferior_ptid);
a0ef4274 1767
b84876c2
PA
1768 if (target_can_async_p ())
1769 linux_nat_async (NULL, 0);
1770
4c28f408
PA
1771 /* Stop all threads before detaching. ptrace requires that the
 1772	     thread is stopped to successfully detach.  */
d90e17a7 1773 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1774 /* ... and wait until all of them have reported back that
1775 they're no longer running. */
d90e17a7 1776 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1777
d90e17a7 1778 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1779
1780 /* Only the initial process should be left right now. */
d90e17a7
PA
1781 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1782
1783 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1784
a0ef4274
DJ
1785 /* Pass on any pending signal for the last LWP. */
1786 if ((args == NULL || *args == '\0')
d90e17a7 1787 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1788 && WIFSTOPPED (status))
1789 {
1790 /* Put the signal number in ARGS so that inf_ptrace_detach will
1791 pass it along with PTRACE_DETACH. */
1792 args = alloca (8);
1793 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1794 if (debug_linux_nat)
1795 fprintf_unfiltered (gdb_stdlog,
1796 "LND: Sending signal %s to %s\n",
1797 args,
1798 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1799 }
1800
7b50312a
PA
1801 if (linux_nat_prepare_to_resume != NULL)
1802 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1803 delete_lwp (main_lwp->ptid);
b84876c2 1804
7a7d3353
PA
1805 if (forks_exist_p ())
1806 {
1807 /* Multi-fork case. The current inferior_ptid is being detached
1808 from, but there are other viable forks to debug. Detach from
1809 the current fork, and context-switch to the first
1810 available. */
1811 linux_fork_detach (args, from_tty);
1812
1813 if (non_stop && target_can_async_p ())
1814 target_async (inferior_event_handler, 0);
1815 }
1816 else
1817 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1818}
1819
1820/* Resume LP. */
1821
25289eb2
PA
1822static void
1823resume_lwp (struct lwp_info *lp, int step)
d6b0e80f 1824{
25289eb2 1825 if (lp->stopped)
6c95b8df 1826 {
25289eb2
PA
1827 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1828
1829 if (inf->vfork_child != NULL)
1830 {
1831 if (debug_linux_nat)
1832 fprintf_unfiltered (gdb_stdlog,
1833 "RC: Not resuming %s (vfork parent)\n",
1834 target_pid_to_str (lp->ptid));
1835 }
1836 else if (lp->status == 0
1837 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1838 {
1839 if (debug_linux_nat)
1840 fprintf_unfiltered (gdb_stdlog,
1841 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1842 target_pid_to_str (lp->ptid));
1843
7b50312a
PA
1844 if (linux_nat_prepare_to_resume != NULL)
1845 linux_nat_prepare_to_resume (lp);
25289eb2
PA
1846 linux_ops->to_resume (linux_ops,
1847 pid_to_ptid (GET_LWP (lp->ptid)),
1848 step, TARGET_SIGNAL_0);
25289eb2
PA
1849 lp->stopped = 0;
1850 lp->step = step;
1851 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1852 lp->stopped_by_watchpoint = 0;
1853 }
1854 else
1855 {
1856 if (debug_linux_nat)
1857 fprintf_unfiltered (gdb_stdlog,
1858 "RC: Not resuming sibling %s (has pending)\n",
1859 target_pid_to_str (lp->ptid));
1860 }
6c95b8df 1861 }
25289eb2 1862 else
d6b0e80f 1863 {
d90e17a7
PA
1864 if (debug_linux_nat)
1865 fprintf_unfiltered (gdb_stdlog,
25289eb2 1866 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1867 target_pid_to_str (lp->ptid));
d6b0e80f 1868 }
25289eb2 1869}
d6b0e80f 1870
25289eb2
PA
1871static int
1872resume_callback (struct lwp_info *lp, void *data)
1873{
1874 resume_lwp (lp, 0);
d6b0e80f
AC
1875 return 0;
1876}
1877
1878static int
1879resume_clear_callback (struct lwp_info *lp, void *data)
1880{
1881 lp->resumed = 0;
25289eb2 1882 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1883 return 0;
1884}
1885
1886static int
1887resume_set_callback (struct lwp_info *lp, void *data)
1888{
1889 lp->resumed = 1;
25289eb2 1890 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1891 return 0;
1892}
1893
1894static void
28439f5e
PA
1895linux_nat_resume (struct target_ops *ops,
1896 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1897{
7feb7d06 1898 sigset_t prev_mask;
d6b0e80f 1899 struct lwp_info *lp;
d90e17a7 1900 int resume_many;
d6b0e80f 1901
76f50ad1
DJ
1902 if (debug_linux_nat)
1903 fprintf_unfiltered (gdb_stdlog,
1904 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1905 step ? "step" : "resume",
1906 target_pid_to_str (ptid),
423ec54c
JK
1907 (signo != TARGET_SIGNAL_0
1908 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1909 target_pid_to_str (inferior_ptid));
1910
7feb7d06 1911 block_child_signals (&prev_mask);
b84876c2 1912
d6b0e80f 1913 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1914 resume_many = (ptid_equal (minus_one_ptid, ptid)
1915 || ptid_is_pid (ptid));
4c28f408 1916
e3e9f5a2
PA
1917 /* Mark the lwps we're resuming as resumed. */
1918 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1919
d90e17a7
PA
1920 /* See if it's the current inferior that should be handled
1921 specially. */
1922 if (resume_many)
1923 lp = find_lwp_pid (inferior_ptid);
1924 else
1925 lp = find_lwp_pid (ptid);
9f0bdab8 1926 gdb_assert (lp != NULL);
d6b0e80f 1927
9f0bdab8
DJ
1928 /* Remember if we're stepping. */
1929 lp->step = step;
25289eb2 1930 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1931
9f0bdab8
DJ
1932 /* If we have a pending wait status for this thread, there is no
1933 point in resuming the process. But first make sure that
1934 linux_nat_wait won't preemptively handle the event - we
1935 should never take this short-circuit if we are going to
1936 leave LP running, since we have skipped resuming all the
1937 other threads. This bit of code needs to be synchronized
1938 with linux_nat_wait. */
76f50ad1 1939
9f0bdab8
DJ
1940 if (lp->status && WIFSTOPPED (lp->status))
1941 {
2455069d
UW
1942 if (!lp->step
1943 && WSTOPSIG (lp->status)
1944 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1945 {
9f0bdab8
DJ
1946 if (debug_linux_nat)
1947 fprintf_unfiltered (gdb_stdlog,
1948 "LLR: Not short circuiting for ignored "
1949 "status 0x%x\n", lp->status);
1950
d6b0e80f
AC
1951 /* FIXME: What should we do if we are supposed to continue
1952 this thread with a signal? */
1953 gdb_assert (signo == TARGET_SIGNAL_0);
2455069d 1954 signo = target_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1955 lp->status = 0;
1956 }
1957 }
76f50ad1 1958
6c95b8df 1959 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1960 {
1961 /* FIXME: What should we do if we are supposed to continue
1962 this thread with a signal? */
1963 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1964
9f0bdab8
DJ
1965 if (debug_linux_nat)
1966 fprintf_unfiltered (gdb_stdlog,
1967 "LLR: Short circuiting for status 0x%x\n",
1968 lp->status);
d6b0e80f 1969
7feb7d06
PA
1970 restore_child_signals_mask (&prev_mask);
1971 if (target_can_async_p ())
1972 {
1973 target_async (inferior_event_handler, 0);
1974 /* Tell the event loop we have something to process. */
1975 async_file_mark ();
1976 }
9f0bdab8 1977 return;
d6b0e80f
AC
1978 }
1979
9f0bdab8
DJ
1980 /* Mark LWP as not stopped to prevent it from being continued by
1981 resume_callback. */
1982 lp->stopped = 0;
1983
d90e17a7
PA
1984 if (resume_many)
1985 iterate_over_lwps (ptid, resume_callback, NULL);
1986
1987 /* Convert to something the lower layer understands. */
1988 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1989
7b50312a
PA
1990 if (linux_nat_prepare_to_resume != NULL)
1991 linux_nat_prepare_to_resume (lp);
28439f5e 1992 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 1993 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1994 lp->stopped_by_watchpoint = 0;
9f0bdab8 1995
d6b0e80f
AC
1996 if (debug_linux_nat)
1997 fprintf_unfiltered (gdb_stdlog,
1998 "LLR: %s %s, %s (resume event thread)\n",
1999 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2000 target_pid_to_str (ptid),
423ec54c
JK
2001 (signo != TARGET_SIGNAL_0
2002 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 2003
7feb7d06 2004 restore_child_signals_mask (&prev_mask);
b84876c2 2005 if (target_can_async_p ())
8ea051c5 2006 target_async (inferior_event_handler, 0);
d6b0e80f
AC
2007}
2008
c5f62d5f 2009/* Send a signal to an LWP. */
d6b0e80f
AC
2010
2011static int
2012kill_lwp (int lwpid, int signo)
2013{
c5f62d5f
DE
2014 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2015 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2016
2017#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2018 {
2019 static int tkill_failed;
2020
2021 if (!tkill_failed)
2022 {
2023 int ret;
2024
2025 errno = 0;
2026 ret = syscall (__NR_tkill, lwpid, signo);
2027 if (errno != ENOSYS)
2028 return ret;
2029 tkill_failed = 1;
2030 }
2031 }
d6b0e80f
AC
2032#endif
2033
2034 return kill (lwpid, signo);
2035}
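
/* Hedged aside, not part of the original file: tkill(2) queues the
   signal for one specific kernel task (LWP), whereas a plain kill(2)
   addresses the whole thread group and the kernel may deliver a
   process-directed signal to any thread in it.  Roughly:

     syscall (__NR_tkill, lwpid, sig);     thread-directed
     kill (pid, sig);                      process-directed

   which is why the per-LWP stop/continue code in this file goes
   through kill_lwp rather than plain kill.  */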
2036
ca2163eb
PA
2037/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2038 event, check if the core is interested in it: if not, ignore the
2039 event, and keep waiting; otherwise, we need to toggle the LWP's
2040 syscall entry/exit status, since the ptrace event itself doesn't
2041 indicate it, and report the trap to higher layers. */
2042
2043static int
2044linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2045{
2046 struct target_waitstatus *ourstatus = &lp->waitstatus;
2047 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2048 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2049
2050 if (stopping)
2051 {
2052 /* If we're stopping threads, there's a SIGSTOP pending, which
2053 makes it so that the LWP reports an immediate syscall return,
2054 followed by the SIGSTOP. Skip seeing that "return" using
2055 PTRACE_CONT directly, and let stop_wait_callback collect the
 2056	 SIGSTOP.  Later, when the thread is resumed, a new syscall
 2057	 entry event will be reported.  If we didn't do this (and returned 0), we'd
2058 leave a syscall entry pending, and our caller, by using
2059 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2060 itself. Later, when the user re-resumes this LWP, we'd see
2061 another syscall entry event and we'd mistake it for a return.
2062
2063 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2064 (leaving immediately with LWP->signalled set, without issuing
2065 a PTRACE_CONT), it would still be problematic to leave this
2066 syscall enter pending, as later when the thread is resumed,
2067 it would then see the same syscall exit mentioned above,
2068 followed by the delayed SIGSTOP, while the syscall didn't
2069 actually get to execute. It seems it would be even more
2070 confusing to the user. */
2071
2072 if (debug_linux_nat)
2073 fprintf_unfiltered (gdb_stdlog,
2074 "LHST: ignoring syscall %d "
2075 "for LWP %ld (stopping threads), "
2076 "resuming with PTRACE_CONT for SIGSTOP\n",
2077 syscall_number,
2078 GET_LWP (lp->ptid));
2079
2080 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2081 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2082 return 1;
2083 }
2084
2085 if (catch_syscall_enabled ())
2086 {
2087 /* Always update the entry/return state, even if this particular
2088 syscall isn't interesting to the core now. In async mode,
2089 the user could install a new catchpoint for this syscall
2090 between syscall enter/return, and we'll need to know to
2091 report a syscall return if that happens. */
2092 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2093 ? TARGET_WAITKIND_SYSCALL_RETURN
2094 : TARGET_WAITKIND_SYSCALL_ENTRY);
2095
2096 if (catching_syscall_number (syscall_number))
2097 {
2098 /* Alright, an event to report. */
2099 ourstatus->kind = lp->syscall_state;
2100 ourstatus->value.syscall_number = syscall_number;
2101
2102 if (debug_linux_nat)
2103 fprintf_unfiltered (gdb_stdlog,
2104 "LHST: stopping for %s of syscall %d"
2105 " for LWP %ld\n",
3e43a32a
MS
2106 lp->syscall_state
2107 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2108 ? "entry" : "return",
2109 syscall_number,
2110 GET_LWP (lp->ptid));
2111 return 0;
2112 }
2113
2114 if (debug_linux_nat)
2115 fprintf_unfiltered (gdb_stdlog,
2116 "LHST: ignoring %s of syscall %d "
2117 "for LWP %ld\n",
2118 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2119 ? "entry" : "return",
2120 syscall_number,
2121 GET_LWP (lp->ptid));
2122 }
2123 else
2124 {
2125 /* If we had been syscall tracing, and hence used PT_SYSCALL
2126 before on this LWP, it could happen that the user removes all
2127 syscall catchpoints before we get to process this event.
2128 There are two noteworthy issues here:
2129
2130 - When stopped at a syscall entry event, resuming with
2131 PT_STEP still resumes executing the syscall and reports a
2132 syscall return.
2133
 2134	 - Only PT_SYSCALL catches syscall enters.  If we last
 2135	 single-stepped this thread, then this event can't be a
 2136	 syscall enter; since the last resume was a single-step, it
 2137	 has to be a syscall exit.
2138
2139 The points above mean that the next resume, be it PT_STEP or
2140 PT_CONTINUE, can not trigger a syscall trace event. */
2141 if (debug_linux_nat)
2142 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2143 "LHST: caught syscall event "
2144 "with no syscall catchpoints."
ca2163eb
PA
2145 " %d for LWP %ld, ignoring\n",
2146 syscall_number,
2147 GET_LWP (lp->ptid));
2148 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2149 }
2150
2151 /* The core isn't interested in this event. For efficiency, avoid
2152 stopping all threads only to have the core resume them all again.
2153 Since we're not stopping threads, if we're still syscall tracing
2154 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2155 subsequent syscall. Simply resume using the inf-ptrace layer,
2156 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2157
2158 /* Note that gdbarch_get_syscall_number may access registers, hence
2159 fill a regcache. */
2160 registers_changed ();
7b50312a
PA
2161 if (linux_nat_prepare_to_resume != NULL)
2162 linux_nat_prepare_to_resume (lp);
ca2163eb
PA
2163 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2164 lp->step, TARGET_SIGNAL_0);
2165 return 1;
2166}
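
/* Hedged note, not part of the original file: when the
   PTRACE_O_TRACESYSGOOD option is in effect, the kernel reports
   syscall stops with bit 0x80 or'ed into the stopping signal, which is
   what the SYSCALL_SIGTRAP checks in wait_lwp and
   linux_nat_filter_event key on, roughly:

     WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80)

   Without that bit, a syscall stop would be indistinguishable from an
   ordinary SIGTRAP.  */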
2167
3d799a95
DJ
2168/* Handle a GNU/Linux extended wait response. If we see a clone
2169 event, we need to add the new LWP to our list (and not report the
2170 trap to higher layers). This function returns non-zero if the
2171 event should be ignored and we should wait again. If STOPPING is
2172 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2173
2174static int
3d799a95
DJ
2175linux_handle_extended_wait (struct lwp_info *lp, int status,
2176 int stopping)
d6b0e80f 2177{
3d799a95
DJ
2178 int pid = GET_LWP (lp->ptid);
2179 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2180 int event = status >> 16;
d6b0e80f 2181
3d799a95
DJ
2182 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2183 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2184 {
3d799a95
DJ
2185 unsigned long new_pid;
2186 int ret;
2187
2188 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2189
3d799a95
DJ
2190 /* If we haven't already seen the new PID stop, wait for it now. */
2191 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2192 {
2193 /* The new child has a pending SIGSTOP. We can't affect it until it
2194 hits the SIGSTOP, but we're already attached. */
2195 ret = my_waitpid (new_pid, &status,
2196 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2197 if (ret == -1)
2198 perror_with_name (_("waiting for new child"));
2199 else if (ret != new_pid)
2200 internal_error (__FILE__, __LINE__,
2201 _("wait returned unexpected PID %d"), ret);
2202 else if (!WIFSTOPPED (status))
2203 internal_error (__FILE__, __LINE__,
2204 _("wait returned unexpected status 0x%x"), status);
2205 }
2206
3a3e9ee3 2207 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2208
2277426b
PA
2209 if (event == PTRACE_EVENT_FORK
2210 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2211 {
2277426b
PA
2212 /* Handle checkpointing by linux-fork.c here as a special
2213 case. We don't want the follow-fork-mode or 'catch fork'
2214 to interfere with this. */
2215
2216 /* This won't actually modify the breakpoint list, but will
2217 physically remove the breakpoints from the child. */
2218 detach_breakpoints (new_pid);
2219
2220 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2221 if (!find_fork_pid (new_pid))
2222 add_fork (new_pid);
2277426b
PA
2223
2224 /* Report as spurious, so that infrun doesn't want to follow
2225 this fork. We're actually doing an infcall in
2226 linux-fork.c. */
2227 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2228 linux_enable_event_reporting (pid_to_ptid (new_pid));
2229
2230 /* Report the stop to the core. */
2231 return 0;
2232 }
2233
3d799a95
DJ
2234 if (event == PTRACE_EVENT_FORK)
2235 ourstatus->kind = TARGET_WAITKIND_FORKED;
2236 else if (event == PTRACE_EVENT_VFORK)
2237 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2238 else
3d799a95 2239 {
78768c4a
JK
2240 struct lwp_info *new_lp;
2241
3d799a95 2242 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2243
3c4d7e12
PA
2244 if (debug_linux_nat)
2245 fprintf_unfiltered (gdb_stdlog,
2246 "LHEW: Got clone event "
2247 "from LWP %d, new child is LWP %ld\n",
2248 pid, new_pid);
2249
d90e17a7 2250 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2251 new_lp->cloned = 1;
4c28f408 2252 new_lp->stopped = 1;
d6b0e80f 2253
3d799a95
DJ
2254 if (WSTOPSIG (status) != SIGSTOP)
2255 {
2256 /* This can happen if someone starts sending signals to
 2257		 the new thread before it gets a chance to run, and those
 2258		 signals have a lower number than SIGSTOP (e.g. SIGUSR1).
2259 This is an unlikely case, and harder to handle for
2260 fork / vfork than for clone, so we do not try - but
2261 we handle it for clone events here. We'll send
2262 the other signal on to the thread below. */
2263
2264 new_lp->signalled = 1;
2265 }
2266 else
79395f92
PA
2267 {
2268 struct thread_info *tp;
2269
2270 /* When we stop for an event in some other thread, and
2271 pull the thread list just as this thread has cloned,
2272 we'll have seen the new thread in the thread_db list
2273 before handling the CLONE event (glibc's
2274 pthread_create adds the new thread to the thread list
2275 before clone'ing, and has the kernel fill in the
2276 thread's tid on the clone call with
2277 CLONE_PARENT_SETTID). If that happened, and the core
2278 had requested the new thread to stop, we'll have
2279 killed it with SIGSTOP. But since SIGSTOP is not an
2280 RT signal, it can only be queued once. We need to be
2281 careful to not resume the LWP if we wanted it to
2282 stop. In that case, we'll leave the SIGSTOP pending.
2283 It will later be reported as TARGET_SIGNAL_0. */
2284 tp = find_thread_ptid (new_lp->ptid);
2285 if (tp != NULL && tp->stop_requested)
2286 new_lp->last_resume_kind = resume_stop;
2287 else
2288 status = 0;
2289 }
d6b0e80f 2290
4c28f408 2291 if (non_stop)
3d799a95 2292 {
4c28f408
PA
2293 /* Add the new thread to GDB's lists as soon as possible
2294 so that:
2295
2296 1) the frontend doesn't have to wait for a stop to
2297 display them, and,
2298
2299 2) we tag it with the correct running state. */
2300
2301 /* If the thread_db layer is active, let it know about
2302 this new thread, and add it to GDB's list. */
2303 if (!thread_db_attach_lwp (new_lp->ptid))
2304 {
2305 /* We're not using thread_db. Add it to GDB's
2306 list. */
2307 target_post_attach (GET_LWP (new_lp->ptid));
2308 add_thread (new_lp->ptid);
2309 }
2310
2311 if (!stopping)
2312 {
2313 set_running (new_lp->ptid, 1);
2314 set_executing (new_lp->ptid, 1);
e21ffe51
PA
2315 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2316 resume_stop. */
2317 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2318 }
2319 }
2320
79395f92
PA
2321 if (status != 0)
2322 {
2323 /* We created NEW_LP so it cannot yet contain STATUS. */
2324 gdb_assert (new_lp->status == 0);
2325
2326 /* Save the wait status to report later. */
2327 if (debug_linux_nat)
2328 fprintf_unfiltered (gdb_stdlog,
2329 "LHEW: waitpid of new LWP %ld, "
2330 "saving status %s\n",
2331 (long) GET_LWP (new_lp->ptid),
2332 status_to_str (status));
2333 new_lp->status = status;
2334 }
2335
ca2163eb
PA
2336 /* Note the need to use the low target ops to resume, to
2337 handle resuming with PT_SYSCALL if we have syscall
2338 catchpoints. */
4c28f408
PA
2339 if (!stopping)
2340 {
3d799a95 2341 new_lp->resumed = 1;
ca2163eb 2342
79395f92 2343 if (status == 0)
ad34eb2f 2344 {
e21ffe51 2345 gdb_assert (new_lp->last_resume_kind == resume_continue);
ad34eb2f
JK
2346 if (debug_linux_nat)
2347 fprintf_unfiltered (gdb_stdlog,
79395f92
PA
2348 "LHEW: resuming new LWP %ld\n",
2349 GET_LWP (new_lp->ptid));
7b50312a
PA
2350 if (linux_nat_prepare_to_resume != NULL)
2351 linux_nat_prepare_to_resume (new_lp);
79395f92
PA
2352 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2353 0, TARGET_SIGNAL_0);
2354 new_lp->stopped = 0;
ad34eb2f
JK
2355 }
2356 }
d6b0e80f 2357
3d799a95
DJ
2358 if (debug_linux_nat)
2359 fprintf_unfiltered (gdb_stdlog,
3c4d7e12 2360 "LHEW: resuming parent LWP %d\n", pid);
7b50312a
PA
2361 if (linux_nat_prepare_to_resume != NULL)
2362 linux_nat_prepare_to_resume (lp);
ca2163eb
PA
2363 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2364 0, TARGET_SIGNAL_0);
3d799a95
DJ
2365
2366 return 1;
2367 }
2368
2369 return 0;
d6b0e80f
AC
2370 }
2371
3d799a95
DJ
2372 if (event == PTRACE_EVENT_EXEC)
2373 {
a75724bc
PA
2374 if (debug_linux_nat)
2375 fprintf_unfiltered (gdb_stdlog,
2376 "LHEW: Got exec event from LWP %ld\n",
2377 GET_LWP (lp->ptid));
2378
3d799a95
DJ
2379 ourstatus->kind = TARGET_WAITKIND_EXECD;
2380 ourstatus->value.execd_pathname
6d8fd2b7 2381 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2382
6c95b8df
PA
2383 return 0;
2384 }
2385
2386 if (event == PTRACE_EVENT_VFORK_DONE)
2387 {
2388 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2389 {
6c95b8df 2390 if (debug_linux_nat)
3e43a32a
MS
2391 fprintf_unfiltered (gdb_stdlog,
2392 "LHEW: Got expected PTRACE_EVENT_"
2393 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2394 GET_LWP (lp->ptid));
3d799a95 2395
6c95b8df
PA
2396 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2397 return 0;
3d799a95
DJ
2398 }
2399
6c95b8df 2400 if (debug_linux_nat)
3e43a32a
MS
2401 fprintf_unfiltered (gdb_stdlog,
2402 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2403 "from LWP %ld: resuming\n",
6c95b8df
PA
2404 GET_LWP (lp->ptid));
2405 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2406 return 1;
3d799a95
DJ
2407 }
2408
2409 internal_error (__FILE__, __LINE__,
2410 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2411}
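
/* Illustrative note, not part of the original file: extended ptrace
   events arrive as SIGTRAP stops with the event number packed into the
   upper bits of the wait status, so the decoding used above amounts to:

     int event = status >> 16;          e.g. PTRACE_EVENT_CLONE
     int sig = WSTOPSIG (status);       always SIGTRAP for these stops

   and PTRACE_GETEVENTMSG is then used to fetch the event payload, such
   as the new child's PID for fork/vfork/clone.  */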
2412
432b4d03
JK
2413/* Return non-zero if LWP is a zombie. */
2414
2415static int
2416linux_lwp_is_zombie (long lwp)
2417{
2418 char buffer[MAXPATHLEN];
2419 FILE *procfile;
ea23808b
PA
2420 int retval;
2421 int have_state;
432b4d03 2422
07e78767 2423 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
432b4d03
JK
2424 procfile = fopen (buffer, "r");
2425 if (procfile == NULL)
2426 {
2427 warning (_("unable to open /proc file '%s'"), buffer);
2428 return 0;
2429 }
ea23808b
PA
2430
2431 have_state = 0;
432b4d03 2432 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
ea23808b 2433 if (strncmp (buffer, "State:", 6) == 0)
432b4d03 2434 {
ea23808b 2435 have_state = 1;
432b4d03
JK
2436 break;
2437 }
ea23808b
PA
2438 retval = (have_state
2439 && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
432b4d03 2440 fclose (procfile);
432b4d03
JK
2441 return retval;
2442}
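
/* Hedged example, not part of the original file: for a zombie task the
   matched line in /proc/<lwp>/status reads "State:\tZ (zombie)\n",
   whereas a live task shows e.g. "State:\tS (sleeping)\n" or
   "State:\tR (running)\n", so the exact string comparison above is
   sufficient to classify the LWP.  */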
2443
d6b0e80f
AC
2444/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2445 exited. */
2446
2447static int
2448wait_lwp (struct lwp_info *lp)
2449{
2450 pid_t pid;
432b4d03 2451 int status = 0;
d6b0e80f 2452 int thread_dead = 0;
432b4d03 2453 sigset_t prev_mask;
d6b0e80f
AC
2454
2455 gdb_assert (!lp->stopped);
2456 gdb_assert (lp->status == 0);
2457
432b4d03
JK
2458 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2459 block_child_signals (&prev_mask);
2460
2461 for (;;)
d6b0e80f 2462 {
432b4d03
JK
2463 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2464 was right and we should just call sigsuspend. */
2465
2466 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
d6b0e80f 2467 if (pid == -1 && errno == ECHILD)
432b4d03 2468 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2469 if (pid == -1 && errno == ECHILD)
2470 {
2471 /* The thread has previously exited. We need to delete it
2472 now because, for some vendor 2.4 kernels with NPTL
2473 support backported, there won't be an exit event unless
2474 it is the main thread. 2.6 kernels will report an exit
2475 event for each thread that exits, as expected. */
2476 thread_dead = 1;
2477 if (debug_linux_nat)
2478 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2479 target_pid_to_str (lp->ptid));
2480 }
432b4d03
JK
2481 if (pid != 0)
2482 break;
2483
2484 /* Bugs 10970, 12702.
2485 Thread group leader may have exited in which case we'll lock up in
2486 waitpid if there are other threads, even if they are all zombies too.
2487 Basically, we're not supposed to use waitpid this way.
2488 __WCLONE is not applicable for the leader so we can't use that.
2489 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2490 process; it gets ESRCH both for the zombie and for running processes.
2491
2492 As a workaround, check if we're waiting for the thread group leader and
2493 if it's a zombie, and avoid calling waitpid if it is.
2494
2495 This is racy, what if the tgl becomes a zombie right after we check?
2496 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2497 waiting waitpid but the linux_lwp_is_zombie is safe this way. */
2498
2499 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2500 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
d6b0e80f 2501 {
d6b0e80f
AC
2502 thread_dead = 1;
2503 if (debug_linux_nat)
432b4d03
JK
2504 fprintf_unfiltered (gdb_stdlog,
2505 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2506 target_pid_to_str (lp->ptid));
432b4d03 2507 break;
d6b0e80f 2508 }
432b4d03
JK
2509
 2510	      /* Wait for the next SIGCHLD and try again.  This may let SIGCHLD
 2511	 handlers run even though our caller intentionally blocked them
 2512	 with block_child_signals.  Only the loop in linux_nat_wait_1 is
 2513	 sensitive to that, and there, if we get called, my_waitpid runs
 2514	 again before that loop reaches sigsuspend, so we can safely let
 2515	 the handlers execute here.  */
2516
2517 sigsuspend (&suspend_mask);
2518 }
2519
2520 restore_child_signals_mask (&prev_mask);
2521
d6b0e80f
AC
2522 if (!thread_dead)
2523 {
2524 gdb_assert (pid == GET_LWP (lp->ptid));
2525
2526 if (debug_linux_nat)
2527 {
2528 fprintf_unfiltered (gdb_stdlog,
2529 "WL: waitpid %s received %s\n",
2530 target_pid_to_str (lp->ptid),
2531 status_to_str (status));
2532 }
d6b0e80f 2533
a9f4bb21
PA
2534 /* Check if the thread has exited. */
2535 if (WIFEXITED (status) || WIFSIGNALED (status))
2536 {
2537 thread_dead = 1;
2538 if (debug_linux_nat)
2539 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2540 target_pid_to_str (lp->ptid));
2541 }
d6b0e80f
AC
2542 }
2543
2544 if (thread_dead)
2545 {
e26af52f 2546 exit_lwp (lp);
d6b0e80f
AC
2547 return 0;
2548 }
2549
2550 gdb_assert (WIFSTOPPED (status));
2551
ca2163eb
PA
2552 /* Handle GNU/Linux's syscall SIGTRAPs. */
2553 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2554 {
2555 /* No longer need the sysgood bit. The ptrace event ends up
2556 recorded in lp->waitstatus if we care for it. We can carry
2557 on handling the event like a regular SIGTRAP from here
2558 on. */
2559 status = W_STOPCODE (SIGTRAP);
2560 if (linux_handle_syscall_trap (lp, 1))
2561 return wait_lwp (lp);
2562 }
2563
d6b0e80f
AC
2564 /* Handle GNU/Linux's extended waitstatus for trace events. */
2565 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2566 {
2567 if (debug_linux_nat)
2568 fprintf_unfiltered (gdb_stdlog,
2569 "WL: Handling extended status 0x%06x\n",
2570 status);
3d799a95 2571 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2572 return wait_lwp (lp);
2573 }
2574
2575 return status;
2576}
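
/* Hedged sketch, not part of the original file: the loop above is an
   instance of the generic "poll without blocking, then sleep until the
   next SIGCHLD" pattern:

     for (;;)
       {
         pid = my_waitpid (lwpid, &status, WNOHANG);
         if (pid != 0)
           break;
         sigsuspend (&suspend_mask);
       }

   Using WNOHANG is what keeps us from blocking forever on a zombie
   thread-group leader, at the cost of the extra __WCLONE retry and the
   zombie check seen above.  */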
2577
9f0bdab8
DJ
2578/* Save the most recent siginfo for LP. This is currently only called
2579 for SIGTRAP; some ports use the si_addr field for
2580 target_stopped_data_address. In the future, it may also be used to
2581 restore the siginfo of requeued signals. */
2582
2583static void
2584save_siginfo (struct lwp_info *lp)
2585{
2586 errno = 0;
2587 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2588 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2589
2590 if (errno != 0)
2591 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2592}
2593
d6b0e80f
AC
2594/* Send a SIGSTOP to LP. */
2595
2596static int
2597stop_callback (struct lwp_info *lp, void *data)
2598{
2599 if (!lp->stopped && !lp->signalled)
2600 {
2601 int ret;
2602
2603 if (debug_linux_nat)
2604 {
2605 fprintf_unfiltered (gdb_stdlog,
2606 "SC: kill %s **<SIGSTOP>**\n",
2607 target_pid_to_str (lp->ptid));
2608 }
2609 errno = 0;
2610 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2611 if (debug_linux_nat)
2612 {
2613 fprintf_unfiltered (gdb_stdlog,
2614 "SC: lwp kill %d %s\n",
2615 ret,
2616 errno ? safe_strerror (errno) : "ERRNO-OK");
2617 }
2618
2619 lp->signalled = 1;
2620 gdb_assert (lp->status == 0);
2621 }
2622
2623 return 0;
2624}
2625
7b50312a
PA
2626/* Request a stop on LWP. */
2627
2628void
2629linux_stop_lwp (struct lwp_info *lwp)
2630{
2631 stop_callback (lwp, NULL);
2632}
2633
57380f4e 2634/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2635
2636static int
57380f4e
DJ
2637linux_nat_has_pending_sigint (int pid)
2638{
2639 sigset_t pending, blocked, ignored;
57380f4e
DJ
2640
2641 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2642
2643 if (sigismember (&pending, SIGINT)
2644 && !sigismember (&ignored, SIGINT))
2645 return 1;
2646
2647 return 0;
2648}
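
/* Hedged note, not part of the original file: linux_proc_pending_signals
   builds these sets from the signal-mask fields of /proc/<pid>/status,
   which look roughly like:

     SigPnd: 0000000000000002
     SigBlk: 0000000000010000
     SigIgn: 0000000000384004

   so "pending and not ignored" above translates to the SIGINT bit
   (signal 2, mask value 0x2) being set in the pending mask and clear
   in the ignored mask.  */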
2649
2650/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2651
2652static int
2653set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2654{
57380f4e
DJ
2655 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2656 flag to consume the next one. */
2657 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2658 && WSTOPSIG (lp->status) == SIGINT)
2659 lp->status = 0;
2660 else
2661 lp->ignore_sigint = 1;
2662
2663 return 0;
2664}
2665
2666/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2667 This function is called after we know the LWP has stopped; if the LWP
2668 stopped before the expected SIGINT was delivered, then it will never have
2669 arrived. Also, if the signal was delivered to a shared queue and consumed
2670 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2671
57380f4e
DJ
2672static void
2673maybe_clear_ignore_sigint (struct lwp_info *lp)
2674{
2675 if (!lp->ignore_sigint)
2676 return;
2677
2678 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2679 {
2680 if (debug_linux_nat)
2681 fprintf_unfiltered (gdb_stdlog,
2682 "MCIS: Clearing bogus flag for %s\n",
2683 target_pid_to_str (lp->ptid));
2684 lp->ignore_sigint = 0;
2685 }
2686}
2687
ebec9a0f
PA
2688/* Fetch the possible triggered data watchpoint info and store it in
2689 LP.
2690
2691 On some archs, like x86, that use debug registers to set
2692 watchpoints, it's possible that the way to know which watched
2693 address trapped, is to check the register that is used to select
2694 which address to watch. Problem is, between setting the watchpoint
2695 and reading back which data address trapped, the user may change
2696 the set of watchpoints, and, as a consequence, GDB changes the
2697 debug registers in the inferior. To avoid reading back a stale
2698 stopped-data-address when that happens, we cache in LP the fact
2699 that a watchpoint trapped, and the corresponding data address, as
2700 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2701 registers meanwhile, we have the cached data we can rely on. */
2702
2703static void
2704save_sigtrap (struct lwp_info *lp)
2705{
2706 struct cleanup *old_chain;
2707
2708 if (linux_ops->to_stopped_by_watchpoint == NULL)
2709 {
2710 lp->stopped_by_watchpoint = 0;
2711 return;
2712 }
2713
2714 old_chain = save_inferior_ptid ();
2715 inferior_ptid = lp->ptid;
2716
2717 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2718
2719 if (lp->stopped_by_watchpoint)
2720 {
2721 if (linux_ops->to_stopped_data_address != NULL)
2722 lp->stopped_data_address_p =
2723 linux_ops->to_stopped_data_address (&current_target,
2724 &lp->stopped_data_address);
2725 else
2726 lp->stopped_data_address_p = 0;
2727 }
2728
2729 do_cleanups (old_chain);
2730}
2731
2732/* See save_sigtrap. */
2733
2734static int
2735linux_nat_stopped_by_watchpoint (void)
2736{
2737 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2738
2739 gdb_assert (lp != NULL);
2740
2741 return lp->stopped_by_watchpoint;
2742}
2743
2744static int
2745linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2746{
2747 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2748
2749 gdb_assert (lp != NULL);
2750
2751 *addr_p = lp->stopped_data_address;
2752
2753 return lp->stopped_data_address_p;
2754}
2755
26ab7092
JK
 2756/* Commonly any breakpoint / watchpoint generates only SIGTRAP.  */
2757
2758static int
2759sigtrap_is_event (int status)
2760{
2761 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2762}
2763
2764/* SIGTRAP-like events recognizer. */
2765
2766static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2767
00390b84
JK
2768/* Check for SIGTRAP-like events in LP. */
2769
2770static int
2771linux_nat_lp_status_is_event (struct lwp_info *lp)
2772{
2773 /* We check for lp->waitstatus in addition to lp->status, because we can
2774 have pending process exits recorded in lp->status
2775 and W_EXITCODE(0,0) == 0. We should probably have an additional
2776 lp->status_p flag. */
2777
2778 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2779 && linux_nat_status_is_event (lp->status));
2780}
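
/* Illustrative aside, not part of the original file: glibc encodes a
   normal exit as W_EXITCODE(ret, 0) == ((ret) << 8), so

     W_EXITCODE (0, 0) == 0

   i.e. a clean exit stored in lp->status is numerically identical to
   "no status at all", which is the ambiguity the comment above (and
   status_callback later in this file) is working around.  */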
2781
26ab7092
JK
2782/* Set alternative SIGTRAP-like events recognizer. If
2783 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2784 applied. */
2785
2786void
2787linux_nat_set_status_is_event (struct target_ops *t,
2788 int (*status_is_event) (int status))
2789{
2790 linux_nat_status_is_event = status_is_event;
2791}
2792
57380f4e
DJ
2793/* Wait until LP is stopped. */
2794
2795static int
2796stop_wait_callback (struct lwp_info *lp, void *data)
2797{
6c95b8df
PA
2798 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2799
2800 /* If this is a vfork parent, bail out, it is not going to report
2801 any SIGSTOP until the vfork is done with. */
2802 if (inf->vfork_child != NULL)
2803 return 0;
2804
d6b0e80f
AC
2805 if (!lp->stopped)
2806 {
2807 int status;
2808
2809 status = wait_lwp (lp);
2810 if (status == 0)
2811 return 0;
2812
57380f4e
DJ
2813 if (lp->ignore_sigint && WIFSTOPPED (status)
2814 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2815 {
57380f4e 2816 lp->ignore_sigint = 0;
d6b0e80f
AC
2817
2818 errno = 0;
2819 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2820 if (debug_linux_nat)
2821 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2822 "PTRACE_CONT %s, 0, 0 (%s) "
2823 "(discarding SIGINT)\n",
d6b0e80f
AC
2824 target_pid_to_str (lp->ptid),
2825 errno ? safe_strerror (errno) : "OK");
2826
57380f4e 2827 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2828 }
2829
57380f4e
DJ
2830 maybe_clear_ignore_sigint (lp);
2831
d6b0e80f
AC
2832 if (WSTOPSIG (status) != SIGSTOP)
2833 {
26ab7092 2834 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2835 {
2836 /* If a LWP other than the LWP that we're reporting an
2837 event for has hit a GDB breakpoint (as opposed to
2838 some random trap signal), then just arrange for it to
2839 hit it again later. We don't keep the SIGTRAP status
2840 and don't forward the SIGTRAP signal to the LWP. We
2841 will handle the current event, eventually we will
2842 resume all LWPs, and this one will get its breakpoint
2843 trap again.
2844
2845 If we do not do this, then we run the risk that the
2846 user will delete or disable the breakpoint, but the
2847 thread will have already tripped on it. */
2848
9f0bdab8
DJ
2849 /* Save the trap's siginfo in case we need it later. */
2850 save_siginfo (lp);
2851
ebec9a0f
PA
2852 save_sigtrap (lp);
2853
1777feb0 2854 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2855 errno = 0;
2856 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2857 if (debug_linux_nat)
2858 {
2859 fprintf_unfiltered (gdb_stdlog,
2860 "PTRACE_CONT %s, 0, 0 (%s)\n",
2861 target_pid_to_str (lp->ptid),
2862 errno ? safe_strerror (errno) : "OK");
2863
2864 fprintf_unfiltered (gdb_stdlog,
2865 "SWC: Candidate SIGTRAP event in %s\n",
2866 target_pid_to_str (lp->ptid));
2867 }
710151dd 2868 /* Hold this event/waitstatus while we check to see if
1777feb0 2869 there are any more (we still want to get that SIGSTOP). */
57380f4e 2870 stop_wait_callback (lp, NULL);
710151dd 2871
7feb7d06
PA
2872 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2873 there's another event, throw it back into the
1777feb0 2874 queue. */
7feb7d06 2875 if (lp->status)
710151dd 2876 {
7feb7d06
PA
2877 if (debug_linux_nat)
2878 fprintf_unfiltered (gdb_stdlog,
2879 "SWC: kill %s, %s\n",
2880 target_pid_to_str (lp->ptid),
2881 status_to_str ((int) status));
2882 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2883 }
7feb7d06 2884
1777feb0 2885 /* Save the sigtrap event. */
7feb7d06 2886 lp->status = status;
d6b0e80f
AC
2887 return 0;
2888 }
2889 else
2890 {
2891 /* The thread was stopped with a signal other than
1777feb0 2892 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2893
2894 if (debug_linux_nat)
2895 {
2896 fprintf_unfiltered (gdb_stdlog,
2897 "SWC: Pending event %s in %s\n",
2898 status_to_str ((int) status),
2899 target_pid_to_str (lp->ptid));
2900 }
1777feb0 2901 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2902 errno = 0;
2903 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2904 if (debug_linux_nat)
2905 fprintf_unfiltered (gdb_stdlog,
2906 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2907 target_pid_to_str (lp->ptid),
2908 errno ? safe_strerror (errno) : "OK");
2909
2910 /* Hold this event/waitstatus while we check to see if
1777feb0 2911 there are any more (we still want to get that SIGSTOP). */
57380f4e 2912 stop_wait_callback (lp, NULL);
710151dd
PA
2913
2914 /* If the lp->status field is still empty, use it to
2915 hold this event. If not, then this event must be
2916 returned to the event queue of the LWP. */
7feb7d06 2917 if (lp->status)
d6b0e80f
AC
2918 {
2919 if (debug_linux_nat)
2920 {
2921 fprintf_unfiltered (gdb_stdlog,
2922 "SWC: kill %s, %s\n",
2923 target_pid_to_str (lp->ptid),
2924 status_to_str ((int) status));
2925 }
2926 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2927 }
710151dd
PA
2928 else
2929 lp->status = status;
d6b0e80f
AC
2930 return 0;
2931 }
2932 }
2933 else
2934 {
2935 /* We caught the SIGSTOP that we intended to catch, so
2936 there's no SIGSTOP pending. */
2937 lp->stopped = 1;
2938 lp->signalled = 0;
2939 }
2940 }
2941
2942 return 0;
2943}
2944
d6b0e80f
AC
2945/* Return non-zero if LP has a wait status pending. */
2946
2947static int
2948status_callback (struct lwp_info *lp, void *data)
2949{
2950 /* Only report a pending wait status if we pretend that this has
2951 indeed been resumed. */
ca2163eb
PA
2952 if (!lp->resumed)
2953 return 0;
2954
2955 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2956 {
2957 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2958 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
 2959	 0', so a clean process exit cannot be stored pending in
 2960	 lp->status; it is indistinguishable from
2961 no-pending-status. */
2962 return 1;
2963 }
2964
2965 if (lp->status != 0)
2966 return 1;
2967
2968 return 0;
d6b0e80f
AC
2969}
2970
2971/* Return non-zero if LP isn't stopped. */
2972
2973static int
2974running_callback (struct lwp_info *lp, void *data)
2975{
25289eb2
PA
2976 return (!lp->stopped
2977 || ((lp->status != 0
2978 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2979 && lp->resumed));
d6b0e80f
AC
2980}
2981
2982/* Count the LWP's that have had events. */
2983
2984static int
2985count_events_callback (struct lwp_info *lp, void *data)
2986{
2987 int *count = data;
2988
2989 gdb_assert (count != NULL);
2990
e09490f1 2991 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2992 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2993 (*count)++;
2994
2995 return 0;
2996}
2997
2998/* Select the LWP (if any) that is currently being single-stepped. */
2999
3000static int
3001select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3002{
25289eb2
PA
3003 if (lp->last_resume_kind == resume_step
3004 && lp->status != 0)
d6b0e80f
AC
3005 return 1;
3006 else
3007 return 0;
3008}
3009
3010/* Select the Nth LWP that has had a SIGTRAP event. */
3011
3012static int
3013select_event_lwp_callback (struct lwp_info *lp, void *data)
3014{
3015 int *selector = data;
3016
3017 gdb_assert (selector != NULL);
3018
1777feb0 3019 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 3020 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
3021 if ((*selector)-- == 0)
3022 return 1;
3023
3024 return 0;
3025}
3026
710151dd
PA
3027static int
3028cancel_breakpoint (struct lwp_info *lp)
3029{
3030 /* Arrange for a breakpoint to be hit again later. We don't keep
3031 the SIGTRAP status and don't forward the SIGTRAP signal to the
3032 LWP. We will handle the current event, eventually we will resume
3033 this LWP, and this breakpoint will trap again.
3034
3035 If we do not do this, then we run the risk that the user will
3036 delete or disable the breakpoint, but the LWP will have already
3037 tripped on it. */
3038
515630c5
UW
3039 struct regcache *regcache = get_thread_regcache (lp->ptid);
3040 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3041 CORE_ADDR pc;
3042
3043 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 3044 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
3045 {
3046 if (debug_linux_nat)
3047 fprintf_unfiltered (gdb_stdlog,
3048 "CB: Push back breakpoint for %s\n",
3049 target_pid_to_str (lp->ptid));
3050
3051 /* Back up the PC if necessary. */
515630c5
UW
3052 if (gdbarch_decr_pc_after_break (gdbarch))
3053 regcache_write_pc (regcache, pc);
3054
710151dd
PA
3055 return 1;
3056 }
3057 return 0;
3058}
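
/* Hedged example, not part of the original file: on x86 the software
   breakpoint instruction is the one-byte int3, and the CPU reports the
   trap with the PC already past it, so gdbarch_decr_pc_after_break
   returns 1 there and the code above rewinds the PC onto the
   breakpoint address before re-reporting it; on architectures where
   the PC does not advance, the hook returns 0 and the write-back is
   skipped.  */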
3059
d6b0e80f
AC
3060static int
3061cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3062{
3063 struct lwp_info *event_lp = data;
3064
3065 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3066 if (lp == event_lp)
3067 return 0;
3068
3069 /* If a LWP other than the LWP that we're reporting an event for has
3070 hit a GDB breakpoint (as opposed to some random trap signal),
3071 then just arrange for it to hit it again later. We don't keep
3072 the SIGTRAP status and don't forward the SIGTRAP signal to the
3073 LWP. We will handle the current event, eventually we will resume
3074 all LWPs, and this one will get its breakpoint trap again.
3075
3076 If we do not do this, then we run the risk that the user will
3077 delete or disable the breakpoint, but the LWP will have already
3078 tripped on it. */
3079
00390b84 3080 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
3081 && cancel_breakpoint (lp))
3082 /* Throw away the SIGTRAP. */
3083 lp->status = 0;
d6b0e80f
AC
3084
3085 return 0;
3086}
3087
3088/* Select one LWP out of those that have events pending. */
3089
3090static void
d90e17a7 3091select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
3092{
3093 int num_events = 0;
3094 int random_selector;
3095 struct lwp_info *event_lp;
3096
ac264b3b 3097 /* Record the wait status for the original LWP. */
d6b0e80f
AC
3098 (*orig_lp)->status = *status;
3099
3100 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
3101 event_lp = iterate_over_lwps (filter,
3102 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
3103 if (event_lp != NULL)
3104 {
3105 if (debug_linux_nat)
3106 fprintf_unfiltered (gdb_stdlog,
3107 "SEL: Select single-step %s\n",
3108 target_pid_to_str (event_lp->ptid));
3109 }
3110 else
3111 {
3112 /* No single-stepping LWP. Select one at random, out of those
3113 which have had SIGTRAP events. */
3114
3115 /* First see how many SIGTRAP events we have. */
d90e17a7 3116 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
3117
3118 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3119 random_selector = (int)
3120 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3121
3122 if (debug_linux_nat && num_events > 1)
3123 fprintf_unfiltered (gdb_stdlog,
3124 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3125 num_events, random_selector);
3126
d90e17a7
PA
3127 event_lp = iterate_over_lwps (filter,
3128 select_event_lwp_callback,
d6b0e80f
AC
3129 &random_selector);
3130 }
3131
3132 if (event_lp != NULL)
3133 {
3134 /* Switch the event LWP. */
3135 *orig_lp = event_lp;
3136 *status = event_lp->status;
3137 }
3138
3139 /* Flush the wait status for the event LWP. */
3140 (*orig_lp)->status = 0;
3141}
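
/* Worked example, not part of the original file: with three LWPs
   having SIGTRAP events pending, num_events is 3 and

     random_selector = (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0));

   yields 0, 1 or 2 with (nearly) equal probability, so each candidate
   LWP is equally likely to be reported first.  This avoids starving
   threads when several of them hit breakpoints at the same time.  */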
3142
3143/* Return non-zero if LP has been resumed. */
3144
3145static int
3146resumed_callback (struct lwp_info *lp, void *data)
3147{
3148 return lp->resumed;
3149}
3150
12d9289a
PA
3151/* Stop an active thread, verify it still exists, then resume it. If
3152 the thread ends up with a pending status, then it is not resumed,
 3153   and *DATA (really a pointer to int) is set.  */
d6b0e80f
AC
3154
3155static int
3156stop_and_resume_callback (struct lwp_info *lp, void *data)
3157{
12d9289a
PA
3158 int *new_pending_p = data;
3159
25289eb2 3160 if (!lp->stopped)
d6b0e80f 3161 {
25289eb2
PA
3162 ptid_t ptid = lp->ptid;
3163
d6b0e80f
AC
3164 stop_callback (lp, NULL);
3165 stop_wait_callback (lp, NULL);
25289eb2
PA
3166
3167 /* Resume if the lwp still exists, and the core wanted it
3168 running. */
12d9289a
PA
3169 lp = find_lwp_pid (ptid);
3170 if (lp != NULL)
25289eb2 3171 {
12d9289a
PA
3172 if (lp->last_resume_kind == resume_stop
3173 && lp->status == 0)
3174 {
3175 /* The core wanted the LWP to stop. Even if it stopped
3176 cleanly (with SIGSTOP), leave the event pending. */
3177 if (debug_linux_nat)
3178 fprintf_unfiltered (gdb_stdlog,
3179 "SARC: core wanted LWP %ld stopped "
3180 "(leaving SIGSTOP pending)\n",
3181 GET_LWP (lp->ptid));
3182 lp->status = W_STOPCODE (SIGSTOP);
3183 }
3184
3185 if (lp->status == 0)
3186 {
3187 if (debug_linux_nat)
3188 fprintf_unfiltered (gdb_stdlog,
3189 "SARC: re-resuming LWP %ld\n",
3190 GET_LWP (lp->ptid));
3191 resume_lwp (lp, lp->step);
3192 }
3193 else
3194 {
3195 if (debug_linux_nat)
3196 fprintf_unfiltered (gdb_stdlog,
3197 "SARC: not re-resuming LWP %ld "
3198 "(has pending)\n",
3199 GET_LWP (lp->ptid));
3200 if (new_pending_p)
3201 *new_pending_p = 1;
3202 }
25289eb2 3203 }
d6b0e80f
AC
3204 }
3205 return 0;
3206}
3207
02f3fc28 3208/* Check if we should go on and pass this event to common code.
12d9289a
PA
3209 Return the affected lwp if we are, or NULL otherwise. If we stop
3210 all lwps temporarily, we may end up with new pending events in some
3211 other lwp. In that case set *NEW_PENDING_P to true. */
3212
02f3fc28 3213static struct lwp_info *
0e5bf2a8 3214linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
02f3fc28
PA
3215{
3216 struct lwp_info *lp;
3217
12d9289a
PA
3218 *new_pending_p = 0;
3219
02f3fc28
PA
3220 lp = find_lwp_pid (pid_to_ptid (lwpid));
3221
3222 /* Check for stop events reported by a process we didn't already
3223 know about - anything not already in our LWP list.
3224
3225 If we're expecting to receive stopped processes after
3226 fork, vfork, and clone events, then we'll just add the
3227 new one to our list and go back to waiting for the event
3228 to be reported - the stopped process might be returned
0e5bf2a8
PA
3229 from waitpid before or after the event is.
3230
3231 But note the case of a non-leader thread exec'ing after the
 3232     leader has exited and gone from our lists.  The non-leader
3233 thread changes its tid to the tgid. */
3234
3235 if (WIFSTOPPED (status) && lp == NULL
3236 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3237 {
3238 /* A multi-thread exec after we had seen the leader exiting. */
3239 if (debug_linux_nat)
3240 fprintf_unfiltered (gdb_stdlog,
3241 "LLW: Re-adding thread group leader LWP %d.\n",
3242 lwpid);
3243
3244 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3245 lp->stopped = 1;
3246 lp->resumed = 1;
3247 add_thread (lp->ptid);
3248 }
3249
02f3fc28
PA
3250 if (WIFSTOPPED (status) && !lp)
3251 {
84636d28 3252 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3253 return NULL;
3254 }
3255
3256 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3257 our list, i.e. not part of the current process. This can happen
fd62cb89 3258 if we detach from a program we originally forked and then it
02f3fc28
PA
3259 exits. */
3260 if (!WIFSTOPPED (status) && !lp)
3261 return NULL;
3262
ca2163eb
PA
3263 /* Handle GNU/Linux's syscall SIGTRAPs. */
3264 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3265 {
3266 /* No longer need the sysgood bit. The ptrace event ends up
3267 recorded in lp->waitstatus if we care for it. We can carry
3268 on handling the event like a regular SIGTRAP from here
3269 on. */
3270 status = W_STOPCODE (SIGTRAP);
3271 if (linux_handle_syscall_trap (lp, 0))
3272 return NULL;
3273 }
02f3fc28 3274
ca2163eb
PA
3275 /* Handle GNU/Linux's extended waitstatus for trace events. */
3276 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3277 {
3278 if (debug_linux_nat)
3279 fprintf_unfiltered (gdb_stdlog,
3280 "LLW: Handling extended status 0x%06x\n",
3281 status);
3282 if (linux_handle_extended_wait (lp, status, 0))
3283 return NULL;
3284 }
3285
26ab7092 3286 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3287 {
3288 /* Save the trap's siginfo in case we need it later. */
3289 save_siginfo (lp);
3290
3291 save_sigtrap (lp);
3292 }
ca2163eb 3293
02f3fc28 3294 /* Check if the thread has exited. */
d90e17a7
PA
3295 if ((WIFEXITED (status) || WIFSIGNALED (status))
3296 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3297 {
9db03742
JB
3298 /* If this is the main thread, we must stop all threads and verify
3299 if they are still alive. This is because in the nptl thread model
3300 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3301 other than the main thread. We only get the main thread exit
3302 signal once all child threads have already exited. If we
3303 stop all the threads and use the stop_wait_callback to check
3304 if they have exited we can determine whether this signal
3305 should be ignored or whether it means the end of the debugged
3306 application, regardless of which threading model is being
5d3b6af6 3307 used. */
02f3fc28
PA
3308 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3309 {
3310 lp->stopped = 1;
d90e17a7 3311 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
12d9289a 3312 stop_and_resume_callback, new_pending_p);
02f3fc28
PA
3313 }
3314
3315 if (debug_linux_nat)
3316 fprintf_unfiltered (gdb_stdlog,
3317 "LLW: %s exited.\n",
3318 target_pid_to_str (lp->ptid));
3319
d90e17a7 3320 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3321 {
3322 /* If there is at least one more LWP, then the exit signal
3323 was not the end of the debugged application and should be
3324 ignored. */
3325 exit_lwp (lp);
3326 return NULL;
3327 }
02f3fc28
PA
3328 }
3329
3330 /* Check if the current LWP has previously exited. In the nptl
3331 thread model, LWPs other than the main thread do not issue
3332 signals when they exit so we must check whenever the thread has
3333 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3334 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3335 {
d90e17a7
PA
3336 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3337
02f3fc28
PA
3338 if (debug_linux_nat)
3339 fprintf_unfiltered (gdb_stdlog,
3340 "LLW: %s exited.\n",
3341 target_pid_to_str (lp->ptid));
3342
3343 exit_lwp (lp);
3344
3345 /* Make sure there is at least one thread running. */
d90e17a7 3346 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3347
3348 /* Discard the event. */
3349 return NULL;
3350 }
3351
3352 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3353 an attempt to stop an LWP. */
3354 if (lp->signalled
3355 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3356 {
3357 if (debug_linux_nat)
3358 fprintf_unfiltered (gdb_stdlog,
3359 "LLW: Delayed SIGSTOP caught for %s.\n",
3360 target_pid_to_str (lp->ptid));
3361
02f3fc28
PA
3362 lp->signalled = 0;
3363
25289eb2
PA
3364 if (lp->last_resume_kind != resume_stop)
3365 {
3366 /* This is a delayed SIGSTOP. */
02f3fc28 3367
25289eb2
PA
3368 registers_changed ();
3369
7b50312a
PA
3370 if (linux_nat_prepare_to_resume != NULL)
3371 linux_nat_prepare_to_resume (lp);
25289eb2 3372 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28 3373 lp->step, TARGET_SIGNAL_0);
25289eb2
PA
3374 if (debug_linux_nat)
3375 fprintf_unfiltered (gdb_stdlog,
3376 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3377 lp->step ?
3378 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3379 target_pid_to_str (lp->ptid));
02f3fc28 3380
25289eb2
PA
3381 lp->stopped = 0;
3382 gdb_assert (lp->resumed);
02f3fc28 3383
25289eb2
PA
3384 /* Discard the event. */
3385 return NULL;
3386 }
02f3fc28
PA
3387 }
3388
57380f4e
DJ
3389 /* Make sure we don't report a SIGINT that we have already displayed
3390 for another thread. */
3391 if (lp->ignore_sigint
3392 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3393 {
3394 if (debug_linux_nat)
3395 fprintf_unfiltered (gdb_stdlog,
3396 "LLW: Delayed SIGINT caught for %s.\n",
3397 target_pid_to_str (lp->ptid));
3398
3399 /* This is a delayed SIGINT. */
3400 lp->ignore_sigint = 0;
3401
3402 registers_changed ();
7b50312a
PA
3403 if (linux_nat_prepare_to_resume != NULL)
3404 linux_nat_prepare_to_resume (lp);
28439f5e 3405 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3406 lp->step, TARGET_SIGNAL_0);
3407 if (debug_linux_nat)
3408 fprintf_unfiltered (gdb_stdlog,
3409 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3410 lp->step ?
3411 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3412 target_pid_to_str (lp->ptid));
3413
3414 lp->stopped = 0;
3415 gdb_assert (lp->resumed);
3416
3417 /* Discard the event. */
3418 return NULL;
3419 }
3420
02f3fc28
PA
3421 /* An interesting event. */
3422 gdb_assert (lp);
ca2163eb 3423 lp->status = status;
02f3fc28
PA
3424 return lp;
3425}
3426
0e5bf2a8
PA
3427/* Detect zombie thread group leaders, and "exit" them. We can't reap
3428 their exits until all other threads in the group have exited. */
3429
3430static void
3431check_zombie_leaders (void)
3432{
3433 struct inferior *inf;
3434
3435 ALL_INFERIORS (inf)
3436 {
3437 struct lwp_info *leader_lp;
3438
3439 if (inf->pid == 0)
3440 continue;
3441
3442 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3443 if (leader_lp != NULL
3444 /* Check if there are other threads in the group, as we may
3445 have raced with the inferior simply exiting. */
3446 && num_lwps (inf->pid) > 1
3447 && linux_lwp_is_zombie (inf->pid))
3448 {
3449 if (debug_linux_nat)
3450 fprintf_unfiltered (gdb_stdlog,
3451 "CZL: Thread group leader %d zombie "
3452 "(it exited, or another thread execd).\n",
3453 inf->pid);
3454
3455 /* A leader zombie can mean one of two things:
3456
3457 - It exited, and there's an exit status pending
3458 available, or only the leader exited (not the whole
3459 program). In the latter case, we can't waitpid the
3460 leader's exit status until all other threads are gone.
3461
3462 - There are 3 or more threads in the group, and a thread
3463 other than the leader exec'd. On an exec, the Linux
3464 kernel destroys all other threads (except the execing
3465 one) in the thread group, and resets the execing thread's
3466 tid to the tgid. No exit notification is sent for the
3467 execing thread -- from the ptracer's perspective, it
3468 appears as though the execing thread just vanishes.
3469 Until we reap all other threads except the leader and the
3470 execing thread, the leader will be zombie, and the
3471 execing thread will be in `D (disc sleep)'. As soon as
3472 all other threads are reaped, the execing thread changes
3473 its tid to the tgid, and the previous (zombie) leader
3474 vanishes, giving place to the "new" leader. We could try
3475 distinguishing the exit and exec cases, by waiting once
3476 more, and seeing if something comes out, but it doesn't
3477 sound useful. The previous leader _does_ go away, and
3478 we'll re-add the new one once we see the exec event
3479 (which is just the same as what would happen if the
3480 previous leader did exit voluntarily before some other
3481 thread execs). */
3482
3483 if (debug_linux_nat)
3484 fprintf_unfiltered (gdb_stdlog,
3485 "CZL: Thread group leader %d vanished.\n",
3486 inf->pid);
3487 exit_lwp (leader_lp);
3488 }
3489 }
3490}
3491
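/* Illustrative sketch: the linux_lwp_is_zombie check used above boils
   down to reading the "State:" line of /proc/PID/status and testing
   for a 'Z' state.  The function name and the exact parsing here are
   assumptions made for this sketch, not the real implementation.  */

static int
example_proc_state_is_zombie (long lwp)
{
  char filename[64];
  char line[128];
  FILE *status_file;
  int is_zombie = 0;

  xsnprintf (filename, sizeof (filename), "/proc/%ld/status", lwp);
  status_file = fopen (filename, "r");
  if (status_file == NULL)
    return 0;

  while (fgets (line, sizeof (line), status_file) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        const char *state = line + 6;

        /* Skip the whitespace after the tag; the state letter
           follows, e.g. "State:\tZ (zombie)".  */
        while (*state == ' ' || *state == '\t')
          state++;
        is_zombie = (*state == 'Z');
        break;
      }

  fclose (status_file);
  return is_zombie;
}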
d6b0e80f 3492static ptid_t
7feb7d06 3493linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3494 ptid_t ptid, struct target_waitstatus *ourstatus,
3495 int target_options)
d6b0e80f 3496{
7feb7d06 3497 static sigset_t prev_mask;
4b60df3d 3498 enum resume_kind last_resume_kind;
12d9289a 3499 struct lwp_info *lp;
12d9289a 3500 int status;
d6b0e80f 3501
01124a23 3502 if (debug_linux_nat)
b84876c2
PA
3503 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3504
f973ed9c
DJ
3505 /* The first time we get here after starting a new inferior, we may
3506 not have added it to the LWP list yet - this is the earliest
3507 moment at which we know its PID. */
d90e17a7 3508 if (ptid_is_pid (inferior_ptid))
f973ed9c 3509 {
27c9d204
PA
3510 /* Upgrade the main thread's ptid. */
3511 thread_change_ptid (inferior_ptid,
3512 BUILD_LWP (GET_PID (inferior_ptid),
3513 GET_PID (inferior_ptid)));
3514
f973ed9c
DJ
3515 lp = add_lwp (inferior_ptid);
3516 lp->resumed = 1;
3517 }
3518
7feb7d06
PA
3519 /* Make sure SIGCHLD is blocked. */
3520 block_child_signals (&prev_mask);
d6b0e80f
AC
3521
3522retry:
d90e17a7
PA
3523 lp = NULL;
3524 status = 0;
d6b0e80f
AC
3525
3526 /* First check if there is a LWP with a wait status pending. */
0e5bf2a8 3527 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d6b0e80f 3528 {
0e5bf2a8 3529 /* Any LWP in the PTID group that's been resumed will do. */
d90e17a7 3530 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3531 if (lp)
3532 {
ca2163eb 3533 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3534 fprintf_unfiltered (gdb_stdlog,
3535 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3536 status_to_str (lp->status),
d6b0e80f
AC
3537 target_pid_to_str (lp->ptid));
3538 }
d6b0e80f
AC
3539 }
3540 else if (is_lwp (ptid))
3541 {
3542 if (debug_linux_nat)
3543 fprintf_unfiltered (gdb_stdlog,
3544 "LLW: Waiting for specific LWP %s.\n",
3545 target_pid_to_str (ptid));
3546
3547 /* We have a specific LWP to check. */
3548 lp = find_lwp_pid (ptid);
3549 gdb_assert (lp);
d6b0e80f 3550
ca2163eb 3551 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3552 fprintf_unfiltered (gdb_stdlog,
3553 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3554 status_to_str (lp->status),
d6b0e80f
AC
3555 target_pid_to_str (lp->ptid));
3556
d90e17a7
PA
3557 /* We check for lp->waitstatus in addition to lp->status,
3558 because we can have pending process exits recorded in
3559 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3560 an additional lp->status_p flag. */
ca2163eb 3561 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3562 lp = NULL;
d6b0e80f
AC
3563 }
3564
25289eb2 3565 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
d6b0e80f
AC
3566 {
3567 /* A pending SIGSTOP may interfere with the normal stream of
3568 events. In a typical case where interference is a problem,
3569 we have a SIGSTOP signal pending for LWP A while
3570 single-stepping it, encounter an event in LWP B, and take the
3571 pending SIGSTOP while trying to stop LWP A. After processing
3572 the event in LWP B, LWP A is continued, and we'll never see
3573 the SIGTRAP associated with the last time we were
3574 single-stepping LWP A. */
3575
3576 /* Resume the thread. It should halt immediately returning the
3577 pending SIGSTOP. */
3578 registers_changed ();
7b50312a
PA
3579 if (linux_nat_prepare_to_resume != NULL)
3580 linux_nat_prepare_to_resume (lp);
28439f5e 3581 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3582 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3583 if (debug_linux_nat)
3584 fprintf_unfiltered (gdb_stdlog,
3585 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3586 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3587 target_pid_to_str (lp->ptid));
3588 lp->stopped = 0;
3589 gdb_assert (lp->resumed);
3590
ca2163eb
PA
3591 /* Catch the pending SIGSTOP. */
3592 status = lp->status;
3593 lp->status = 0;
3594
d6b0e80f 3595 stop_wait_callback (lp, NULL);
ca2163eb
PA
3596
3597 /* If the lp->status field isn't empty, we caught another signal
3598 while flushing the SIGSTOP. Return it back to the event
3599 queue of the LWP, as we already have an event to handle. */
3600 if (lp->status)
3601 {
3602 if (debug_linux_nat)
3603 fprintf_unfiltered (gdb_stdlog,
3604 "LLW: kill %s, %s\n",
3605 target_pid_to_str (lp->ptid),
3606 status_to_str (lp->status));
3607 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3608 }
3609
3610 lp->status = status;
d6b0e80f
AC
3611 }
3612
b84876c2
PA
3613 if (!target_can_async_p ())
3614 {
3615 /* Causes SIGINT to be passed on to the attached process. */
3616 set_sigint_trap ();
b84876c2 3617 }
d6b0e80f 3618
0e5bf2a8 3619 /* But if we don't find a pending event, we'll have to wait. */
7feb7d06 3620
d90e17a7 3621 while (lp == NULL)
d6b0e80f
AC
3622 {
3623 pid_t lwpid;
3624
0e5bf2a8
PA
3625 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3626 quirks:
3627
3628 - If the thread group leader exits while other threads in the
3629 thread group still exist, waitpid(TGID, ...) hangs. That
3630 waitpid won't return an exit status until the other threads
3631 in the group are reaped.
3632
3633 - When a non-leader thread execs, that thread just vanishes
3634 without reporting an exit (so we'd hang if we waited for it
3635 explicitly in that case). The exec event is reported to
3636 the TGID pid. */
3637
3638 errno = 0;
3639 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3640 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3641 lwpid = my_waitpid (-1, &status, WNOHANG);
3642
3643 if (debug_linux_nat)
3644 fprintf_unfiltered (gdb_stdlog,
3645 "LNW: waitpid(-1, ...) returned %d, %s\n",
3646 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3647
d6b0e80f
AC
3648 if (lwpid > 0)
3649 {
12d9289a
PA
3650 /* If this is true, then we paused LWPs momentarily, and may
3651 now have pending events to handle. */
3652 int new_pending;
3653
d6b0e80f
AC
3654 if (debug_linux_nat)
3655 {
3656 fprintf_unfiltered (gdb_stdlog,
3657 "LLW: waitpid %ld received %s\n",
3658 (long) lwpid, status_to_str (status));
3659 }
3660
0e5bf2a8 3661 lp = linux_nat_filter_event (lwpid, status, &new_pending);
d90e17a7 3662
33355866
JK
3663 /* STATUS is now no longer valid, use LP->STATUS instead. */
3664 status = 0;
3665
0e5bf2a8 3666 if (lp && !ptid_match (lp->ptid, ptid))
d6b0e80f 3667 {
e3e9f5a2
PA
3668 gdb_assert (lp->resumed);
3669
d90e17a7 3670 if (debug_linux_nat)
3e43a32a
MS
3671 fprintf (stderr,
3672 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3673 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3674
ca2163eb 3675 if (WIFSTOPPED (lp->status))
d90e17a7 3676 {
ca2163eb 3677 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3678 {
e3e9f5a2
PA
3679 /* Cancel breakpoint hits. The breakpoint may
3680 be removed before we fetch events from this
3681 process to report to the core. It is best
3682 not to assume the moribund breakpoints
3683 heuristic always handles these cases --- it
3684 could be that too many events go through to the
3685 core before this one is handled. All-stop
3686 always cancels breakpoint hits in all
3687 threads. */
3688 if (non_stop
00390b84 3689 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3690 && cancel_breakpoint (lp))
3691 {
3692 /* Throw away the SIGTRAP. */
3693 lp->status = 0;
3694
3695 if (debug_linux_nat)
3696 fprintf (stderr,
3e43a32a
MS
3697 "LLW: LWP %ld hit a breakpoint while"
3698 " waiting for another process;"
3699 " cancelled it\n",
e3e9f5a2
PA
3700 ptid_get_lwp (lp->ptid));
3701 }
3702 lp->stopped = 1;
d90e17a7
PA
3703 }
3704 else
3705 {
3706 lp->stopped = 1;
3707 lp->signalled = 0;
3708 }
3709 }
33355866 3710 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3711 {
3712 if (debug_linux_nat)
3e43a32a
MS
3713 fprintf (stderr,
3714 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3715 ptid_get_lwp (lp->ptid));
3716
3717 /* This was the last lwp in the process. Events are
3718 serialized to the GDB core, and we can't report
3719 this one right now; but since the core and the
3720 other target layers will want to be notified about
3721 the exit code/signal, leave the status pending for
3722 the next time we're able to report
3723 it. */
d90e17a7
PA
3724
3725 /* Prevent trying to stop this thread again. We'll
3726 never try to resume it because it has a pending
3727 status. */
3728 lp->stopped = 1;
3729
3730 /* Dead LWPs aren't expected to report a pending
3731 SIGSTOP. */
3732 lp->signalled = 0;
3733
3734 /* Store the pending event in the waitstatus as
3735 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3736 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3737 }
3738
3739 /* Keep looking. */
3740 lp = NULL;
d6b0e80f
AC
3741 }
3742
0e5bf2a8 3743 if (new_pending)
d90e17a7 3744 {
0e5bf2a8
PA
3745 /* Some LWP now has a pending event. Go all the way
3746 back to check it. */
3747 goto retry;
3748 }
12d9289a 3749
0e5bf2a8
PA
3750 if (lp)
3751 {
3752 /* We got an event to report to the core. */
3753 break;
d90e17a7 3754 }
0e5bf2a8
PA
3755
3756 /* Retry until nothing comes out of waitpid. A single
3757 SIGCHLD can indicate more than one child stopped. */
3758 continue;
d6b0e80f
AC
3759 }
3760
0e5bf2a8
PA
3761 /* Check for zombie thread group leaders. Those can't be reaped
3762 until all other threads in the thread group are. */
3763 check_zombie_leaders ();
d6b0e80f 3764
0e5bf2a8
PA
3765 /* If there are no resumed children left, bail. We'd be stuck
3766 forever in the sigsuspend call below otherwise. */
3767 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3768 {
3769 if (debug_linux_nat)
3770 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3771
0e5bf2a8 3772 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3773
0e5bf2a8
PA
3774 if (!target_can_async_p ())
3775 clear_sigint_trap ();
b84876c2 3776
0e5bf2a8
PA
3777 restore_child_signals_mask (&prev_mask);
3778 return minus_one_ptid;
d6b0e80f 3779 }
28736962 3780
0e5bf2a8
PA
3781 /* No interesting event to report to the core. */
3782
3783 if (target_options & TARGET_WNOHANG)
3784 {
01124a23 3785 if (debug_linux_nat)
28736962
PA
3786 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3787
0e5bf2a8 3788 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3789 restore_child_signals_mask (&prev_mask);
3790 return minus_one_ptid;
3791 }
d6b0e80f
AC
3792
3793 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3794 gdb_assert (lp == NULL);
0e5bf2a8
PA
3795
3796 /* Block until we get an event reported with SIGCHLD. */
3797 sigsuspend (&suspend_mask);
d6b0e80f
AC
3798 }
3799
b84876c2 3800 if (!target_can_async_p ())
d26b5354 3801 clear_sigint_trap ();
d6b0e80f
AC
3802
3803 gdb_assert (lp);
3804
ca2163eb
PA
3805 status = lp->status;
3806 lp->status = 0;
3807
d6b0e80f
AC
3808 /* Don't report signals that GDB isn't interested in, such as
3809 signals that are neither printed nor stopped upon. Stopping all
3810 threads can be a bit time-consuming so if we want decent
3811 performance with heavily multi-threaded programs, especially when
3812 they're using a high frequency timer, we'd better avoid it if we
3813 can. */
3814
3815 if (WIFSTOPPED (status))
3816 {
423ec54c 3817 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b0e80f 3818
2455069d
UW
3819 /* When using hardware single-step, we need to report every signal.
3820 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3821 if (!lp->step
2455069d 3822 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3823 {
3824 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3825 here? It is not clear we should. GDB may not expect
3826 other threads to run. On the other hand, not resuming
3827 newly attached threads may cause an unwanted delay in
3828 getting them running. */
3829 registers_changed ();
7b50312a
PA
3830 if (linux_nat_prepare_to_resume != NULL)
3831 linux_nat_prepare_to_resume (lp);
28439f5e 3832 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3833 lp->step, signo);
d6b0e80f
AC
3834 if (debug_linux_nat)
3835 fprintf_unfiltered (gdb_stdlog,
3836 "LLW: %s %s, %s (preempt 'handle')\n",
3837 lp->step ?
3838 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3839 target_pid_to_str (lp->ptid),
423ec54c
JK
3840 (signo != TARGET_SIGNAL_0
3841 ? strsignal (target_signal_to_host (signo))
3842 : "0"));
d6b0e80f 3843 lp->stopped = 0;
d6b0e80f
AC
3844 goto retry;
3845 }
3846
1ad15515 3847 if (!non_stop)
d6b0e80f 3848 {
1ad15515
PA
3849 /* Only do the below in all-stop, as we currently use SIGINT
3850 to implement target_stop (see linux_nat_stop) in
3851 non-stop. */
3852 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3853 {
3854 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3855 forwarded to the entire process group, that is, all LWPs
3856 will receive it - unless they're using CLONE_THREAD to
3857 share signals. Since we only want to report it once, we
3858 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3859 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3860 set_ignore_sigint, NULL);
1ad15515
PA
3861 lp->ignore_sigint = 0;
3862 }
3863 else
3864 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3865 }
3866 }
3867
3868 /* This LWP is stopped now. */
3869 lp->stopped = 1;
3870
3871 if (debug_linux_nat)
3872 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3873 status_to_str (status), target_pid_to_str (lp->ptid));
3874
4c28f408
PA
3875 if (!non_stop)
3876 {
3877 /* Now stop all other LWP's ... */
d90e17a7 3878 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3879
3880 /* ... and wait until all of them have reported back that
3881 they're no longer running. */
d90e17a7 3882 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3883
3884 /* If we're not waiting for a specific LWP, choose an event LWP
3885 from among those that have had events. Giving equal priority
3886 to all LWPs that have had events helps prevent
3887 starvation. */
0e5bf2a8 3888 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d90e17a7 3889 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3890
e3e9f5a2
PA
3891 /* Now that we've selected our final event LWP, cancel any
3892 breakpoints in other LWPs that have hit a GDB breakpoint.
3893 See the comment in cancel_breakpoints_callback to find out
3894 why. */
3895 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3896
4b60df3d
PA
3897 /* We'll need this to determine whether to report a SIGSTOP as
3898 TARGET_SIGNAL_0. Need to take a copy because
3899 resume_clear_callback clears it. */
3900 last_resume_kind = lp->last_resume_kind;
3901
e3e9f5a2
PA
3902 /* In all-stop, from the core's perspective, all LWPs are now
3903 stopped until a new resume action is sent over. */
3904 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3905 }
3906 else
25289eb2 3907 {
4b60df3d
PA
3908 /* See above. */
3909 last_resume_kind = lp->last_resume_kind;
3910 resume_clear_callback (lp, NULL);
25289eb2 3911 }
d6b0e80f 3912
26ab7092 3913 if (linux_nat_status_is_event (status))
d6b0e80f 3914 {
d6b0e80f
AC
3915 if (debug_linux_nat)
3916 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3917 "LLW: trap ptid is %s.\n",
3918 target_pid_to_str (lp->ptid));
d6b0e80f 3919 }
d6b0e80f
AC
3920
3921 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3922 {
3923 *ourstatus = lp->waitstatus;
3924 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3925 }
3926 else
3927 store_waitstatus (ourstatus, status);
3928
01124a23 3929 if (debug_linux_nat)
b84876c2
PA
3930 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3931
7feb7d06 3932 restore_child_signals_mask (&prev_mask);
1e225492 3933
4b60df3d 3934 if (last_resume_kind == resume_stop
25289eb2
PA
3935 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3936 && WSTOPSIG (status) == SIGSTOP)
3937 {
3938 /* This thread was requested to stop by GDB with
3939 target_stop, and it stopped cleanly, so report it as SIG0. The
3940 use of SIGSTOP is an implementation detail. */
3941 ourstatus->value.sig = TARGET_SIGNAL_0;
3942 }
3943
1e225492
JK
3944 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3945 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3946 lp->core = -1;
3947 else
3948 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3949
f973ed9c 3950 return lp->ptid;
d6b0e80f
AC
3951}
3952
e3e9f5a2
PA
3953/* Resume LWPs that are currently stopped without any pending status
3954 to report, but are resumed from the core's perspective. */
3955
3956static int
3957resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3958{
3959 ptid_t *wait_ptid_p = data;
3960
3961 if (lp->stopped
3962 && lp->resumed
3963 && lp->status == 0
3964 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3965 {
336060f3
PA
3966 struct regcache *regcache = get_thread_regcache (lp->ptid);
3967 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3968 CORE_ADDR pc = regcache_read_pc (regcache);
3969
e3e9f5a2
PA
3970 gdb_assert (is_executing (lp->ptid));
3971
3972 /* Don't bother if there's a breakpoint at PC that we'd hit
3973 immediately, and we're not waiting for this LWP. */
3974 if (!ptid_match (lp->ptid, *wait_ptid_p))
3975 {
e3e9f5a2
PA
3976 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3977 return 0;
3978 }
3979
3980 if (debug_linux_nat)
3981 fprintf_unfiltered (gdb_stdlog,
336060f3
PA
3982 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3983 target_pid_to_str (lp->ptid),
3984 paddress (gdbarch, pc),
3985 lp->step);
e3e9f5a2 3986
336060f3 3987 registers_changed ();
7b50312a
PA
3988 if (linux_nat_prepare_to_resume != NULL)
3989 linux_nat_prepare_to_resume (lp);
e3e9f5a2
PA
3990 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3991 lp->step, TARGET_SIGNAL_0);
3992 lp->stopped = 0;
3993 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3994 lp->stopped_by_watchpoint = 0;
3995 }
3996
3997 return 0;
3998}
3999
7feb7d06
PA
4000static ptid_t
4001linux_nat_wait (struct target_ops *ops,
47608cb1
PA
4002 ptid_t ptid, struct target_waitstatus *ourstatus,
4003 int target_options)
7feb7d06
PA
4004{
4005 ptid_t event_ptid;
4006
4007 if (debug_linux_nat)
3e43a32a
MS
4008 fprintf_unfiltered (gdb_stdlog,
4009 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
4010
4011 /* Flush the async file first. */
4012 if (target_can_async_p ())
4013 async_file_flush ();
4014
e3e9f5a2
PA
4015 /* Resume LWPs that are currently stopped without any pending status
4016 to report, but are resumed from the core's perspective. LWPs get
4017 in this state if we find them stopping at a time we're not
4018 interested in reporting the event (target_wait on a
4019 specific_process, for example, see linux_nat_wait_1), and
4020 meanwhile the event became uninteresting. Don't bother resuming
4021 LWPs we're not going to wait for if they'd stop immediately. */
4022 if (non_stop)
4023 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4024
47608cb1 4025 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
4026
4027 /* If we requested any event, and something came out, assume there
4028 may be more. If we requested a specific lwp or process, also
4029 assume there may be more. */
4030 if (target_can_async_p ()
6953d224
PA
4031 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4032 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
4033 || !ptid_equal (ptid, minus_one_ptid)))
4034 async_file_mark ();
4035
4036 /* Get ready for the next event. */
4037 if (target_can_async_p ())
4038 target_async (inferior_event_handler, 0);
4039
4040 return event_ptid;
4041}
4042
d6b0e80f
AC
4043static int
4044kill_callback (struct lwp_info *lp, void *data)
4045{
ed731959
JK
4046 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4047
4048 errno = 0;
4049 kill (GET_LWP (lp->ptid), SIGKILL);
4050 if (debug_linux_nat)
4051 fprintf_unfiltered (gdb_stdlog,
4052 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4053 target_pid_to_str (lp->ptid),
4054 errno ? safe_strerror (errno) : "OK");
4055
4056 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4057
d6b0e80f
AC
4058 errno = 0;
4059 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4060 if (debug_linux_nat)
4061 fprintf_unfiltered (gdb_stdlog,
4062 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4063 target_pid_to_str (lp->ptid),
4064 errno ? safe_strerror (errno) : "OK");
4065
4066 return 0;
4067}
4068
4069static int
4070kill_wait_callback (struct lwp_info *lp, void *data)
4071{
4072 pid_t pid;
4073
4074 /* We must make sure that there are no pending events (delayed
4075 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4076 program doesn't interfere with any following debugging session. */
4077
4078 /* For cloned processes we must check both with __WCLONE and
4079 without, since the exit status of a cloned process isn't reported
4080 with __WCLONE. */
4081 if (lp->cloned)
4082 {
4083 do
4084 {
58aecb61 4085 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 4086 if (pid != (pid_t) -1)
d6b0e80f 4087 {
e85a822c
DJ
4088 if (debug_linux_nat)
4089 fprintf_unfiltered (gdb_stdlog,
4090 "KWC: wait %s received unknown.\n",
4091 target_pid_to_str (lp->ptid));
4092 /* The Linux kernel sometimes fails to kill a thread
4093 completely after PTRACE_KILL; that goes from the stop
4094 point in do_fork out to the one in
4095 get_signal_to_deliver and waits again. So kill it
4096 again. */
4097 kill_callback (lp, NULL);
d6b0e80f
AC
4098 }
4099 }
4100 while (pid == GET_LWP (lp->ptid));
4101
4102 gdb_assert (pid == -1 && errno == ECHILD);
4103 }
4104
4105 do
4106 {
58aecb61 4107 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 4108 if (pid != (pid_t) -1)
d6b0e80f 4109 {
e85a822c
DJ
4110 if (debug_linux_nat)
4111 fprintf_unfiltered (gdb_stdlog,
4112 "KWC: wait %s received unk.\n",
4113 target_pid_to_str (lp->ptid));
4114 /* See the call to kill_callback above. */
4115 kill_callback (lp, NULL);
d6b0e80f
AC
4116 }
4117 }
4118 while (pid == GET_LWP (lp->ptid));
4119
4120 gdb_assert (pid == -1 && errno == ECHILD);
4121 return 0;
4122}
4123
4124static void
7d85a9c0 4125linux_nat_kill (struct target_ops *ops)
d6b0e80f 4126{
f973ed9c
DJ
4127 struct target_waitstatus last;
4128 ptid_t last_ptid;
4129 int status;
d6b0e80f 4130
f973ed9c
DJ
4131 /* If we're stopped while forking and we haven't followed yet,
4132 kill the other task. We need to do this first because the
4133 parent will be sleeping if this is a vfork. */
d6b0e80f 4134
f973ed9c 4135 get_last_target_status (&last_ptid, &last);
d6b0e80f 4136
f973ed9c
DJ
4137 if (last.kind == TARGET_WAITKIND_FORKED
4138 || last.kind == TARGET_WAITKIND_VFORKED)
4139 {
3a3e9ee3 4140 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
4141 wait (&status);
4142 }
4143
4144 if (forks_exist_p ())
7feb7d06 4145 linux_fork_killall ();
f973ed9c
DJ
4146 else
4147 {
d90e17a7 4148 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 4149
4c28f408
PA
4150 /* Stop all threads before killing them, since ptrace requires
4151 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 4152 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
4153 /* ... and wait until all of them have reported back that
4154 they're no longer running. */
d90e17a7 4155 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 4156
f973ed9c 4157 /* Kill all LWP's ... */
d90e17a7 4158 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
4159
4160 /* ... and wait until we've flushed all events. */
d90e17a7 4161 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
4162 }
4163
4164 target_mourn_inferior ();
d6b0e80f
AC
4165}
4166
4167static void
136d6dae 4168linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 4169{
d90e17a7 4170 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 4171
f973ed9c 4172 if (! forks_exist_p ())
d90e17a7
PA
4173 /* Normal case, no other forks available. */
4174 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
4175 else
4176 /* Multi-fork case. The current inferior_ptid has exited, but
4177 there are other viable forks to debug. Delete the exiting
4178 one and context-switch to the first available. */
4179 linux_fork_mourn_inferior ();
d6b0e80f
AC
4180}
4181
5b009018
PA
4182/* Convert a native/host siginfo object, into/from the siginfo in the
4183 layout of the inferiors' architecture. */
4184
4185static void
4186siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4187{
4188 int done = 0;
4189
4190 if (linux_nat_siginfo_fixup != NULL)
4191 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4192
4193 /* If there was no callback, or the callback didn't do anything,
4194 then just do a straight memcpy. */
4195 if (!done)
4196 {
4197 if (direction == 1)
4198 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4199 else
4200 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4201 }
4202}
4203
4aa995e1
PA
4204static LONGEST
4205linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4206 const char *annex, gdb_byte *readbuf,
4207 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4208{
4aa995e1
PA
4209 int pid;
4210 struct siginfo siginfo;
5b009018 4211 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
4212
4213 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4214 gdb_assert (readbuf || writebuf);
4215
4216 pid = GET_LWP (inferior_ptid);
4217 if (pid == 0)
4218 pid = GET_PID (inferior_ptid);
4219
4220 if (offset > sizeof (siginfo))
4221 return -1;
4222
4223 errno = 0;
4224 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4225 if (errno != 0)
4226 return -1;
4227
5b009018
PA
4228 /* When GDB is built as a 64-bit application, ptrace writes into
4229 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4230 inferior with a 64-bit GDB should look the same as debugging it
4231 with a 32-bit GDB, we need to convert it. GDB core always sees
4232 the converted layout, so any read/write will have to be done
4233 post-conversion. */
4234 siginfo_fixup (&siginfo, inf_siginfo, 0);
4235
4aa995e1
PA
4236 if (offset + len > sizeof (siginfo))
4237 len = sizeof (siginfo) - offset;
4238
4239 if (readbuf != NULL)
5b009018 4240 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4241 else
4242 {
5b009018
PA
4243 memcpy (inf_siginfo + offset, writebuf, len);
4244
4245 /* Convert back to ptrace layout before flushing it out. */
4246 siginfo_fixup (&siginfo, inf_siginfo, 1);
4247
4aa995e1
PA
4248 errno = 0;
4249 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4250 if (errno != 0)
4251 return -1;
4252 }
4253
4254 return len;
4255}
4256
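/* Hedged usage sketch: from the core's side, the siginfo exposed by
   linux_xfer_siginfo above is reached through the generic partial
   transfer interface, roughly as below.  The function name is an
   assumption for illustration, and the direct use of the host
   "struct siginfo" assumes the inferior's siginfo layout matches the
   host's (see siginfo_fixup above for when it does not).  */

static void
example_read_current_siginfo (void)
{
  struct siginfo siginfo;
  LONGEST len;

  /* Read the selected thread's siginfo, starting at offset 0.  */
  len = target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
                     (gdb_byte *) &siginfo, 0, sizeof (siginfo));
  if (len == (LONGEST) sizeof (siginfo))
    printf_filtered ("si_signo = %d\n", siginfo.si_signo);
}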
10d6c8cd
DJ
4257static LONGEST
4258linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4259 const char *annex, gdb_byte *readbuf,
4260 const gdb_byte *writebuf,
4261 ULONGEST offset, LONGEST len)
d6b0e80f 4262{
4aa995e1 4263 struct cleanup *old_chain;
10d6c8cd 4264 LONGEST xfer;
d6b0e80f 4265
4aa995e1
PA
4266 if (object == TARGET_OBJECT_SIGNAL_INFO)
4267 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4268 offset, len);
4269
c35b1492
PA
4270 /* The target is connected but no live inferior is selected. Pass
4271 this request down to a lower stratum (e.g., the executable
4272 file). */
4273 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4274 return 0;
4275
4aa995e1
PA
4276 old_chain = save_inferior_ptid ();
4277
d6b0e80f
AC
4278 if (is_lwp (inferior_ptid))
4279 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4280
10d6c8cd
DJ
4281 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4282 offset, len);
d6b0e80f
AC
4283
4284 do_cleanups (old_chain);
4285 return xfer;
4286}
4287
4288static int
28439f5e 4289linux_thread_alive (ptid_t ptid)
d6b0e80f 4290{
8c6a60d1 4291 int err, tmp_errno;
4c28f408 4292
d6b0e80f
AC
4293 gdb_assert (is_lwp (ptid));
4294
4c28f408
PA
4295 /* Send signal 0 instead of using ptrace, because ptracing a
4296 running thread errors out claiming that the thread doesn't
4297 exist. */
4298 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4299 tmp_errno = errno;
d6b0e80f
AC
4300 if (debug_linux_nat)
4301 fprintf_unfiltered (gdb_stdlog,
4c28f408 4302 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4303 target_pid_to_str (ptid),
8c6a60d1 4304 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4305
4c28f408 4306 if (err != 0)
d6b0e80f
AC
4307 return 0;
4308
4309 return 1;
4310}
4311
28439f5e
PA
4312static int
4313linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4314{
4315 return linux_thread_alive (ptid);
4316}
4317
d6b0e80f 4318static char *
117de6a9 4319linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4320{
4321 static char buf[64];
4322
a0ef4274 4323 if (is_lwp (ptid)
d90e17a7
PA
4324 && (GET_PID (ptid) != GET_LWP (ptid)
4325 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4326 {
4327 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4328 return buf;
4329 }
4330
4331 return normal_pid_to_str (ptid);
4332}
4333
4694da01
TT
4334static char *
4335linux_nat_thread_name (struct thread_info *thr)
4336{
4337 int pid = ptid_get_pid (thr->ptid);
4338 long lwp = ptid_get_lwp (thr->ptid);
4339#define FORMAT "/proc/%d/task/%ld/comm"
4340 char buf[sizeof (FORMAT) + 30];
4341 FILE *comm_file;
4342 char *result = NULL;
4343
4344 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4345 comm_file = fopen (buf, "r");
4346 if (comm_file)
4347 {
4348 /* Not exported by the kernel, so we define it here. */
4349#define COMM_LEN 16
4350 static char line[COMM_LEN + 1];
4351
4352 if (fgets (line, sizeof (line), comm_file))
4353 {
4354 char *nl = strchr (line, '\n');
4355
4356 if (nl)
4357 *nl = '\0';
4358 if (*line != '\0')
4359 result = line;
4360 }
4361
4362 fclose (comm_file);
4363 }
4364
4365#undef COMM_LEN
4366#undef FORMAT
4367
4368 return result;
4369}
4370
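/* For reference: the comm file read above contains the thread name as
   set with prctl (PR_SET_NAME) or pthread_setname_np, truncated by
   the kernel to 15 characters plus a trailing newline, for example
   "worker-thread-1\n"; hence the 16-byte COMM_LEN used for the
   buffer.  */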
dba24537
AC
4371/* Accepts an integer PID; Returns a string representing a file that
4372 can be opened to get the symbols for the child process. */
4373
6d8fd2b7
UW
4374static char *
4375linux_child_pid_to_exec_file (int pid)
dba24537
AC
4376{
4377 char *name1, *name2;
4378
4379 name1 = xmalloc (MAXPATHLEN);
4380 name2 = xmalloc (MAXPATHLEN);
4381 make_cleanup (xfree, name1);
4382 make_cleanup (xfree, name2);
4383 memset (name2, 0, MAXPATHLEN);
4384
4385 sprintf (name1, "/proc/%d/exe", pid);
4386 if (readlink (name1, name2, MAXPATHLEN) > 0)
4387 return name2;
4388 else
4389 return name1;
4390}
4391
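/* For example (illustrative paths): for PID 1234 the function above
   returns the target of the /proc/1234/exe symlink, such as
   "/usr/bin/inferior", and falls back to the literal "/proc/1234/exe"
   path if the link cannot be read.  */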
4392/* Service function for corefiles and info proc. */
4393
4394static int
4395read_mapping (FILE *mapfile,
4396 long long *addr,
4397 long long *endaddr,
4398 char *permissions,
4399 long long *offset,
4400 char *device, long long *inode, char *filename)
4401{
4402 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4403 addr, endaddr, permissions, offset, device, inode);
4404
2e14c2ea
MS
4405 filename[0] = '\0';
4406 if (ret > 0 && ret != EOF)
dba24537
AC
4407 {
4408 /* Eat everything up to EOL for the filename. This will prevent
4409 weird filenames (such as one with embedded whitespace) from
4410 confusing this code. It also makes this code more robust in
4411 respect to annotations the kernel may add after the filename.
4412
4413 Note the filename is used for informational purposes
4414 only. */
4415 ret += fscanf (mapfile, "%[^\n]\n", filename);
4416 }
2e14c2ea 4417
dba24537
AC
4418 return (ret != 0 && ret != EOF);
4419}
4420
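/* For reference, an illustrative /proc/PID/maps line in the layout
   read_mapping below expects -- start/end addresses, permissions,
   offset, device, inode, and an optional filename:

     00400000-0040b000 r-xp 00000000 08:01 1311 /usr/bin/inferior  */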
4421/* Fills the "to_find_memory_regions" target vector. Lists the memory
4422 regions in the inferior for a corefile. */
4423
4424static int
b8edc417 4425linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
dba24537 4426{
89ecc4f5 4427 int pid = PIDGET (inferior_ptid);
dba24537
AC
4428 char mapsfilename[MAXPATHLEN];
4429 FILE *mapsfile;
4430 long long addr, endaddr, size, offset, inode;
4431 char permissions[8], device[8], filename[MAXPATHLEN];
4432 int read, write, exec;
7c8a8b04 4433 struct cleanup *cleanup;
dba24537
AC
4434
4435 /* Compose the filename for the /proc memory map, and open it. */
89ecc4f5 4436 sprintf (mapsfilename, "/proc/%d/maps", pid);
dba24537 4437 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 4438 error (_("Could not open %s."), mapsfilename);
7c8a8b04 4439 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
4440
4441 if (info_verbose)
4442 fprintf_filtered (gdb_stdout,
4443 "Reading memory regions from %s\n", mapsfilename);
4444
4445 /* Now iterate until end-of-file. */
4446 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4447 &offset, &device[0], &inode, &filename[0]))
4448 {
4449 size = endaddr - addr;
4450
4451 /* Get the segment's permissions. */
4452 read = (strchr (permissions, 'r') != 0);
4453 write = (strchr (permissions, 'w') != 0);
4454 exec = (strchr (permissions, 'x') != 0);
4455
4456 if (info_verbose)
4457 {
4458 fprintf_filtered (gdb_stdout,
2244ba2e
PM
4459 "Save segment, %s bytes at %s (%c%c%c)",
4460 plongest (size), paddress (target_gdbarch, addr),
dba24537
AC
4461 read ? 'r' : ' ',
4462 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 4463 if (filename[0])
dba24537
AC
4464 fprintf_filtered (gdb_stdout, " for %s", filename);
4465 fprintf_filtered (gdb_stdout, "\n");
4466 }
4467
4468 /* Invoke the callback function to create the corefile
4469 segment. */
4470 func (addr, size, read, write, exec, obfd);
4471 }
7c8a8b04 4472 do_cleanups (cleanup);
dba24537
AC
4473 return 0;
4474}
4475
2020b7ab
PA
4476static int
4477find_signalled_thread (struct thread_info *info, void *data)
4478{
16c381f0 4479 if (info->suspend.stop_signal != TARGET_SIGNAL_0
2020b7ab
PA
4480 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4481 return 1;
4482
4483 return 0;
4484}
4485
4486static enum target_signal
4487find_stop_signal (void)
4488{
4489 struct thread_info *info =
4490 iterate_over_threads (find_signalled_thread, NULL);
4491
4492 if (info)
16c381f0 4493 return info->suspend.stop_signal;
2020b7ab
PA
4494 else
4495 return TARGET_SIGNAL_0;
4496}
4497
dba24537
AC
4498/* Records the thread's register state for the corefile note
4499 section. */
4500
4501static char *
4502linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
4503 char *note_data, int *note_size,
4504 enum target_signal stop_signal)
dba24537 4505{
dba24537 4506 unsigned long lwp = ptid_get_lwp (ptid);
c2250ad1
UW
4507 struct gdbarch *gdbarch = target_gdbarch;
4508 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4f844a66 4509 const struct regset *regset;
55e969c1 4510 int core_regset_p;
594f7785 4511 struct cleanup *old_chain;
17ea7499
CES
4512 struct core_regset_section *sect_list;
4513 char *gdb_regset;
594f7785
UW
4514
4515 old_chain = save_inferior_ptid ();
4516 inferior_ptid = ptid;
4517 target_fetch_registers (regcache, -1);
4518 do_cleanups (old_chain);
4f844a66
DM
4519
4520 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
4521 sect_list = gdbarch_core_regset_sections (gdbarch);
4522
17ea7499
CES
4523 /* The loop below uses the new struct core_regset_section, which stores
4524 the supported section names and sizes for the core file. Note that
4525 note PRSTATUS needs to be treated specially. But the other notes are
4526 structurally the same, so they can benefit from the new struct. */
4527 if (core_regset_p && sect_list != NULL)
4528 while (sect_list->sect_name != NULL)
4529 {
17ea7499
CES
4530 regset = gdbarch_regset_from_core_section (gdbarch,
4531 sect_list->sect_name,
4532 sect_list->size);
4533 gdb_assert (regset && regset->collect_regset);
4534 gdb_regset = xmalloc (sect_list->size);
4535 regset->collect_regset (regset, regcache, -1,
4536 gdb_regset, sect_list->size);
2f2241f1
UW
4537
4538 if (strcmp (sect_list->sect_name, ".reg") == 0)
4539 note_data = (char *) elfcore_write_prstatus
4540 (obfd, note_data, note_size,
857d11d0
JK
4541 lwp, target_signal_to_host (stop_signal),
4542 gdb_regset);
2f2241f1
UW
4543 else
4544 note_data = (char *) elfcore_write_register_note
4545 (obfd, note_data, note_size,
4546 sect_list->sect_name, gdb_regset,
4547 sect_list->size);
17ea7499
CES
4548 xfree (gdb_regset);
4549 sect_list++;
4550 }
dba24537 4551
17ea7499
CES
4552 /* For architectures that do not have the struct core_regset_section
4553 implemented, we use the old method. When all the architectures have
4554 the new support, the code below should be deleted. */
4f844a66 4555 else
17ea7499 4556 {
2f2241f1
UW
4557 gdb_gregset_t gregs;
4558 gdb_fpregset_t fpregs;
4559
4560 if (core_regset_p
4561 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3e43a32a
MS
4562 sizeof (gregs)))
4563 != NULL && regset->collect_regset != NULL)
2f2241f1
UW
4564 regset->collect_regset (regset, regcache, -1,
4565 &gregs, sizeof (gregs));
4566 else
4567 fill_gregset (regcache, &gregs, -1);
4568
857d11d0
JK
4569 note_data = (char *) elfcore_write_prstatus
4570 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4571 &gregs);
2f2241f1 4572
17ea7499
CES
4573 if (core_regset_p
4574 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3e43a32a
MS
4575 sizeof (fpregs)))
4576 != NULL && regset->collect_regset != NULL)
17ea7499
CES
4577 regset->collect_regset (regset, regcache, -1,
4578 &fpregs, sizeof (fpregs));
4579 else
4580 fill_fpregset (regcache, &fpregs, -1);
4581
4582 note_data = (char *) elfcore_write_prfpreg (obfd,
4583 note_data,
4584 note_size,
4585 &fpregs, sizeof (fpregs));
4586 }
4f844a66 4587
dba24537
AC
4588 return note_data;
4589}
4590
4591struct linux_nat_corefile_thread_data
4592{
4593 bfd *obfd;
4594 char *note_data;
4595 int *note_size;
4596 int num_notes;
2020b7ab 4597 enum target_signal stop_signal;
dba24537
AC
4598};
4599
4600/* Called by gdbthread.c once per thread. Records the thread's
4601 register state for the corefile note section. */
4602
4603static int
4604linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4605{
4606 struct linux_nat_corefile_thread_data *args = data;
dba24537 4607
dba24537
AC
4608 args->note_data = linux_nat_do_thread_registers (args->obfd,
4609 ti->ptid,
4610 args->note_data,
2020b7ab
PA
4611 args->note_size,
4612 args->stop_signal);
dba24537 4613 args->num_notes++;
56be3814 4614
dba24537
AC
4615 return 0;
4616}
4617
efcbbd14
UW
4618/* Enumerate spufs IDs for process PID. */
4619
4620static void
4621iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4622{
4623 char path[128];
4624 DIR *dir;
4625 struct dirent *entry;
4626
4627 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4628 dir = opendir (path);
4629 if (!dir)
4630 return;
4631
4632 rewinddir (dir);
4633 while ((entry = readdir (dir)) != NULL)
4634 {
4635 struct stat st;
4636 struct statfs stfs;
4637 int fd;
4638
4639 fd = atoi (entry->d_name);
4640 if (!fd)
4641 continue;
4642
4643 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4644 if (stat (path, &st) != 0)
4645 continue;
4646 if (!S_ISDIR (st.st_mode))
4647 continue;
4648
4649 if (statfs (path, &stfs) != 0)
4650 continue;
4651 if (stfs.f_type != SPUFS_MAGIC)
4652 continue;
4653
4654 callback (data, fd);
4655 }
4656
4657 closedir (dir);
4658}
4659
4660/* Generate corefile notes for SPU contexts. */
4661
4662struct linux_spu_corefile_data
4663{
4664 bfd *obfd;
4665 char *note_data;
4666 int *note_size;
4667};
4668
4669static void
4670linux_spu_corefile_callback (void *data, int fd)
4671{
4672 struct linux_spu_corefile_data *args = data;
4673 int i;
4674
4675 static const char *spu_files[] =
4676 {
4677 "object-id",
4678 "mem",
4679 "regs",
4680 "fpcr",
4681 "lslr",
4682 "decr",
4683 "decr_status",
4684 "signal1",
4685 "signal1_type",
4686 "signal2",
4687 "signal2_type",
4688 "event_mask",
4689 "event_status",
4690 "mbox_info",
4691 "ibox_info",
4692 "wbox_info",
4693 "dma_info",
4694 "proxydma_info",
4695 };
4696
4697 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4698 {
4699 char annex[32], note_name[32];
4700 gdb_byte *spu_data;
4701 LONGEST spu_len;
4702
4703 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4704 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4705 annex, &spu_data);
4706 if (spu_len > 0)
4707 {
4708 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4709 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4710 args->note_size, note_name,
4711 NT_SPU, spu_data, spu_len);
4712 xfree (spu_data);
4713 }
4714 }
4715}
4716
4717static char *
4718linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4719{
4720 struct linux_spu_corefile_data args;
e0881a8e 4721
efcbbd14
UW
4722 args.obfd = obfd;
4723 args.note_data = note_data;
4724 args.note_size = note_size;
4725
4726 iterate_over_spus (PIDGET (inferior_ptid),
4727 linux_spu_corefile_callback, &args);
4728
4729 return args.note_data;
4730}
4731
dba24537
AC
4732/* Fills the "to_make_corefile_note" target vector. Builds the note
4733 section for a corefile, and returns it in a malloc buffer. */
4734
4735static char *
4736linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4737{
4738 struct linux_nat_corefile_thread_data thread_args;
d99148ef 4739 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 4740 char fname[16] = { '\0' };
d99148ef 4741 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
4742 char psargs[80] = { '\0' };
4743 char *note_data = NULL;
d90e17a7 4744 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
c6826062 4745 gdb_byte *auxv;
dba24537
AC
4746 int auxv_len;
4747
4748 if (get_exec_file (0))
4749 {
9f37bbcc 4750 strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
dba24537
AC
4751 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4752 if (get_inferior_args ())
4753 {
d99148ef
JK
4754 char *string_end;
4755 char *psargs_end = psargs + sizeof (psargs);
4756
4757 /* linux_elfcore_write_prpsinfo () handles zero unterminated
4758 strings fine. */
4759 string_end = memchr (psargs, 0, sizeof (psargs));
4760 if (string_end != NULL)
4761 {
4762 *string_end++ = ' ';
4763 strncpy (string_end, get_inferior_args (),
4764 psargs_end - string_end);
4765 }
dba24537
AC
4766 }
4767 note_data = (char *) elfcore_write_prpsinfo (obfd,
4768 note_data,
4769 note_size, fname, psargs);
4770 }
4771
4772 /* Dump information for threads. */
4773 thread_args.obfd = obfd;
4774 thread_args.note_data = note_data;
4775 thread_args.note_size = note_size;
4776 thread_args.num_notes = 0;
2020b7ab 4777 thread_args.stop_signal = find_stop_signal ();
d90e17a7 4778 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
4779 gdb_assert (thread_args.num_notes != 0);
4780 note_data = thread_args.note_data;
dba24537 4781
13547ab6
DJ
4782 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4783 NULL, &auxv);
dba24537
AC
4784 if (auxv_len > 0)
4785 {
4786 note_data = elfcore_write_note (obfd, note_data, note_size,
4787 "CORE", NT_AUXV, auxv, auxv_len);
4788 xfree (auxv);
4789 }
4790
efcbbd14
UW
4791 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4792
dba24537
AC
4793 make_cleanup (xfree, note_data);
4794 return note_data;
4795}
4796
4797/* Implement the "info proc" command. */
4798
f179e162
JK
4799enum info_proc_what
4800 {
4801 /* Display the default cmdline, cwd and exe outputs. */
4802 IP_MINIMAL,
4803
4804 /* Display `info proc mappings'. */
4805 IP_MAPPINGS,
4806
4807 /* Display `info proc status'. */
4808 IP_STATUS,
4809
4810 /* Display `info proc stat'. */
4811 IP_STAT,
4812
4813 /* Display `info proc cmdline'. */
4814 IP_CMDLINE,
4815
4816 /* Display `info proc exe'. */
4817 IP_EXE,
4818
4819 /* Display `info proc cwd'. */
4820 IP_CWD,
4821
4822 /* Display all of the above. */
4823 IP_ALL
4824 };
4825
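/* Illustrative usage of the commands built on top of this enum; the
   trailing PID argument is optional and defaults to the current
   inferior (the PID below is made up):

     (gdb) info proc
     (gdb) info proc mappings
     (gdb) info proc status 1234  */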
dba24537 4826static void
f179e162 4827linux_nat_info_proc_cmd_1 (char *args, enum info_proc_what what, int from_tty)
dba24537 4828{
89ecc4f5
DE
4829 /* A long is used for pid instead of an int to avoid a loss of precision
4830 compiler warning from the output of strtoul. */
4831 long pid = PIDGET (inferior_ptid);
dba24537 4832 FILE *procfile;
dba24537
AC
4833 char buffer[MAXPATHLEN];
4834 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
f179e162
JK
4835 int cmdline_f = (what == IP_MINIMAL || what == IP_CMDLINE || what == IP_ALL);
4836 int cwd_f = (what == IP_MINIMAL || what == IP_CWD || what == IP_ALL);
4837 int exe_f = (what == IP_MINIMAL || what == IP_EXE || what == IP_ALL);
4838 int mappings_f = (what == IP_MAPPINGS || what == IP_ALL);
4839 int status_f = (what == IP_STATUS || what == IP_ALL);
4840 int stat_f = (what == IP_STAT || what == IP_ALL);
dba24537
AC
4841 struct stat dummy;
4842
f179e162
JK
4843 if (args && isdigit (args[0]))
4844 pid = strtoul (args, &args, 10);
4845
4846 args = skip_spaces (args);
4847 if (args && args[0])
4848 error (_("Too many parameters: %s"), args);
4849
dba24537 4850 if (pid == 0)
8a3fe4f8 4851 error (_("No current process: you must name one."));
dba24537 4852
89ecc4f5 4853 sprintf (fname1, "/proc/%ld", pid);
dba24537 4854 if (stat (fname1, &dummy) != 0)
8a3fe4f8 4855 error (_("No /proc directory: '%s'"), fname1);
dba24537 4856
89ecc4f5 4857 printf_filtered (_("process %ld\n"), pid);
f179e162 4858 if (cmdline_f)
dba24537 4859 {
89ecc4f5 4860 sprintf (fname1, "/proc/%ld/cmdline", pid);
d5d6fca5 4861 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4862 {
7c8a8b04 4863 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4864
bf1d7d9c
JB
4865 if (fgets (buffer, sizeof (buffer), procfile))
4866 printf_filtered ("cmdline = '%s'\n", buffer);
4867 else
4868 warning (_("unable to read '%s'"), fname1);
7c8a8b04 4869 do_cleanups (cleanup);
dba24537
AC
4870 }
4871 else
8a3fe4f8 4872 warning (_("unable to open /proc file '%s'"), fname1);
dba24537 4873 }
f179e162 4874 if (cwd_f)
dba24537 4875 {
89ecc4f5 4876 sprintf (fname1, "/proc/%ld/cwd", pid);
dba24537
AC
4877 memset (fname2, 0, sizeof (fname2));
4878 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4879 printf_filtered ("cwd = '%s'\n", fname2);
4880 else
8a3fe4f8 4881 warning (_("unable to read link '%s'"), fname1);
dba24537 4882 }
f179e162 4883 if (exe_f)
dba24537 4884 {
89ecc4f5 4885 sprintf (fname1, "/proc/%ld/exe", pid);
dba24537
AC
4886 memset (fname2, 0, sizeof (fname2));
4887 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4888 printf_filtered ("exe = '%s'\n", fname2);
4889 else
8a3fe4f8 4890 warning (_("unable to read link '%s'"), fname1);
dba24537 4891 }
f179e162 4892 if (mappings_f)
dba24537 4893 {
89ecc4f5 4894 sprintf (fname1, "/proc/%ld/maps", pid);
d5d6fca5 4895 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4896 {
4897 long long addr, endaddr, size, offset, inode;
4898 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 4899 struct cleanup *cleanup;
dba24537 4900
7c8a8b04 4901 cleanup = make_cleanup_fclose (procfile);
a3f17187 4902 printf_filtered (_("Mapped address spaces:\n\n"));
a97b0ac8 4903 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4904 {
4905 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4906 "Start Addr",
4907 " End Addr",
4908 " Size", " Offset", "objfile");
4909 }
4910 else
4911 {
4912 printf_filtered (" %18s %18s %10s %10s %7s\n",
4913 "Start Addr",
4914 " End Addr",
4915 " Size", " Offset", "objfile");
4916 }
4917
4918 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4919 &offset, &device[0], &inode, &filename[0]))
4920 {
4921 size = endaddr - addr;
4922
4923 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4924 calls here (and possibly above) should be abstracted
4925 out into their own functions? Andrew suggests using
4926 a generic local_address_string instead to print out
4927 the addresses; that makes sense to me, too. */
4928
a97b0ac8 4929 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4930 {
4931 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4932 (unsigned long) addr, /* FIXME: pr_addr */
4933 (unsigned long) endaddr,
4934 (int) size,
4935 (unsigned int) offset,
4936 filename[0] ? filename : "");
4937 }
4938 else
4939 {
4940 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4941 (unsigned long) addr, /* FIXME: pr_addr */
4942 (unsigned long) endaddr,
4943 (int) size,
4944 (unsigned int) offset,
4945 filename[0] ? filename : "");
4946 }
4947 }
4948
7c8a8b04 4949 do_cleanups (cleanup);
dba24537
AC
4950 }
4951 else
8a3fe4f8 4952 warning (_("unable to open /proc file '%s'"), fname1);
dba24537 4953 }
f179e162 4954 if (status_f)
dba24537 4955 {
89ecc4f5 4956 sprintf (fname1, "/proc/%ld/status", pid);
d5d6fca5 4957 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4958 {
7c8a8b04 4959 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4960
dba24537
AC
4961 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4962 puts_filtered (buffer);
7c8a8b04 4963 do_cleanups (cleanup);
dba24537
AC
4964 }
4965 else
8a3fe4f8 4966 warning (_("unable to open /proc file '%s'"), fname1);
dba24537 4967 }
f179e162 4968 if (stat_f)
dba24537 4969 {
89ecc4f5 4970 sprintf (fname1, "/proc/%ld/stat", pid);
d5d6fca5 4971 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4972 {
4973 int itmp;
4974 char ctmp;
a25694b4 4975 long ltmp;
7c8a8b04 4976 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4977
4978 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4979 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 4980 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 4981 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 4982 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 4983 printf_filtered (_("State: %c\n"), ctmp);
dba24537 4984 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4985 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 4986 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4987 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 4988 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4989 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 4990 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4991 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 4992 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4993 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
4994 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4995 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4996 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4997 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4998 (unsigned long) ltmp);
4999 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5000 printf_filtered (_("Minor faults, children: %lu\n"),
5001 (unsigned long) ltmp);
5002 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5003 printf_filtered (_("Major faults (memory page faults): %lu\n"),
5004 (unsigned long) ltmp);
5005 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5006 printf_filtered (_("Major faults, children: %lu\n"),
5007 (unsigned long) ltmp);
5008 if (fscanf (procfile, "%ld ", &ltmp) > 0)
5009 printf_filtered (_("utime: %ld\n"), ltmp);
5010 if (fscanf (procfile, "%ld ", &ltmp) > 0)
5011 printf_filtered (_("stime: %ld\n"), ltmp);
5012 if (fscanf (procfile, "%ld ", &ltmp) > 0)
5013 printf_filtered (_("utime, children: %ld\n"), ltmp);
5014 if (fscanf (procfile, "%ld ", &ltmp) > 0)
5015 printf_filtered (_("stime, children: %ld\n"), ltmp);
5016 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
5017 printf_filtered (_("jiffies remaining in current "
5018 "time slice: %ld\n"), ltmp);
a25694b4
AS
5019 if (fscanf (procfile, "%ld ", &ltmp) > 0)
5020 printf_filtered (_("'nice' value: %ld\n"), ltmp);
5021 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5022 printf_filtered (_("jiffies until next timeout: %lu\n"),
5023 (unsigned long) ltmp);
5024 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5025 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
5026 (unsigned long) ltmp);
5027 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
5028 printf_filtered (_("start time (jiffies since "
5029 "system boot): %ld\n"), ltmp);
a25694b4
AS
5030 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5031 printf_filtered (_("Virtual memory size: %lu\n"),
5032 (unsigned long) ltmp);
5033 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3e43a32a
MS
5034 printf_filtered (_("Resident set size: %lu\n"),
5035 (unsigned long) ltmp);
a25694b4
AS
5036 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5037 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
5038 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5039 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
5040 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5041 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
5042 if (fscanf (procfile, "%lu ", &ltmp) > 0)
5043 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3e43a32a
MS
5044#if 0 /* Don't know how architecture-dependent the rest is...
5045 Anyway the signal bitmap info is available from "status". */
1777feb0 5046 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 5047 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
1777feb0 5048 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4
AS
5049 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
5050 if (fscanf (procfile, "%ld ", &ltmp) > 0)
5051 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
5052 if (fscanf (procfile, "%ld ", &ltmp) > 0)
5053 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
5054 if (fscanf (procfile, "%ld ", &ltmp) > 0)
5055 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
5056 if (fscanf (procfile, "%ld ", &ltmp) > 0)
 5057 printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
1777feb0 5058 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 5059 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 5060#endif
7c8a8b04 5061 do_cleanups (cleanup);
dba24537
AC
5062 }
5063 else
8a3fe4f8 5064 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
5065 }
5066}
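/* Illustrative usage note (added; the session below is a sketch, not
   captured output):

     (gdb) info proc           -- minimal summary for the current inferior
     (gdb) info proc stat      -- pretty-printed /proc/PID/stat, as above
     (gdb) info proc all 1234  -- everything for process 1234

   The PID argument is optional; see the "info proc" registration in
   _initialize_linux_nat at the end of this file.  */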
5067
f179e162
JK
 5068/* Implement `info proc' when given without any further parameters. */
5069
5070static void
5071linux_nat_info_proc_cmd (char *args, int from_tty)
5072{
5073 linux_nat_info_proc_cmd_1 (args, IP_MINIMAL, from_tty);
5074}
5075
5076/* Implement `info proc mappings'. */
5077
5078static void
5079linux_nat_info_proc_cmd_mappings (char *args, int from_tty)
5080{
5081 linux_nat_info_proc_cmd_1 (args, IP_MAPPINGS, from_tty);
5082}
5083
5084/* Implement `info proc stat'. */
5085
5086static void
5087linux_nat_info_proc_cmd_stat (char *args, int from_tty)
5088{
5089 linux_nat_info_proc_cmd_1 (args, IP_STAT, from_tty);
5090}
5091
5092/* Implement `info proc status'. */
5093
5094static void
5095linux_nat_info_proc_cmd_status (char *args, int from_tty)
5096{
5097 linux_nat_info_proc_cmd_1 (args, IP_STATUS, from_tty);
5098}
5099
5100/* Implement `info proc cwd'. */
5101
5102static void
5103linux_nat_info_proc_cmd_cwd (char *args, int from_tty)
5104{
5105 linux_nat_info_proc_cmd_1 (args, IP_CWD, from_tty);
5106}
5107
5108/* Implement `info proc cmdline'. */
5109
5110static void
5111linux_nat_info_proc_cmd_cmdline (char *args, int from_tty)
5112{
5113 linux_nat_info_proc_cmd_1 (args, IP_CMDLINE, from_tty);
5114}
5115
5116/* Implement `info proc exe'. */
5117
5118static void
5119linux_nat_info_proc_cmd_exe (char *args, int from_tty)
5120{
5121 linux_nat_info_proc_cmd_1 (args, IP_EXE, from_tty);
5122}
5123
5124/* Implement `info proc all'. */
5125
5126static void
5127linux_nat_info_proc_cmd_all (char *args, int from_tty)
5128{
5129 linux_nat_info_proc_cmd_1 (args, IP_ALL, from_tty);
5130}
5131
10d6c8cd
DJ
5132/* Implement the to_xfer_partial interface for memory reads using the /proc
5133 filesystem. Because we can use a single read() call for /proc, this
5134 can be much more efficient than banging away at PTRACE_PEEKTEXT,
5135 but it doesn't support writes. */
5136
5137static LONGEST
5138linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
5139 const char *annex, gdb_byte *readbuf,
5140 const gdb_byte *writebuf,
5141 ULONGEST offset, LONGEST len)
dba24537 5142{
10d6c8cd
DJ
5143 LONGEST ret;
5144 int fd;
dba24537
AC
5145 char filename[64];
5146
10d6c8cd 5147 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
5148 return 0;
5149
5150 /* Don't bother for one word. */
5151 if (len < 3 * sizeof (long))
5152 return 0;
5153
5154 /* We could keep this file open and cache it - possibly one per
5155 thread. That requires some juggling, but is even faster. */
5156 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
5157 fd = open (filename, O_RDONLY | O_LARGEFILE);
5158 if (fd == -1)
5159 return 0;
5160
5161 /* If pread64 is available, use it. It's faster if the kernel
5162 supports it (only one syscall), and it's 64-bit safe even on
5163 32-bit platforms (for instance, SPARC debugging a SPARC64
5164 application). */
5165#ifdef HAVE_PREAD64
10d6c8cd 5166 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 5167#else
10d6c8cd 5168 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
5169#endif
5170 ret = 0;
5171 else
5172 ret = len;
5173
5174 close (fd);
5175 return ret;
5176}
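/* Descriptive note (added): a zero return from linux_proc_xfer_partial
   means "nothing transferred", which makes linux_xfer_partial below fall
   back to super_xfer_partial, i.e. the inherited ptrace-based transfer;
   only a successful /proc read short-circuits the ptrace path.  */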
5177
efcbbd14
UW
5178
5179/* Enumerate spufs IDs for process PID. */
5180static LONGEST
5181spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
5182{
5183 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
5184 LONGEST pos = 0;
5185 LONGEST written = 0;
5186 char path[128];
5187 DIR *dir;
5188 struct dirent *entry;
5189
5190 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
5191 dir = opendir (path);
5192 if (!dir)
5193 return -1;
5194
5195 rewinddir (dir);
5196 while ((entry = readdir (dir)) != NULL)
5197 {
5198 struct stat st;
5199 struct statfs stfs;
5200 int fd;
5201
5202 fd = atoi (entry->d_name);
5203 if (!fd)
5204 continue;
5205
5206 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
5207 if (stat (path, &st) != 0)
5208 continue;
5209 if (!S_ISDIR (st.st_mode))
5210 continue;
5211
5212 if (statfs (path, &stfs) != 0)
5213 continue;
5214 if (stfs.f_type != SPUFS_MAGIC)
5215 continue;
5216
5217 if (pos >= offset && pos + 4 <= offset + len)
5218 {
5219 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
5220 written += 4;
5221 }
5222 pos += 4;
5223 }
5224
5225 closedir (dir);
5226 return written;
5227}
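/* Descriptive note (added): the buffer filled in above is a flat array of
   4-byte SPU context file descriptors stored in target byte order; OFFSET
   and LEN select a window into that array, and the return value is the
   number of bytes actually written into BUF.  */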
5228
5229/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
5230 object type, using the /proc file system. */
5231static LONGEST
5232linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
5233 const char *annex, gdb_byte *readbuf,
5234 const gdb_byte *writebuf,
5235 ULONGEST offset, LONGEST len)
5236{
5237 char buf[128];
5238 int fd = 0;
5239 int ret = -1;
5240 int pid = PIDGET (inferior_ptid);
5241
5242 if (!annex)
5243 {
5244 if (!readbuf)
5245 return -1;
5246 else
5247 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5248 }
5249
5250 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
5251 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5252 if (fd <= 0)
5253 return -1;
5254
5255 if (offset != 0
5256 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5257 {
5258 close (fd);
5259 return 0;
5260 }
5261
5262 if (writebuf)
5263 ret = write (fd, writebuf, (size_t) len);
5264 else if (readbuf)
5265 ret = read (fd, readbuf, (size_t) len);
5266
5267 close (fd);
5268 return ret;
5269}
5270
5271
dba24537
AC
5272/* Parse LINE as a signal set and add its set bits to SIGS. */
5273
5274static void
5275add_line_to_sigset (const char *line, sigset_t *sigs)
5276{
5277 int len = strlen (line) - 1;
5278 const char *p;
5279 int signum;
5280
5281 if (line[len] != '\n')
8a3fe4f8 5282 error (_("Could not parse signal set: %s"), line);
dba24537
AC
5283
5284 p = line;
5285 signum = len * 4;
5286 while (len-- > 0)
5287 {
5288 int digit;
5289
5290 if (*p >= '0' && *p <= '9')
5291 digit = *p - '0';
5292 else if (*p >= 'a' && *p <= 'f')
5293 digit = *p - 'a' + 10;
5294 else
8a3fe4f8 5295 error (_("Could not parse signal set: %s"), line);
dba24537
AC
5296
5297 signum -= 4;
5298
5299 if (digit & 1)
5300 sigaddset (sigs, signum + 1);
5301 if (digit & 2)
5302 sigaddset (sigs, signum + 2);
5303 if (digit & 4)
5304 sigaddset (sigs, signum + 3);
5305 if (digit & 8)
5306 sigaddset (sigs, signum + 4);
5307
5308 p++;
5309 }
5310}
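/* Example (an illustrative sketch, not part of the original sources): for
   the status-file fragment "0000000000000002\n", only the low-order hex
   digit is non-zero and has bit 1 set, so

     add_line_to_sigset ("0000000000000002\n", &sigs);

   leaves sigismember (&sigs, 2) true, i.e. signal 2 (SIGINT on Linux)
   added to SIGS.  */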
5311
5312/* Find process PID's pending signals from /proc/pid/status and set
5313 SIGS to match. */
5314
5315void
3e43a32a
MS
5316linux_proc_pending_signals (int pid, sigset_t *pending,
5317 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
5318{
5319 FILE *procfile;
5320 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 5321 struct cleanup *cleanup;
dba24537
AC
5322
5323 sigemptyset (pending);
5324 sigemptyset (blocked);
5325 sigemptyset (ignored);
5326 sprintf (fname, "/proc/%d/status", pid);
5327 procfile = fopen (fname, "r");
5328 if (procfile == NULL)
8a3fe4f8 5329 error (_("Could not open %s"), fname);
7c8a8b04 5330 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
5331
5332 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5333 {
5334 /* Normal queued signals are on the SigPnd line in the status
5335 file. However, 2.6 kernels also have a "shared" pending
5336 queue for delivering signals to a thread group, so check for
5337 a ShdPnd line also.
5338
5339 Unfortunately some Red Hat kernels include the shared pending
5340 queue but not the ShdPnd status field. */
5341
5342 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5343 add_line_to_sigset (buffer + 8, pending);
5344 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5345 add_line_to_sigset (buffer + 8, pending);
5346 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5347 add_line_to_sigset (buffer + 8, blocked);
5348 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5349 add_line_to_sigset (buffer + 8, ignored);
5350 }
5351
7c8a8b04 5352 do_cleanups (cleanup);
dba24537
AC
5353}
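/* Usage sketch (illustrative only; PID 1234 is a made-up example):

     sigset_t pending, blocked, ignored;

     linux_proc_pending_signals (1234, &pending, &blocked, &ignored);
     if (sigismember (&pending, SIGCHLD))
       ...;  -- a SIGCHLD is queued for process 1234

   Note that the function calls error () and does not return if the
   status file cannot be opened.  */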
5354
07e059b5
VP
5355static LONGEST
5356linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
5357 const char *annex, gdb_byte *readbuf,
5358 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5 5359{
07e059b5
VP
5360 gdb_assert (object == TARGET_OBJECT_OSDATA);
5361
d26e3629 5362 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5363}
5364
10d6c8cd
DJ
5365static LONGEST
5366linux_xfer_partial (struct target_ops *ops, enum target_object object,
5367 const char *annex, gdb_byte *readbuf,
5368 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5369{
5370 LONGEST xfer;
5371
5372 if (object == TARGET_OBJECT_AUXV)
9f2982ff 5373 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
5374 offset, len);
5375
07e059b5
VP
5376 if (object == TARGET_OBJECT_OSDATA)
5377 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5378 offset, len);
5379
efcbbd14
UW
5380 if (object == TARGET_OBJECT_SPU)
5381 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5382 offset, len);
5383
8f313923
JK
 5384 /* GDB calculates all the addresses in the possibly larger width of
 5385 the address.  The address width needs to be masked before its final
 5386 use - either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5387
5388 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
5389
5390 if (object == TARGET_OBJECT_MEMORY)
5391 {
5392 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5393
5394 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5395 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5396 }
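      /* Illustration (added comment): with a 32-bit inferior on a 64-bit
         host, addr_bit is 32, so a sign-extended offset such as
         0xffffffff80001000 is masked down to 0x80001000 before reaching
         linux_proc_xfer_partial or inf_ptrace_xfer_partial.  */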
5397
10d6c8cd
DJ
5398 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5399 offset, len);
5400 if (xfer != 0)
5401 return xfer;
5402
5403 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5404 offset, len);
5405}
5406
e9efe249 5407/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
5408 it with local methods. */
5409
910122bf
UW
5410static void
5411linux_target_install_ops (struct target_ops *t)
10d6c8cd 5412{
6d8fd2b7 5413 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 5414 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 5415 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 5416 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 5417 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 5418 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 5419 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 5420 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 5421 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
5422 t->to_post_attach = linux_child_post_attach;
5423 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
5424 t->to_find_memory_regions = linux_nat_find_memory_regions;
5425 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5426
5427 super_xfer_partial = t->to_xfer_partial;
5428 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
5429}
5430
5431struct target_ops *
5432linux_target (void)
5433{
5434 struct target_ops *t;
5435
5436 t = inf_ptrace_target ();
5437 linux_target_install_ops (t);
5438
5439 return t;
5440}
5441
5442struct target_ops *
7714d83a 5443linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
5444{
5445 struct target_ops *t;
5446
5447 t = inf_ptrace_trad_target (register_u_offset);
5448 linux_target_install_ops (t);
10d6c8cd 5449
10d6c8cd
DJ
5450 return t;
5451}
5452
b84876c2
PA
5453/* target_is_async_p implementation. */
5454
5455static int
5456linux_nat_is_async_p (void)
5457{
5458 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5459 it explicitly with the "set target-async" command.
b84876c2 5460 Someday, linux will always be async. */
3dd5b83d 5461 return target_async_permitted;
b84876c2
PA
5462}
5463
5464/* target_can_async_p implementation. */
5465
5466static int
5467linux_nat_can_async_p (void)
5468{
5469 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5470 it explicitly with the "set target-async" command.
b84876c2 5471 Someday, linux will always be async. */
3dd5b83d 5472 return target_async_permitted;
b84876c2
PA
5473}
5474
9908b566
VP
5475static int
5476linux_nat_supports_non_stop (void)
5477{
5478 return 1;
5479}
5480
d90e17a7
PA
5481/* True if we want to support multi-process. To be removed when GDB
5482 supports multi-exec. */
5483
2277426b 5484int linux_multi_process = 1;
d90e17a7
PA
5485
5486static int
5487linux_nat_supports_multi_process (void)
5488{
5489 return linux_multi_process;
5490}
5491
03583c20
UW
5492static int
5493linux_nat_supports_disable_randomization (void)
5494{
5495#ifdef HAVE_PERSONALITY
5496 return 1;
5497#else
5498 return 0;
5499#endif
5500}
5501
b84876c2
PA
5502static int async_terminal_is_ours = 1;
5503
5504/* target_terminal_inferior implementation. */
5505
5506static void
5507linux_nat_terminal_inferior (void)
5508{
5509 if (!target_is_async_p ())
5510 {
5511 /* Async mode is disabled. */
5512 terminal_inferior ();
5513 return;
5514 }
5515
b84876c2
PA
5516 terminal_inferior ();
5517
d9d2d8b6 5518 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
5519 if (!async_terminal_is_ours)
5520 return;
5521
5522 delete_file_handler (input_fd);
5523 async_terminal_is_ours = 0;
5524 set_sigint_trap ();
5525}
5526
5527/* target_terminal_ours implementation. */
5528
2c0b251b 5529static void
b84876c2
PA
5530linux_nat_terminal_ours (void)
5531{
5532 if (!target_is_async_p ())
5533 {
5534 /* Async mode is disabled. */
5535 terminal_ours ();
5536 return;
5537 }
5538
5539 /* GDB should never give the terminal to the inferior if the
5540 inferior is running in the background (run&, continue&, etc.),
 5541 but claiming the terminal back certainly should happen. */
5542 terminal_ours ();
5543
b84876c2
PA
5544 if (async_terminal_is_ours)
5545 return;
5546
5547 clear_sigint_trap ();
5548 add_file_handler (input_fd, stdin_event_handler, 0);
5549 async_terminal_is_ours = 1;
5550}
5551
5552static void (*async_client_callback) (enum inferior_event_type event_type,
5553 void *context);
5554static void *async_client_context;
5555
7feb7d06
PA
 5556 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
 5557 it lets us notice when any child changes state and notify the
 5558 event-loop; in sync mode it allows us to use sigsuspend in
 5559 linux_nat_wait_1 above to wait for the arrival of a SIGCHLD. */
5560
b84876c2 5561static void
7feb7d06 5562sigchld_handler (int signo)
b84876c2 5563{
7feb7d06
PA
5564 int old_errno = errno;
5565
01124a23
DE
5566 if (debug_linux_nat)
5567 ui_file_write_async_safe (gdb_stdlog,
5568 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
5569
5570 if (signo == SIGCHLD
5571 && linux_nat_event_pipe[0] != -1)
5572 async_file_mark (); /* Let the event loop know that there are
5573 events to handle. */
5574
5575 errno = old_errno;
5576}
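/* Descriptive note (added): the handler above is deliberately minimal and
   async-signal-safe - apart from optional debug output it only calls
   async_file_mark to flag the event pipe (the classic self-pipe trick), so
   that the event loop's file-descriptor wait wakes up and
   handle_target_event below gets a chance to run.  */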
5577
5578/* Callback registered with the target events file descriptor. */
5579
5580static void
5581handle_target_event (int error, gdb_client_data client_data)
5582{
5583 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5584}
5585
5586/* Create/destroy the target events pipe. Returns previous state. */
5587
5588static int
5589linux_async_pipe (int enable)
5590{
5591 int previous = (linux_nat_event_pipe[0] != -1);
5592
5593 if (previous != enable)
5594 {
5595 sigset_t prev_mask;
5596
5597 block_child_signals (&prev_mask);
5598
5599 if (enable)
5600 {
5601 if (pipe (linux_nat_event_pipe) == -1)
5602 internal_error (__FILE__, __LINE__,
5603 "creating event pipe failed.");
5604
5605 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5606 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5607 }
5608 else
5609 {
5610 close (linux_nat_event_pipe[0]);
5611 close (linux_nat_event_pipe[1]);
5612 linux_nat_event_pipe[0] = -1;
5613 linux_nat_event_pipe[1] = -1;
5614 }
5615
5616 restore_child_signals_mask (&prev_mask);
5617 }
5618
5619 return previous;
b84876c2
PA
5620}
5621
5622/* target_async implementation. */
5623
5624static void
5625linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5626 void *context), void *context)
5627{
b84876c2
PA
5628 if (callback != NULL)
5629 {
5630 async_client_callback = callback;
5631 async_client_context = context;
7feb7d06
PA
5632 if (!linux_async_pipe (1))
5633 {
5634 add_file_handler (linux_nat_event_pipe[0],
5635 handle_target_event, NULL);
5636 /* There may be pending events to handle. Tell the event loop
5637 to poll them. */
5638 async_file_mark ();
5639 }
b84876c2
PA
5640 }
5641 else
5642 {
5643 async_client_callback = callback;
5644 async_client_context = context;
b84876c2 5645 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 5646 linux_async_pipe (0);
b84876c2
PA
5647 }
5648 return;
5649}
5650
252fbfc8
PA
5651/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5652 event came out. */
5653
4c28f408 5654static int
252fbfc8 5655linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 5656{
d90e17a7 5657 if (!lwp->stopped)
252fbfc8 5658 {
d90e17a7 5659 ptid_t ptid = lwp->ptid;
252fbfc8 5660
d90e17a7
PA
5661 if (debug_linux_nat)
5662 fprintf_unfiltered (gdb_stdlog,
5663 "LNSL: running -> suspending %s\n",
5664 target_pid_to_str (lwp->ptid));
252fbfc8 5665
252fbfc8 5666
25289eb2
PA
5667 if (lwp->last_resume_kind == resume_stop)
5668 {
5669 if (debug_linux_nat)
5670 fprintf_unfiltered (gdb_stdlog,
5671 "linux-nat: already stopping LWP %ld at "
5672 "GDB's request\n",
5673 ptid_get_lwp (lwp->ptid));
5674 return 0;
5675 }
252fbfc8 5676
25289eb2
PA
5677 stop_callback (lwp, NULL);
5678 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
5679 }
5680 else
5681 {
5682 /* Already known to be stopped; do nothing. */
252fbfc8 5683
d90e17a7
PA
5684 if (debug_linux_nat)
5685 {
e09875d4 5686 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5687 fprintf_unfiltered (gdb_stdlog,
5688 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5689 target_pid_to_str (lwp->ptid));
5690 else
3e43a32a
MS
5691 fprintf_unfiltered (gdb_stdlog,
5692 "LNSL: already stopped/no "
5693 "stop_requested yet %s\n",
d90e17a7 5694 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5695 }
5696 }
4c28f408
PA
5697 return 0;
5698}
5699
5700static void
5701linux_nat_stop (ptid_t ptid)
5702{
5703 if (non_stop)
d90e17a7 5704 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5705 else
5706 linux_ops->to_stop (ptid);
5707}
5708
d90e17a7
PA
5709static void
5710linux_nat_close (int quitting)
5711{
5712 /* Unregister from the event loop. */
5713 if (target_is_async_p ())
5714 target_async (NULL, 0);
5715
d90e17a7
PA
5716 if (linux_ops->to_close)
5717 linux_ops->to_close (quitting);
5718}
5719
c0694254
PA
5720/* When requests are passed down from the linux-nat layer to the
5721 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5722 used. The address space pointer is stored in the inferior object,
5723 but the common code that is passed such ptid can't tell whether
5724 lwpid is a "main" process id or not (it assumes so). We reverse
5725 look up the "main" process id from the lwp here. */
5726
5727struct address_space *
5728linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5729{
5730 struct lwp_info *lwp;
5731 struct inferior *inf;
5732 int pid;
5733
5734 pid = GET_LWP (ptid);
5735 if (GET_LWP (ptid) == 0)
5736 {
5737 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5738 tgid. */
5739 lwp = find_lwp_pid (ptid);
5740 pid = GET_PID (lwp->ptid);
5741 }
5742 else
5743 {
5744 /* A (pid,lwpid,0) ptid. */
5745 pid = GET_PID (ptid);
5746 }
5747
5748 inf = find_inferior_pid (pid);
5749 gdb_assert (inf != NULL);
5750 return inf->aspace;
5751}
5752
dc146f7c
VP
5753int
5754linux_nat_core_of_thread_1 (ptid_t ptid)
5755{
5756 struct cleanup *back_to;
5757 char *filename;
5758 FILE *f;
5759 char *content = NULL;
5760 char *p;
5761 char *ts = 0;
5762 int content_read = 0;
5763 int i;
5764 int core;
5765
5766 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5767 GET_PID (ptid), GET_LWP (ptid));
5768 back_to = make_cleanup (xfree, filename);
5769
5770 f = fopen (filename, "r");
5771 if (!f)
5772 {
5773 do_cleanups (back_to);
5774 return -1;
5775 }
5776
5777 make_cleanup_fclose (f);
5778
5779 for (;;)
5780 {
5781 int n;
e0881a8e 5782
dc146f7c
VP
5783 content = xrealloc (content, content_read + 1024);
5784 n = fread (content + content_read, 1, 1024, f);
5785 content_read += n;
5786 if (n < 1024)
5787 {
5788 content[content_read] = '\0';
5789 break;
5790 }
5791 }
5792
5793 make_cleanup (xfree, content);
5794
5795 p = strchr (content, '(');
ca2a87a0
JK
5796
5797 /* Skip ")". */
5798 if (p != NULL)
5799 p = strchr (p, ')');
5800 if (p != NULL)
5801 p++;
dc146f7c
VP
5802
5803 /* If the first field after program name has index 0, then core number is
5804 the field with index 36. There's no constant for that anywhere. */
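  /* Illustration (added; the values are made up): a stat line such as

       1234 (bash) S 1 1234 1234 ... 3

     yields core 3 here - field index 36 when the state character "S"
     directly after the parenthesized command name counts as field 0.
     See proc(5) for the full field list.  */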
ca2a87a0
JK
5805 if (p != NULL)
5806 p = strtok_r (p, " ", &ts);
5807 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
5808 p = strtok_r (NULL, " ", &ts);
5809
ca2a87a0 5810 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
5811 core = -1;
5812
5813 do_cleanups (back_to);
5814
5815 return core;
5816}
5817
5818/* Return the cached value of the processor core for thread PTID. */
5819
5820int
5821linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5822{
5823 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5824
dc146f7c
VP
5825 if (info)
5826 return info->core;
5827 return -1;
5828}
5829
f973ed9c
DJ
5830void
5831linux_nat_add_target (struct target_ops *t)
5832{
f973ed9c
DJ
5833 /* Save the provided single-threaded target. We save this in a separate
5834 variable because another target we've inherited from (e.g. inf-ptrace)
5835 may have saved a pointer to T; we want to use it for the final
5836 process stratum target. */
5837 linux_ops_saved = *t;
5838 linux_ops = &linux_ops_saved;
5839
5840 /* Override some methods for multithreading. */
b84876c2 5841 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
5842 t->to_attach = linux_nat_attach;
5843 t->to_detach = linux_nat_detach;
5844 t->to_resume = linux_nat_resume;
5845 t->to_wait = linux_nat_wait;
2455069d 5846 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
5847 t->to_xfer_partial = linux_nat_xfer_partial;
5848 t->to_kill = linux_nat_kill;
5849 t->to_mourn_inferior = linux_nat_mourn_inferior;
5850 t->to_thread_alive = linux_nat_thread_alive;
5851 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 5852 t->to_thread_name = linux_nat_thread_name;
f973ed9c 5853 t->to_has_thread_control = tc_schedlock;
c0694254 5854 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
5855 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5856 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 5857
b84876c2
PA
5858 t->to_can_async_p = linux_nat_can_async_p;
5859 t->to_is_async_p = linux_nat_is_async_p;
9908b566 5860 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2 5861 t->to_async = linux_nat_async;
b84876c2
PA
5862 t->to_terminal_inferior = linux_nat_terminal_inferior;
5863 t->to_terminal_ours = linux_nat_terminal_ours;
d90e17a7 5864 t->to_close = linux_nat_close;
b84876c2 5865
4c28f408
PA
5866 /* Methods for non-stop support. */
5867 t->to_stop = linux_nat_stop;
5868
d90e17a7
PA
5869 t->to_supports_multi_process = linux_nat_supports_multi_process;
5870
03583c20
UW
5871 t->to_supports_disable_randomization
5872 = linux_nat_supports_disable_randomization;
5873
dc146f7c
VP
5874 t->to_core_of_thread = linux_nat_core_of_thread;
5875
f973ed9c
DJ
5876 /* We don't change the stratum; this target will sit at
5877 process_stratum and thread_db will set at thread_stratum. This
5878 is a little strange, since this is a multi-threaded-capable
5879 target, but we want to be on the stack below thread_db, and we
5880 also want to be used for single-threaded processes. */
5881
5882 add_target (t);
f973ed9c
DJ
5883}
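/* Sketch of typical use by an architecture-specific native-target file
   (illustrative; the "xyz" names are placeholders, not symbols defined
   anywhere in GDB):

     void
     _initialize_xyz_linux_nat (void)
     {
       struct target_ops *t = linux_target ();

       t->to_fetch_registers = xyz_fetch_inferior_registers;
       t->to_store_registers = xyz_store_inferior_registers;
       linux_nat_add_target (t);
     }
*/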
5884
9f0bdab8
DJ
5885/* Register a method to call whenever a new thread is attached. */
5886void
7b50312a
PA
5887linux_nat_set_new_thread (struct target_ops *t,
5888 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
5889{
5890 /* Save the pointer. We only support a single registered instance
5891 of the GNU/Linux native target, so we do not need to map this to
5892 T. */
5893 linux_nat_new_thread = new_thread;
5894}
5895
5b009018
PA
5896/* Register a method that converts a siginfo object between the layout
5897 that ptrace returns, and the layout in the architecture of the
5898 inferior. */
5899void
5900linux_nat_set_siginfo_fixup (struct target_ops *t,
5901 int (*siginfo_fixup) (struct siginfo *,
5902 gdb_byte *,
5903 int))
5904{
5905 /* Save the pointer. */
5906 linux_nat_siginfo_fixup = siginfo_fixup;
5907}
5908
7b50312a
PA
5909/* Register a method to call prior to resuming a thread. */
5910
5911void
5912linux_nat_set_prepare_to_resume (struct target_ops *t,
5913 void (*prepare_to_resume) (struct lwp_info *))
5914{
5915 /* Save the pointer. */
5916 linux_nat_prepare_to_resume = prepare_to_resume;
5917}
5918
9f0bdab8
DJ
5919/* Return the saved siginfo associated with PTID. */
5920struct siginfo *
5921linux_nat_get_siginfo (ptid_t ptid)
5922{
5923 struct lwp_info *lp = find_lwp_pid (ptid);
5924
5925 gdb_assert (lp != NULL);
5926
5927 return &lp->siginfo;
5928}
5929
2c0b251b
PA
5930/* Provide a prototype to silence -Wmissing-prototypes. */
5931extern initialize_file_ftype _initialize_linux_nat;
5932
d6b0e80f
AC
5933void
5934_initialize_linux_nat (void)
5935{
f179e162
JK
5936 static struct cmd_list_element *info_proc_cmdlist;
5937
5938 add_prefix_cmd ("proc", class_info, linux_nat_info_proc_cmd,
5939 _("\
1bedd215 5940Show /proc process information about any running process.\n\
f179e162
JK
5941Specify any process id, or use the program being debugged by default."),
5942 &info_proc_cmdlist, "info proc ",
5943 1/*allow-unknown*/, &infolist);
5944
5945 add_cmd ("mappings", class_info, linux_nat_info_proc_cmd_mappings, _("\
5946List of mapped memory regions."),
5947 &info_proc_cmdlist);
5948
5949 add_cmd ("stat", class_info, linux_nat_info_proc_cmd_stat, _("\
080ad648 5950List process info from /proc/PID/stat."),
f179e162
JK
5951 &info_proc_cmdlist);
5952
5953 add_cmd ("status", class_info, linux_nat_info_proc_cmd_status, _("\
080ad648 5954List process info from /proc/PID/status."),
f179e162
JK
5955 &info_proc_cmdlist);
5956
5957 add_cmd ("cwd", class_info, linux_nat_info_proc_cmd_cwd, _("\
080ad648 5958List current working directory of the process."),
f179e162
JK
5959 &info_proc_cmdlist);
5960
5961 add_cmd ("cmdline", class_info, linux_nat_info_proc_cmd_cmdline, _("\
080ad648 5962List command line arguments of the process."),
f179e162
JK
5963 &info_proc_cmdlist);
5964
5965 add_cmd ("exe", class_info, linux_nat_info_proc_cmd_exe, _("\
080ad648 5966List absolute filename for executable of the process."),
f179e162
JK
5967 &info_proc_cmdlist);
5968
5969 add_cmd ("all", class_info, linux_nat_info_proc_cmd_all, _("\
5970List all available /proc info."),
5971 &info_proc_cmdlist);
d6b0e80f 5972
b84876c2
PA
5973 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5974 &debug_linux_nat, _("\
5975Set debugging of GNU/Linux lwp module."), _("\
5976Show debugging of GNU/Linux lwp module."), _("\
5977Enables printf debugging output."),
5978 NULL,
5979 show_debug_linux_nat,
5980 &setdebuglist, &showdebuglist);
5981
b84876c2 5982 /* Save this mask as the default. */
d6b0e80f
AC
5983 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5984
7feb7d06
PA
5985 /* Install a SIGCHLD handler. */
5986 sigchld_action.sa_handler = sigchld_handler;
5987 sigemptyset (&sigchld_action.sa_mask);
5988 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5989
5990 /* Make it the default. */
7feb7d06 5991 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5992
5993 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5994 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5995 sigdelset (&suspend_mask, SIGCHLD);
5996
7feb7d06 5997 sigemptyset (&blocked_mask);
d6b0e80f
AC
5998}
5999\f
6000
6001/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
6002 the GNU/Linux Threads library and therefore doesn't really belong
6003 here. */
6004
6005/* Read variable NAME in the target and return its value if found.
6006 Otherwise return zero. It is assumed that the type of the variable
6007 is `int'. */
6008
6009static int
6010get_signo (const char *name)
6011{
6012 struct minimal_symbol *ms;
6013 int signo;
6014
6015 ms = lookup_minimal_symbol (name, NULL, NULL);
6016 if (ms == NULL)
6017 return 0;
6018
8e70166d 6019 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
6020 sizeof (signo)) != 0)
6021 return 0;
6022
6023 return signo;
6024}
6025
6026/* Return the set of signals used by the threads library in *SET. */
6027
6028void
6029lin_thread_get_thread_signals (sigset_t *set)
6030{
6031 struct sigaction action;
6032 int restart, cancel;
6033
b84876c2 6034 sigemptyset (&blocked_mask);
d6b0e80f
AC
6035 sigemptyset (set);
6036
6037 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
6038 cancel = get_signo ("__pthread_sig_cancel");
6039
6040 /* LinuxThreads normally uses the first two RT signals, but in some legacy
6041 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
6042 not provide any way for the debugger to query the signal numbers -
6043 fortunately they don't change! */
6044
d6b0e80f 6045 if (restart == 0)
17fbb0bd 6046 restart = __SIGRTMIN;
d6b0e80f 6047
d6b0e80f 6048 if (cancel == 0)
17fbb0bd 6049 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
6050
6051 sigaddset (set, restart);
6052 sigaddset (set, cancel);
6053
6054 /* The GNU/Linux Threads library makes terminating threads send a
6055 special "cancel" signal instead of SIGCHLD. Make sure we catch
6056 those (to prevent them from terminating GDB itself, which is
6057 likely to be their default action) and treat them the same way as
6058 SIGCHLD. */
6059
6060 action.sa_handler = sigchld_handler;
6061 sigemptyset (&action.sa_mask);
58aecb61 6062 action.sa_flags = SA_RESTART;
d6b0e80f
AC
6063 sigaction (cancel, &action, NULL);
6064
6065 /* We block the "cancel" signal throughout this code ... */
6066 sigaddset (&blocked_mask, cancel);
6067 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
6068
6069 /* ... except during a sigsuspend. */
6070 sigdelset (&suspend_mask, cancel);
6071}