/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc. */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc. */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good enough.
Prior to version 2.4, Linux can either wait for events in the main thread, or
in secondary threads.  (2.4 has the __WALL flag.)  So, if we use blocking
waitpid, we might miss an event.  The solution is to use non-blocking waitpid,
together with sigsuspend.  First, we use non-blocking waitpid to get an event
in the main process, if any.  Second, we use non-blocking waitpid with the
__WCLONE flag to check for events in cloned processes.  If nothing is found,
we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
something happened to a child process -- and SIGCHLD will be delivered both
for events in the main debugged process and in cloned processes.  As soon as
we know there's an event, we get back to calling non-blocking waitpid with and
without __WCLONE.

Note that SIGCHLD should be blocked between the waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, while it is
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input and target
events, so neither blocking waitpid nor sigsuspend are viable
options.  Instead, we should notify the GDB main event loop whenever there's an
unprocessed event from the target.  The only way to notify this event loop is
to make it wait on input from a pipe, and write something to the pipe whenever
there's an event.  Obviously, if we fail to notify the event loop when there's
a target event, that's bad.  If we notify the event loop when there's no event
from the target, linux-nat.c will detect that there's no event, actually, and
report an event of type TARGET_WAITKIND_IGNORE, but it wastes time and is
better avoided.

The main design point is that every time GDB is outside linux-nat.c, we have a
SIGCHLD handler installed that is called when something happens to the target
and notifies the GDB event loop.  Also, the event is extracted from the target
using waitpid and stored for future use.  Whenever the GDB core decides to
handle the event, and calls into linux-nat.c, we disable SIGCHLD and process
things as in sync mode, except that before the waitpid call we check if there
are any previously read events.

It could happen that during event processing, we'll try to get more events
than there are events in the local queue, which will result in a waitpid call.
Those waitpid calls, while blocking, are guaranteed to always have
something for waitpid to return.  E.g., stopping a thread with SIGSTOP, and
waiting for the lwp to stop.

The event loop is notified about new events using a pipe.  The SIGCHLD handler
does waitpid and writes the results into the pipe.  The GDB event loop has the
other end of the pipe among its sources.  When the event loop starts to
process an event and calls a function in linux-nat.c, all events from the pipe
are transferred into a local queue and SIGCHLD is blocked.  Further processing
goes as in sync mode.  Before we return from linux_nat_wait, we transfer all
unprocessed events from the local queue back to the pipe, so that when we get
back to the event loop, the event loop will notice there's something more to
do.

SIGCHLD is blocked when we're inside target_wait, so that should we actually
want to wait for some more events, the SIGCHLD handler does not steal them
from us.  Technically, it would be possible to add new events to the local
queue, but it's about the same amount of work as blocking SIGCHLD.

This moving of events from the pipe into the local queue and back into the
pipe when we enter/leave linux-nat.c is somewhat ugly.  Unfortunately, the GDB
event loop is home-grown and incapable of waiting on any queue.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
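
/* A minimal, self-contained sketch (not built) of the synchronous wait
   pattern described above: poll for events from regular children and from
   cloned children, and only then sleep in sigsuspend.  This is illustrative
   only; the function name is hypothetical, and the real implementation
   lives in linux_nat_wait.  */
#if 0
static int
sketch_wait_for_any_child (int *statusp)
{
  sigset_t suspend;
  int pid;

  /* Assume the caller runs with SIGCHLD blocked (as this file does); build
     a mask that has SIGCHLD unblocked for the sigsuspend below.  */
  sigprocmask (SIG_BLOCK, NULL, &suspend);
  sigdelset (&suspend, SIGCHLD);

  for (;;)
    {
      /* Check regular (non-cloned) children first, without blocking.  */
      pid = waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
        return pid;

      /* Then check cloned children (threads created with clone).  */
      pid = waitpid (-1, statusp, WNOHANG | __WCLONE);
      if (pid > 0)
        return pid;

      /* Nothing yet.  If SIGCHLD arrived after the waitpid calls above it
         is pending, so sigsuspend returns immediately; otherwise it sleeps
         until the next SIGCHLD.  */
      sigsuspend (&suspend);
    }
}
#endif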

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif

#ifndef PTRACE_GETSIGINFO
#define PTRACE_GETSIGINFO    0x4202
#endif

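/* A small sketch (not built) of how the constants above are consumed: with
   PTRACE_O_TRACEFORK and friends enabled, the tracee stops with SIGTRAP and
   the extended event code is reported in the upper half of the waitpid
   status, while PTRACE_GETEVENTMSG retrieves the associated data (for fork
   events, the new child's pid).  The helper name is hypothetical; the real
   use in this file is in linux_test_for_tracefork.  */
#if 0
static int
sketch_decode_ptrace_event (int pid, int status, long *msg)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) != 0)
    {
      /* status >> 16 is one of the PTRACE_EVENT_* codes above.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, msg);
      return status >> 16;
    }
  return 0;
}
#endif
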
/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

static int debug_linux_nat_async = 0;
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
                    value);
}

static int disable_randomization = 1;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file, _("\
Disabling randomization of debuggee's virtual address space is %s.\n"),
                    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}

static void
set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
  error (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform."));
#endif /* !HAVE_PERSONALITY */
}

static int linux_parent_pid;

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support */

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Number of queued events in the pipe.  */
static volatile int linux_nat_num_queued_events;

/* The possible SIGCHLD handling states.  */

enum sigchld_state
{
  /* SIGCHLD disabled, with action set to sigchld_handler, for the
     sigsuspend in linux_nat_wait.  */
  sigchld_sync,
  /* SIGCHLD enabled, with action set to async_sigchld_handler.  */
  sigchld_async,
  /* Set SIGCHLD to default action.  Used while creating an
     inferior.  */
  sigchld_default
};

/* The current SIGCHLD handling state.  */
static enum sigchld_state linux_nat_async_events_state;

static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
static void pipe_to_local_event_queue (void);
static void local_event_queue_to_pipe (void);
static void linux_nat_event_pipe_push (int pid, int status, int options);
static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
static void linux_nat_set_async_mode (int on);
static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type, void *context),
                             void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

static int send_sigint_callback (struct lwp_info *lp, void *data);
static int stop_callback (struct lwp_info *lp, void *data);

/* Captures the result of a successful waitpid call, along with the
   options used in that call.  */
struct waitpid_result
{
  int pid;
  int status;
  int options;
  struct waitpid_result *next;
};

/* A singly-linked list of the results of the waitpid calls performed
   in the async SIGCHLD handler.  */
static struct waitpid_result *waitpid_queue = NULL;

static int
queued_waitpid (int pid, int *status, int flags)
{
  struct waitpid_result *msg = waitpid_queue, *prev = NULL;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog,
                        "\
QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
                        linux_nat_async_events_state,
                        linux_nat_num_queued_events);

  if (flags & __WALL)
    {
      for (; msg; prev = msg, msg = msg->next)
        if (pid == -1 || pid == msg->pid)
          break;
    }
  else if (flags & __WCLONE)
    {
      for (; msg; prev = msg, msg = msg->next)
        if (msg->options & __WCLONE
            && (pid == -1 || pid == msg->pid))
          break;
    }
  else
    {
      for (; msg; prev = msg, msg = msg->next)
        if ((msg->options & __WCLONE) == 0
            && (pid == -1 || pid == msg->pid))
          break;
    }

  if (msg)
    {
      int pid;

      if (prev)
        prev->next = msg->next;
      else
        waitpid_queue = msg->next;

      msg->next = NULL;
      if (status)
        *status = msg->status;
      pid = msg->pid;

      if (debug_linux_nat_async)
        fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
                            pid, msg->status);
      xfree (msg);

      return pid;
    }

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");

  if (status)
    *status = 0;
  return -1;
}

static void
push_waitpid (int pid, int status, int options)
{
  struct waitpid_result *event, *new_event;

  new_event = xmalloc (sizeof (*new_event));
  new_event->pid = pid;
  new_event->status = status;
  new_event->options = options;
  new_event->next = NULL;

  if (waitpid_queue)
    {
      for (event = waitpid_queue;
           event && event->next;
           event = event->next)
        ;

      event->next = new_event;
    }
  else
    waitpid_queue = new_event;
}

/* Drain all queued events of PID.  If PID is -1, the effect is of
   draining all events.  */
static void
drain_queued_events (int pid)
{
  while (queued_waitpid (pid, NULL, __WALL) != -1)
    ;
}


/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;
        *status = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}


/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  int ret;

  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR, and checks for
   locally queued events.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret;

  /* There should be no concurrent calls to waitpid.  */
  gdb_assert (linux_nat_async_events_state == sigchld_sync);

  ret = queued_waitpid (pid, status, flags);
  if (ret != -1)
    return ret;

  do
    {
      ret = waitpid (pid, status, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  enum sigchld_state async_events_original_state;

  async_events_original_state = linux_nat_async_events (sigchld_sync);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          linux_nat_async_events (async_events_original_state);
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
                   "killed child"), status);

      linux_nat_async_events (async_events_original_state);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  linux_nat_async_events (async_events_original_state);
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}


void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);
  int options;

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
    | PTRACE_O_TRACECLONE;
  if (linux_supports_tracevforkdone (pid))
    options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  ptid_t last_ptid;
  struct target_waitstatus last_status;
  int has_vforked;
  int parent_pid, child_pid;

  if (target_can_async_p ())
    target_async (NULL, 0);

  get_last_target_status (&last_ptid, &last_status);
  has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (last_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (last_ptid);
  child_pid = PIDGET (last_status.value.related_pid);

  if (! follow_child)
    {
      /* We're already attached to the parent, by default.  */

      /* Before detaching from the child, remove all breakpoints from
         it.  (This won't actually modify the breakpoint list, but will
         physically remove the breakpoints from the child.)  */
      /* If we vforked this will remove the breakpoints from the parent
         also, but they'll be reinserted below.  */
      detach_breakpoints (child_pid);

      /* Detach new forked process?  */
      if (detach_fork)
        {
          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from child process %d.\n",
                                child_pid);
            }

          ptrace (PTRACE_DETACH, child_pid, 0, 0);
        }
      else
        {
          struct fork_info *fp;
          /* Retain child fork in ptrace (stopped) state.  */
          fp = find_fork_pid (child_pid);
          if (!fp)
            fp = add_fork (child_pid);
          fork_save_infrun_state (fp, 0);
        }

      if (has_vforked)
        {
          gdb_assert (linux_supports_tracefork_flag >= 0);
          if (linux_supports_tracevforkdone (0))
            {
              int status;

              ptrace (PTRACE_CONT, parent_pid, 0, 0);
              my_waitpid (parent_pid, &status, __WALL);
              if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
                warning (_("Unexpected waitpid result %06x when waiting for "
                           "vfork-done"), status);
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              usleep (10000);
            }

          /* Since we vforked, breakpoints were removed in the parent
             too.  Put them back.  */
          reattach_breakpoints (parent_pid);
        }
    }
  else
    {
      struct thread_info *last_tp = find_thread_pid (last_ptid);
      struct thread_info *tp;
      char child_pid_spelling[40];

      /* Copy user stepping state to the new inferior thread.  */
      struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint;
      CORE_ADDR step_range_start = last_tp->step_range_start;
      CORE_ADDR step_range_end = last_tp->step_range_end;
      struct frame_id step_frame_id = last_tp->step_frame_id;

      /* Otherwise, deleting the parent would get rid of this
         breakpoint.  */
      last_tp->step_resume_breakpoint = NULL;

      /* Needed to keep the breakpoint lists in sync.  */
      if (! has_vforked)
        detach_breakpoints (child_pid);

      /* Before detaching from the parent, remove all breakpoints from it.  */
      remove_breakpoints ();

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          fprintf_filtered (gdb_stdlog,
                            "Attaching after fork to child process %d.\n",
                            child_pid);
        }

      /* If we're vforking, we may want to hold on to the parent until
         the child exits or execs.  At exec time we can remove the old
         breakpoints from the parent and detach it; at exit time we
         could do the same (or even, sneakily, resume debugging it - the
         child's exec has failed, or something similar).

         This doesn't clean up "properly", because we can't call
         target_detach, but that's OK; if the current target is "child",
         then it doesn't need any further cleanups, and lin_lwp will
         generally not encounter vfork (vfork is defined to fork
         in libpthread.so).

         The holding part is very easy if we have VFORKDONE events;
         but keeping track of both processes is beyond GDB at the
         moment.  So we don't expose the parent to the rest of GDB.
         Instead we quietly hold onto it until such time as we can
         safely resume it.  */

      if (has_vforked)
        linux_parent_pid = parent_pid;
      else if (!detach_fork)
        {
          struct fork_info *fp;
          /* Retain parent fork in ptrace (stopped) state.  */
          fp = find_fork_pid (parent_pid);
          if (!fp)
            fp = add_fork (parent_pid);
          fork_save_infrun_state (fp, 0);
        }
      else
        target_detach (NULL, 0);

      inferior_ptid = ptid_build (child_pid, child_pid, 0);

      /* Reinstall ourselves, since we might have been removed in
         target_detach (which does other necessary cleanup).  */

      push_target (ops);
      linux_nat_switch_fork (inferior_ptid);
      check_for_thread_db ();

      tp = inferior_thread ();
      tp->step_resume_breakpoint = step_resume_breakpoint;
      tp->step_range_start = step_range_start;
      tp->step_range_end = step_range_end;
      tp->step_frame_id = step_frame_id;

      /* Reset breakpoints in the child as appropriate.  */
      follow_inferior_reset_breakpoints ();
    }

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  return 0;
}


static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}

static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}

static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     a debugged multi-threaded process that spawns a lot of threads
     will run out of processes, even if the threads exit, because the
     "zombies" stay around.  */
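
/* A minimal illustration (not built) of the __WCLONE peculiarity described
   above: a specific LWP of unknown origin has to be waited for both with and
   without __WCLONE, since events from cloned LWPs are only reported when the
   flag is passed.  The helper name is hypothetical; the real pattern appears
   in linux_nat_post_attach_wait below.  */
#if 0
static int
sketch_wait_specific_lwp (int lwpid, int *statusp)
{
  int pid = waitpid (lwpid, statusp, 0);

  if (pid == -1 && errno == ECHILD)
    /* Not a regular child; retry as a cloned LWP.  */
    pid = waitpid (lwpid, statusp, __WCLONE);

  return pid;
}
#endif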

/* List of known LWPs.  */
struct lwp_info *lwp_list;

/* Number of LWPs in the list.  */
static int num_lwps;


/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* SIGCHLD action for synchronous mode.  */
struct sigaction sync_sigchld_action;

/* SIGCHLD action for asynchronous mode.  */
static struct sigaction async_sigchld_action;

/* SIGCHLD default action, to pass to new inferiors.  */
static struct sigaction sigchld_default_action;


/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_nat_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);
static int cancel_breakpoint (struct lwp_info *lp);


/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    snprintf (buf, sizeof (buf), "%s (stopped)",
              strsignal (WSTOPSIG (status)));
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WSTOPSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Initialize the list of LWPs.  Note that this module, contrary to
   what GDB's generic threads layer does for its thread list,
   re-initializes the LWP lists whenever we mourn or detach (which
   doesn't involve mourning) the inferior.  */

static void
init_lwp_list (void)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      xfree (lp);
    }

  lwp_list = NULL;
  num_lwps = 0;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;

  lp->next = lwp_list;
  lwp_list = lp;
  ++num_lwps;

  if (num_lwps > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  num_lwps--;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      if ((*callback) (lp, data))
        return lp;
    }

  return NULL;
}

/* Update our internal state when changing from one fork (checkpoint,
   et cetera) to another indicated by NEW_PTID.  We can only switch
   single-threaded applications, so we only create one new LWP, and
   the previous list is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  init_lwp_list ();
  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  init_thread_list ();
  add_thread_silent (new_ptid);
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_pid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      while (fgets (buf, sizeof (buf), status_file))
        {
          if (strncmp (buf, "State:", 6) == 0)
            {
              have_state = 1;
              break;
            }
        }
      if (have_state && strstr (buf, "T (stopped)") != NULL)
        retval = 1;
      fclose (status_file);
    }
  return retval;
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
                            int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
        warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid && WIFSTOPPED (status));

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful or -1
   if the new LWP could not be attached.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  enum sigchld_state async_events_original_state;

  gdb_assert (is_lwp (ptid));

  async_events_original_state = linux_nat_async_events (sigchld_sync);

  lp = find_lwp_pid (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
        {
          /* If we fail to attach to the thread, issue a warning,
             but continue.  One way this can happen is if thread
             creation is interrupted; as of Linux kernel 2.6.19, a
             bug may place threads in the thread list and then fail
             to create them.  */
          warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
                   safe_strerror (errno));
          return -1;
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
                            target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
        {
          lp->resumed = 1;
          lp->status = status;
        }

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "LLAL: waitpid %s received %s\n",
                              target_pid_to_str (ptid),
                              status_to_str (status));
        }
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
        lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  linux_nat_async_events (async_events_original_state);
  return 0;
}

static void
linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
                           int from_tty)
{
  int saved_async = 0;
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  if (target_can_async_p ())
    /* Mask async mode.  Creating a child requires a loop calling
       wait_for_inferior currently.  */
    saved_async = linux_nat_async_mask (0);
  else
    {
      /* Restore the original signal mask.  */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend.  */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  /* Set SIGCHLD to the default action, until after execing the child,
     since the inferior inherits the superior's signal mask.  It will
     be blocked again in linux_nat_wait, which is only reached after
     the inferior execing.  */
  linux_nat_async_events (sigchld_default);

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning (_("Error disabling address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning (_("Error restoring address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  if (saved_async)
    linux_nat_async_mask (saved_async);
}

static void
linux_nat_attach (char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* FIXME: We should probably accept a list of process id's, and
     attach all of them.  */
  linux_ops->to_attach (args, from_tty);

  if (!target_can_async_p ())
    {
      /* Restore the original signal mask.  */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend.  */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
                                       &lp->signalled);
  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LNA: waitpid %ld, saving status %s\n",
                        (long) GET_PID (lp->ptid), status_to_str (status));

  if (!target_can_async_p ())
    lp->status = status;
  else
    {
      /* We already waited for this LWP, so put the wait result on the
         pipe.  The event loop will wake up and get us to handle
         this event.  */
      linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
                                 lp->cloned ? __WCLONE : 0);
      /* Register in the event loop.  */
      target_async (inferior_event_handler, 0);
    }
}

DJ
1414/* Get pending status of LP. */
1415static int
1416get_pending_status (struct lwp_info *lp, int *status)
1417{
1418 struct target_waitstatus last;
1419 ptid_t last_ptid;
1420
1421 get_last_target_status (&last_ptid, &last);
1422
1423 /* If this lwp is the ptid that GDB is processing an event from, the
1424 signal will be in stop_signal. Otherwise, in all-stop + sync
1425 mode, we may cache pending events in lp->status while trying to
1426 stop all threads (see stop_wait_callback). In async mode, the
1427 events are always cached in waitpid_queue. */
1428
1429 *status = 0;
4c28f408
PA
1430
1431 if (non_stop)
a0ef4274 1432 {
4c28f408
PA
1433 enum target_signal signo = TARGET_SIGNAL_0;
1434
1435 if (is_executing (lp->ptid))
1436 {
1437 /* If the core thought this lwp was executing --- e.g., the
1438 executing property hasn't been updated yet, but the
1439 thread has been stopped with a stop_callback /
1440 stop_wait_callback sequence (see linux_nat_detach for
1441 example) --- we can only have pending events in the local
1442 queue. */
1443 if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
1444 {
1445 if (WIFSTOPPED (status))
1446 signo = target_signal_from_host (WSTOPSIG (status));
1447
1448 /* If not stopped, then the lwp is gone, no use in
1449 resending a signal. */
1450 }
1451 }
1452 else
1453 {
1454 /* If the core knows the thread is not executing, then we
1455 have the last signal recorded in
1456 thread_info->stop_signal, unless this is inferior_ptid,
1457 in which case, it's in the global stop_signal, due to
1458 context switching. */
1459
1460 if (ptid_equal (lp->ptid, inferior_ptid))
1461 signo = stop_signal;
1462 else
1463 {
1464 struct thread_info *tp = find_thread_pid (lp->ptid);
1465 gdb_assert (tp);
1466 signo = tp->stop_signal;
1467 }
1468 }
1469
1470 if (signo != TARGET_SIGNAL_0
1471 && !signal_pass_state (signo))
1472 {
1473 if (debug_linux_nat)
1474 fprintf_unfiltered (gdb_stdlog, "\
1475GPT: lwp %s had signal %s, but it is in no pass state\n",
1476 target_pid_to_str (lp->ptid),
1477 target_signal_to_string (signo));
1478 }
1479 else
1480 {
1481 if (signo != TARGET_SIGNAL_0)
1482 *status = W_STOPCODE (target_signal_to_host (signo));
1483
1484 if (debug_linux_nat)
1485 fprintf_unfiltered (gdb_stdlog,
1486 "GPT: lwp %s as pending signal %s\n",
1487 target_pid_to_str (lp->ptid),
1488 target_signal_to_string (signo));
1489 }
a0ef4274 1490 }
a0ef4274 1491 else
4c28f408
PA
1492 {
1493 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1494 {
1495 if (stop_signal != TARGET_SIGNAL_0
1496 && signal_pass_state (stop_signal))
1497 *status = W_STOPCODE (target_signal_to_host (stop_signal));
1498 }
1499 else if (target_can_async_p ())
1500 queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
1501 else
1502 *status = lp->status;
1503 }
a0ef4274
DJ
1504
1505 return 0;
1506}
1507
d6b0e80f
AC
1508static int
1509detach_callback (struct lwp_info *lp, void *data)
1510{
1511 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1512
1513 if (debug_linux_nat && lp->status)
1514 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1515 strsignal (WSTOPSIG (lp->status)),
1516 target_pid_to_str (lp->ptid));
1517
a0ef4274
DJ
1518 /* If there is a pending SIGSTOP, get rid of it. */
1519 if (lp->signalled)
d6b0e80f 1520 {
d6b0e80f
AC
1521 if (debug_linux_nat)
1522 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1523 "DC: Sending SIGCONT to %s\n",
1524 target_pid_to_str (lp->ptid));
d6b0e80f 1525
a0ef4274 1526 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1527 lp->signalled = 0;
d6b0e80f
AC
1528 }
1529
1530 /* We don't actually detach from the LWP that has an id equal to the
1531 overall process id just yet. */
1532 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1533 {
a0ef4274
DJ
1534 int status = 0;
1535
1536 /* Pass on any pending signal for this LWP. */
1537 get_pending_status (lp, &status);
1538
d6b0e80f
AC
1539 errno = 0;
1540 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1541 WSTOPSIG (status)) < 0)
8a3fe4f8 1542 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1543 safe_strerror (errno));
1544
1545 if (debug_linux_nat)
1546 fprintf_unfiltered (gdb_stdlog,
1547 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1548 target_pid_to_str (lp->ptid),
1549 strsignal (WSTOPSIG (lp->status)));
1550
1551 delete_lwp (lp->ptid);
1552 }
1553
1554 return 0;
1555}
1556
1557static void
1558linux_nat_detach (char *args, int from_tty)
1559{
b84876c2 1560 int pid;
a0ef4274
DJ
1561 int status;
1562 enum target_signal sig;
1563
b84876c2
PA
1564 if (target_can_async_p ())
1565 linux_nat_async (NULL, 0);
1566
4c28f408
PA
1567 /* Stop all threads before detaching. ptrace requires that the
1568 thread is stopped to sucessfully detach. */
1569 iterate_over_lwps (stop_callback, NULL);
1570 /* ... and wait until all of them have reported back that
1571 they're no longer running. */
1572 iterate_over_lwps (stop_wait_callback, NULL);
1573
d6b0e80f
AC
1574 iterate_over_lwps (detach_callback, NULL);
1575
1576 /* Only the initial process should be left right now. */
1577 gdb_assert (num_lwps == 1);
1578
a0ef4274
DJ
1579 /* Pass on any pending signal for the last LWP. */
1580 if ((args == NULL || *args == '\0')
1581 && get_pending_status (lwp_list, &status) != -1
1582 && WIFSTOPPED (status))
1583 {
1584 /* Put the signal number in ARGS so that inf_ptrace_detach will
1585 pass it along with PTRACE_DETACH. */
1586 args = alloca (8);
1587 sprintf (args, "%d", (int) WSTOPSIG (status));
1588 fprintf_unfiltered (gdb_stdlog,
1589 "LND: Sending signal %s to %s\n",
1590 args,
1591 target_pid_to_str (lwp_list->ptid));
1592 }
1593
d6b0e80f
AC
1594 /* Destroy LWP info; it's no longer valid. */
1595 init_lwp_list ();
1596
b84876c2
PA
1597 pid = GET_PID (inferior_ptid);
1598 inferior_ptid = pid_to_ptid (pid);
10d6c8cd 1599 linux_ops->to_detach (args, from_tty);
b84876c2
PA
1600
1601 if (target_can_async_p ())
1602 drain_queued_events (pid);
d6b0e80f
AC
1603}
1604
/* Resume LP.  */

static int
resume_callback (struct lwp_info *lp, void *data)
{
  if (lp->stopped && lp->status == 0)
    {
      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
                            0, TARGET_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "RC:  PTRACE_CONT %s, 0, 0 (resume sibling)\n",
                            target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      lp->step = 0;
      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
    }
  else if (lp->stopped && debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
                        target_pid_to_str (lp->ptid));
  else if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
                        target_pid_to_str (lp->ptid));

  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  return 0;
}

static void
linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
{
  struct lwp_info *lp;
  int resume_all;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
                        step ? "step" : "resume",
                        target_pid_to_str (ptid),
                        signo ? strsignal (signo) : "0",
                        target_pid_to_str (inferior_ptid));

  if (target_can_async_p ())
    /* Block events while we're here.  */
    linux_nat_async_events (sigchld_sync);

  /* A specific PTID means `step only this process id'.  */
  resume_all = (PIDGET (ptid) == -1);

  if (non_stop && resume_all)
    internal_error (__FILE__, __LINE__,
                    "can't resume all in non-stop mode");

  if (!non_stop)
    {
      if (resume_all)
        iterate_over_lwps (resume_set_callback, NULL);
      else
        iterate_over_lwps (resume_clear_callback, NULL);
    }

  /* If PID is -1, it's the current inferior that should be
     handled specially.  */
  if (PIDGET (ptid) == -1)
    ptid = inferior_ptid;

  lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  /* Remember if we're stepping.  */
  lp->step = step;

  /* Mark this LWP as resumed.  */
  lp->resumed = 1;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  /* In async mode, we never have pending wait status.  */
  if (target_can_async_p () && lp->status)
    internal_error (__FILE__, __LINE__, "Pending status in async mode");

  if (lp->status && WIFSTOPPED (lp->status))
    {
      int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));

      if (signal_stop_state (saved_signo) == 0
          && signal_print_state (saved_signo) == 0
          && signal_pass_state (saved_signo) == 1)
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "LLR: Not short circuiting for ignored "
                                "status 0x%x\n", lp->status);

          /* FIXME: What should we do if we are supposed to continue
             this thread with a signal?  */
          gdb_assert (signo == TARGET_SIGNAL_0);
          signo = saved_signo;
          lp->status = 0;
        }
    }

  if (lp->status)
    {
      /* FIXME: What should we do if we are supposed to continue
         this thread with a signal?  */
      gdb_assert (signo == TARGET_SIGNAL_0);

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLR: Short circuiting for status 0x%x\n",
                            lp->status);

      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     resume_callback.  */
  lp->stopped = 0;

  if (resume_all)
    iterate_over_lwps (resume_callback, NULL);

  linux_ops->to_resume (ptid, step, signo);
  memset (&lp->siginfo, 0, sizeof (lp->siginfo));

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LLR: %s %s, %s (resume event thread)\n",
                        step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                        target_pid_to_str (ptid),
                        signo ? strsignal (signo) : "0");

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Issue kill to specified lwp.  */

static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

/* Use tkill, if possible, in case we are using nptl threads.  If tkill
   fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int ret = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
        return ret;
      errno = 0;
      tkill_failed = 1;
    }
#endif
1786
1787 return kill (lwpid, signo);
1788}
1789
3d799a95
DJ
1790/* Handle a GNU/Linux extended wait response. If we see a clone
1791 event, we need to add the new LWP to our list (and not report the
1792 trap to higher layers). This function returns non-zero if the
1793 event should be ignored and we should wait again. If STOPPING is
1794 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1795
1796static int
3d799a95
DJ
1797linux_handle_extended_wait (struct lwp_info *lp, int status,
1798 int stopping)
d6b0e80f 1799{
3d799a95
DJ
1800 int pid = GET_LWP (lp->ptid);
1801 struct target_waitstatus *ourstatus = &lp->waitstatus;
1802 struct lwp_info *new_lp = NULL;
1803 int event = status >> 16;
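 /* With the PTRACE_O_TRACE* options enabled, the kernel reports
    extended events as SIGTRAP stops whose PTRACE_EVENT_* code is
    stored in bits 16 and up of the wait status, hence the shift.  */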
d6b0e80f 1804
3d799a95
DJ
1805 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1806 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1807 {
3d799a95
DJ
1808 unsigned long new_pid;
1809 int ret;
1810
1811 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1812
3d799a95
DJ
1813 /* If we haven't already seen the new PID stop, wait for it now. */
1814 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1815 {
1816 /* The new child has a pending SIGSTOP. We can't affect it until it
1817 hits the SIGSTOP, but we're already attached. */
1818 ret = my_waitpid (new_pid, &status,
1819 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1820 if (ret == -1)
1821 perror_with_name (_("waiting for new child"));
1822 else if (ret != new_pid)
1823 internal_error (__FILE__, __LINE__,
1824 _("wait returned unexpected PID %d"), ret);
1825 else if (!WIFSTOPPED (status))
1826 internal_error (__FILE__, __LINE__,
1827 _("wait returned unexpected status 0x%x"), status);
1828 }
1829
3a3e9ee3 1830 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95
DJ
1831
1832 if (event == PTRACE_EVENT_FORK)
1833 ourstatus->kind = TARGET_WAITKIND_FORKED;
1834 else if (event == PTRACE_EVENT_VFORK)
1835 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1836 else
3d799a95 1837 {
4c28f408
PA
1838 struct cleanup *old_chain;
1839
3d799a95
DJ
1840 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1841 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1842 new_lp->cloned = 1;
4c28f408 1843 new_lp->stopped = 1;
d6b0e80f 1844
3d799a95
DJ
1845 if (WSTOPSIG (status) != SIGSTOP)
1846 {
1847 /* This can happen if someone starts sending signals with
1848 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
1849 thread before it gets a chance to run.
1850 This is an unlikely case, and harder to handle for
1851 fork / vfork than for clone, so we do not try - but
1852 we handle it for clone events here. We'll send
1853 the other signal on to the thread below. */
1854
1855 new_lp->signalled = 1;
1856 }
1857 else
1858 status = 0;
d6b0e80f 1859
4c28f408 1860 if (non_stop)
3d799a95 1861 {
4c28f408
PA
1862 /* Add the new thread to GDB's lists as soon as possible
1863 so that:
1864
1865 1) the frontend doesn't have to wait for a stop to
1866 display them, and,
1867
1868 2) we tag it with the correct running state. */
1869
1870 /* If the thread_db layer is active, let it know about
1871 this new thread, and add it to GDB's list. */
1872 if (!thread_db_attach_lwp (new_lp->ptid))
1873 {
1874 /* We're not using thread_db. Add it to GDB's
1875 list. */
1876 target_post_attach (GET_LWP (new_lp->ptid));
1877 add_thread (new_lp->ptid);
1878 }
1879
1880 if (!stopping)
1881 {
1882 set_running (new_lp->ptid, 1);
1883 set_executing (new_lp->ptid, 1);
1884 }
1885 }
1886
1887 if (!stopping)
1888 {
1889 new_lp->stopped = 0;
3d799a95 1890 new_lp->resumed = 1;
4c28f408 1891 ptrace (PTRACE_CONT, new_pid, 0,
3d799a95
DJ
1892 status ? WSTOPSIG (status) : 0);
1893 }
d6b0e80f 1894
3d799a95
DJ
1895 if (debug_linux_nat)
1896 fprintf_unfiltered (gdb_stdlog,
1897 "LHEW: Got clone event from LWP %ld, resuming\n",
1898 GET_LWP (lp->ptid));
1899 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1900
1901 return 1;
1902 }
1903
1904 return 0;
d6b0e80f
AC
1905 }
1906
3d799a95
DJ
1907 if (event == PTRACE_EVENT_EXEC)
1908 {
1909 ourstatus->kind = TARGET_WAITKIND_EXECD;
1910 ourstatus->value.execd_pathname
6d8fd2b7 1911 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95
DJ
1912
1913 if (linux_parent_pid)
1914 {
1915 detach_breakpoints (linux_parent_pid);
1916 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1917
1918 linux_parent_pid = 0;
1919 }
1920
25b22b0a
PA
1921 /* At this point, all inserted breakpoints are gone. Doing this
1922 as soon as we detect an exec prevents the badness of deleting
1923 a breakpoint writing the current "shadow contents" to lift
1924 the bp. That shadow is NOT valid after an exec.
1925
1926 Note that we have to do this after the detach_breakpoints
1927 call above, otherwise breakpoints wouldn't be lifted from the
1928 parent on a vfork, because detach_breakpoints would think
1929 that breakpoints are not inserted. */
1930 mark_breakpoints_out ();
3d799a95
DJ
1931 return 0;
1932 }
1933
1934 internal_error (__FILE__, __LINE__,
1935 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1936}
1937
1938/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1939 exited. */
1940
1941static int
1942wait_lwp (struct lwp_info *lp)
1943{
1944 pid_t pid;
1945 int status;
1946 int thread_dead = 0;
1947
1948 gdb_assert (!lp->stopped);
1949 gdb_assert (lp->status == 0);
1950
58aecb61 1951 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1952 if (pid == -1 && errno == ECHILD)
1953 {
58aecb61 1954 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1955 if (pid == -1 && errno == ECHILD)
1956 {
1957 /* The thread has previously exited. We need to delete it
1958 now because, for some vendor 2.4 kernels with NPTL
1959 support backported, there won't be an exit event unless
1960 it is the main thread. 2.6 kernels will report an exit
1961 event for each thread that exits, as expected. */
1962 thread_dead = 1;
1963 if (debug_linux_nat)
1964 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1965 target_pid_to_str (lp->ptid));
1966 }
1967 }
1968
1969 if (!thread_dead)
1970 {
1971 gdb_assert (pid == GET_LWP (lp->ptid));
1972
1973 if (debug_linux_nat)
1974 {
1975 fprintf_unfiltered (gdb_stdlog,
1976 "WL: waitpid %s received %s\n",
1977 target_pid_to_str (lp->ptid),
1978 status_to_str (status));
1979 }
1980 }
1981
1982 /* Check if the thread has exited. */
1983 if (WIFEXITED (status) || WIFSIGNALED (status))
1984 {
1985 thread_dead = 1;
1986 if (debug_linux_nat)
1987 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1988 target_pid_to_str (lp->ptid));
1989 }
1990
1991 if (thread_dead)
1992 {
e26af52f 1993 exit_lwp (lp);
d6b0e80f
AC
1994 return 0;
1995 }
1996
1997 gdb_assert (WIFSTOPPED (status));
1998
1999 /* Handle GNU/Linux's extended waitstatus for trace events. */
2000 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2001 {
2002 if (debug_linux_nat)
2003 fprintf_unfiltered (gdb_stdlog,
2004 "WL: Handling extended status 0x%06x\n",
2005 status);
3d799a95 2006 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2007 return wait_lwp (lp);
2008 }
2009
2010 return status;
2011}
2012
9f0bdab8
DJ
2013/* Save the most recent siginfo for LP. This is currently only called
2014 for SIGTRAP; some ports use the si_addr field for
2015 target_stopped_data_address. In the future, it may also be used to
2016 restore the siginfo of requeued signals. */
2017
2018static void
2019save_siginfo (struct lwp_info *lp)
2020{
2021 errno = 0;
2022 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2023 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2024
2025 if (errno != 0)
2026 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2027}
2028
d6b0e80f
AC
2029/* Send a SIGSTOP to LP. */
2030
2031static int
2032stop_callback (struct lwp_info *lp, void *data)
2033{
2034 if (!lp->stopped && !lp->signalled)
2035 {
2036 int ret;
2037
2038 if (debug_linux_nat)
2039 {
2040 fprintf_unfiltered (gdb_stdlog,
2041 "SC: kill %s **<SIGSTOP>**\n",
2042 target_pid_to_str (lp->ptid));
2043 }
2044 errno = 0;
2045 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2046 if (debug_linux_nat)
2047 {
2048 fprintf_unfiltered (gdb_stdlog,
2049 "SC: lwp kill %d %s\n",
2050 ret,
2051 errno ? safe_strerror (errno) : "ERRNO-OK");
2052 }
2053
2054 lp->signalled = 1;
2055 gdb_assert (lp->status == 0);
2056 }
2057
2058 return 0;
2059}
2060
57380f4e 2061/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2062
2063static int
57380f4e
DJ
2064linux_nat_has_pending_sigint (int pid)
2065{
2066 sigset_t pending, blocked, ignored;
2067 int i;
2068
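 /* linux_proc_pending_signals extracts the pending, blocked and
    ignored sets from the SigPnd/ShdPnd, SigBlk and SigIgn lines of
    /proc/PID/status.  */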
2069 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2070
2071 if (sigismember (&pending, SIGINT)
2072 && !sigismember (&ignored, SIGINT))
2073 return 1;
2074
2075 return 0;
2076}
2077
2078/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2079
2080static int
2081set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2082{
57380f4e
DJ
2083 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2084 flag to consume the next one. */
2085 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2086 && WSTOPSIG (lp->status) == SIGINT)
2087 lp->status = 0;
2088 else
2089 lp->ignore_sigint = 1;
2090
2091 return 0;
2092}
2093
2094/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2095 This function is called after we know the LWP has stopped; if the LWP
2096 stopped before the expected SIGINT was delivered, then it will never have
2097 arrived. Also, if the signal was delivered to a shared queue and consumed
2098 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2099
57380f4e
DJ
2100static void
2101maybe_clear_ignore_sigint (struct lwp_info *lp)
2102{
2103 if (!lp->ignore_sigint)
2104 return;
2105
2106 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2107 {
2108 if (debug_linux_nat)
2109 fprintf_unfiltered (gdb_stdlog,
2110 "MCIS: Clearing bogus flag for %s\n",
2111 target_pid_to_str (lp->ptid));
2112 lp->ignore_sigint = 0;
2113 }
2114}
2115
2116/* Wait until LP is stopped. */
2117
2118static int
2119stop_wait_callback (struct lwp_info *lp, void *data)
2120{
d6b0e80f
AC
2121 if (!lp->stopped)
2122 {
2123 int status;
2124
2125 status = wait_lwp (lp);
2126 if (status == 0)
2127 return 0;
2128
57380f4e
DJ
2129 if (lp->ignore_sigint && WIFSTOPPED (status)
2130 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2131 {
57380f4e 2132 lp->ignore_sigint = 0;
d6b0e80f
AC
2133
2134 errno = 0;
2135 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2136 if (debug_linux_nat)
2137 fprintf_unfiltered (gdb_stdlog,
57380f4e 2138 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
d6b0e80f
AC
2139 target_pid_to_str (lp->ptid),
2140 errno ? safe_strerror (errno) : "OK");
2141
57380f4e 2142 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2143 }
2144
57380f4e
DJ
2145 maybe_clear_ignore_sigint (lp);
2146
d6b0e80f
AC
2147 if (WSTOPSIG (status) != SIGSTOP)
2148 {
2149 if (WSTOPSIG (status) == SIGTRAP)
2150 {
2151 /* If a LWP other than the LWP that we're reporting an
2152 event for has hit a GDB breakpoint (as opposed to
2153 some random trap signal), then just arrange for it to
2154 hit it again later. We don't keep the SIGTRAP status
2155 and don't forward the SIGTRAP signal to the LWP. We
2156 will handle the current event, eventually we will
2157 resume all LWPs, and this one will get its breakpoint
2158 trap again.
2159
2160 If we do not do this, then we run the risk that the
2161 user will delete or disable the breakpoint, but the
2162 thread will have already tripped on it. */
2163
9f0bdab8
DJ
2164 /* Save the trap's siginfo in case we need it later. */
2165 save_siginfo (lp);
2166
d6b0e80f
AC
2167 /* Now resume this LWP and get the SIGSTOP event. */
2168 errno = 0;
2169 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2170 if (debug_linux_nat)
2171 {
2172 fprintf_unfiltered (gdb_stdlog,
2173 "PTRACE_CONT %s, 0, 0 (%s)\n",
2174 target_pid_to_str (lp->ptid),
2175 errno ? safe_strerror (errno) : "OK");
2176
2177 fprintf_unfiltered (gdb_stdlog,
2178 "SWC: Candidate SIGTRAP event in %s\n",
2179 target_pid_to_str (lp->ptid));
2180 }
710151dd
PA
2181 /* Hold this event/waitstatus while we check to see if
2182 there are any more (we still want to get that SIGSTOP). */
57380f4e 2183 stop_wait_callback (lp, NULL);
710151dd
PA
2184
2185 if (target_can_async_p ())
d6b0e80f 2186 {
710151dd
PA
2187 /* Don't leave a pending wait status in async mode.
2188 Retrigger the breakpoint. */
2189 if (!cancel_breakpoint (lp))
d6b0e80f 2190 {
710151dd
PA
2191 /* There was no gdb breakpoint set at pc. Put
2192 the event back in the queue. */
2193 if (debug_linux_nat)
2194 fprintf_unfiltered (gdb_stdlog,
2195 "SWC: kill %s, %s\n",
2196 target_pid_to_str (lp->ptid),
2197 status_to_str ((int) status));
2198 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2199 }
2200 }
2201 else
2202 {
2203 /* Hold the SIGTRAP for handling by
2204 linux_nat_wait. */
2205 /* If there's another event, throw it back into the
2206 queue. */
2207 if (lp->status)
2208 {
2209 if (debug_linux_nat)
2210 fprintf_unfiltered (gdb_stdlog,
2211 "SWC: kill %s, %s\n",
2212 target_pid_to_str (lp->ptid),
2213 status_to_str ((int) status));
2214 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2215 }
710151dd
PA
2216 /* Save the sigtrap event. */
2217 lp->status = status;
d6b0e80f 2218 }
d6b0e80f
AC
2219 return 0;
2220 }
2221 else
2222 {
2223 /* The thread was stopped with a signal other than
2224 SIGSTOP, and didn't accidentally trip a breakpoint. */
2225
2226 if (debug_linux_nat)
2227 {
2228 fprintf_unfiltered (gdb_stdlog,
2229 "SWC: Pending event %s in %s\n",
2230 status_to_str ((int) status),
2231 target_pid_to_str (lp->ptid));
2232 }
2233 /* Now resume this LWP and get the SIGSTOP event. */
2234 errno = 0;
2235 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2236 if (debug_linux_nat)
2237 fprintf_unfiltered (gdb_stdlog,
2238 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2239 target_pid_to_str (lp->ptid),
2240 errno ? safe_strerror (errno) : "OK");
2241
2242 /* Hold this event/waitstatus while we check to see if
2243 there are any more (we still want to get that SIGSTOP). */
57380f4e 2244 stop_wait_callback (lp, NULL);
710151dd
PA
2245
2246 /* If the lp->status field is still empty, use it to
2247 hold this event. If not, then this event must be
2248 returned to the event queue of the LWP. */
2249 if (lp->status || target_can_async_p ())
d6b0e80f
AC
2250 {
2251 if (debug_linux_nat)
2252 {
2253 fprintf_unfiltered (gdb_stdlog,
2254 "SWC: kill %s, %s\n",
2255 target_pid_to_str (lp->ptid),
2256 status_to_str ((int) status));
2257 }
2258 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2259 }
710151dd
PA
2260 else
2261 lp->status = status;
d6b0e80f
AC
2262 return 0;
2263 }
2264 }
2265 else
2266 {
2267 /* We caught the SIGSTOP that we intended to catch, so
2268 there's no SIGSTOP pending. */
2269 lp->stopped = 1;
2270 lp->signalled = 0;
2271 }
2272 }
2273
2274 return 0;
2275}
2276
d6b0e80f
AC
2277/* Return non-zero if LP has a wait status pending. */
2278
2279static int
2280status_callback (struct lwp_info *lp, void *data)
2281{
2282 /* Only report a pending wait status if we pretend that this has
2283 indeed been resumed. */
2284 return (lp->status != 0 && lp->resumed);
2285}
2286
2287/* Return non-zero if LP isn't stopped. */
2288
2289static int
2290running_callback (struct lwp_info *lp, void *data)
2291{
2292 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2293}
2294
2295/* Count the LWP's that have had events. */
2296
2297static int
2298count_events_callback (struct lwp_info *lp, void *data)
2299{
2300 int *count = data;
2301
2302 gdb_assert (count != NULL);
2303
e09490f1
DJ
2304 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2305 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2306 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2307 (*count)++;
2308
2309 return 0;
2310}
2311
2312/* Select the LWP (if any) that is currently being single-stepped. */
2313
2314static int
2315select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2316{
2317 if (lp->step && lp->status != 0)
2318 return 1;
2319 else
2320 return 0;
2321}
2322
2323/* Select the Nth LWP that has had a SIGTRAP event. */
2324
2325static int
2326select_event_lwp_callback (struct lwp_info *lp, void *data)
2327{
2328 int *selector = data;
2329
2330 gdb_assert (selector != NULL);
2331
e09490f1
DJ
2332 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2333 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2334 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2335 if ((*selector)-- == 0)
2336 return 1;
2337
2338 return 0;
2339}
2340
710151dd
PA
2341static int
2342cancel_breakpoint (struct lwp_info *lp)
2343{
2344 /* Arrange for a breakpoint to be hit again later. We don't keep
2345 the SIGTRAP status and don't forward the SIGTRAP signal to the
2346 LWP. We will handle the current event, eventually we will resume
2347 this LWP, and this breakpoint will trap again.
2348
2349 If we do not do this, then we run the risk that the user will
2350 delete or disable the breakpoint, but the LWP will have already
2351 tripped on it. */
2352
515630c5
UW
2353 struct regcache *regcache = get_thread_regcache (lp->ptid);
2354 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2355 CORE_ADDR pc;
2356
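 /* On targets where a breakpoint trap reports a PC past the breakpoint
    instruction (gdbarch_decr_pc_after_break is nonzero, e.g. x86),
    back the PC up to recover the breakpoint address.  */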
2357 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2358 if (breakpoint_inserted_here_p (pc))
710151dd
PA
2359 {
2360 if (debug_linux_nat)
2361 fprintf_unfiltered (gdb_stdlog,
2362 "CB: Push back breakpoint for %s\n",
2363 target_pid_to_str (lp->ptid));
2364
2365 /* Back up the PC if necessary. */
515630c5
UW
2366 if (gdbarch_decr_pc_after_break (gdbarch))
2367 regcache_write_pc (regcache, pc);
2368
710151dd
PA
2369 return 1;
2370 }
2371 return 0;
2372}
2373
d6b0e80f
AC
2374static int
2375cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2376{
2377 struct lwp_info *event_lp = data;
2378
2379 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2380 if (lp == event_lp)
2381 return 0;
2382
2383 /* If a LWP other than the LWP that we're reporting an event for has
2384 hit a GDB breakpoint (as opposed to some random trap signal),
2385 then just arrange for it to hit it again later. We don't keep
2386 the SIGTRAP status and don't forward the SIGTRAP signal to the
2387 LWP. We will handle the current event, eventually we will resume
2388 all LWPs, and this one will get its breakpoint trap again.
2389
2390 If we do not do this, then we run the risk that the user will
2391 delete or disable the breakpoint, but the LWP will have already
2392 tripped on it. */
2393
2394 if (lp->status != 0
2395 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2396 && cancel_breakpoint (lp))
2397 /* Throw away the SIGTRAP. */
2398 lp->status = 0;
d6b0e80f
AC
2399
2400 return 0;
2401}
2402
2403/* Select one LWP out of those that have events pending. */
2404
2405static void
2406select_event_lwp (struct lwp_info **orig_lp, int *status)
2407{
2408 int num_events = 0;
2409 int random_selector;
2410 struct lwp_info *event_lp;
2411
ac264b3b 2412 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2413 (*orig_lp)->status = *status;
2414
2415 /* Give preference to any LWP that is being single-stepped. */
2416 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2417 if (event_lp != NULL)
2418 {
2419 if (debug_linux_nat)
2420 fprintf_unfiltered (gdb_stdlog,
2421 "SEL: Select single-step %s\n",
2422 target_pid_to_str (event_lp->ptid));
2423 }
2424 else
2425 {
2426 /* No single-stepping LWP. Select one at random, out of those
2427 which have had SIGTRAP events. */
2428
2429 /* First see how many SIGTRAP events we have. */
2430 iterate_over_lwps (count_events_callback, &num_events);
2431
2432 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2433 random_selector = (int)
2434 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
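 /* This scales rand () uniformly into 0 .. num_events - 1, so every
    LWP with a pending SIGTRAP gets an equal chance of being chosen.  */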
2435
2436 if (debug_linux_nat && num_events > 1)
2437 fprintf_unfiltered (gdb_stdlog,
2438 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2439 num_events, random_selector);
2440
2441 event_lp = iterate_over_lwps (select_event_lwp_callback,
2442 &random_selector);
2443 }
2444
2445 if (event_lp != NULL)
2446 {
2447 /* Switch the event LWP. */
2448 *orig_lp = event_lp;
2449 *status = event_lp->status;
2450 }
2451
2452 /* Flush the wait status for the event LWP. */
2453 (*orig_lp)->status = 0;
2454}
2455
2456/* Return non-zero if LP has been resumed. */
2457
2458static int
2459resumed_callback (struct lwp_info *lp, void *data)
2460{
2461 return lp->resumed;
2462}
2463
d6b0e80f
AC
2464/* Stop an active thread, verify it still exists, then resume it. */
2465
2466static int
2467stop_and_resume_callback (struct lwp_info *lp, void *data)
2468{
2469 struct lwp_info *ptr;
2470
2471 if (!lp->stopped && !lp->signalled)
2472 {
2473 stop_callback (lp, NULL);
2474 stop_wait_callback (lp, NULL);
2475 /* Resume if the lwp still exists. */
2476 for (ptr = lwp_list; ptr; ptr = ptr->next)
2477 if (lp == ptr)
2478 {
2479 resume_callback (lp, NULL);
2480 resume_set_callback (lp, NULL);
2481 }
2482 }
2483 return 0;
2484}
2485
02f3fc28 2486/* Check if we should go on and pass this event to common code.
fa2c6a57 2487 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
2488static struct lwp_info *
2489linux_nat_filter_event (int lwpid, int status, int options)
2490{
2491 struct lwp_info *lp;
2492
2493 lp = find_lwp_pid (pid_to_ptid (lwpid));
2494
2495 /* Check for stop events reported by a process we didn't already
2496 know about - anything not already in our LWP list.
2497
2498 If we're expecting to receive stopped processes after
2499 fork, vfork, and clone events, then we'll just add the
2500 new one to our list and go back to waiting for the event
2501 to be reported - the stopped process might be returned
2502 from waitpid before or after the event is. */
2503 if (WIFSTOPPED (status) && !lp)
2504 {
2505 linux_record_stopped_pid (lwpid, status);
2506 return NULL;
2507 }
2508
2509 /* Make sure we don't report an event for the exit of an LWP not in
2510 our list, i.e. not part of the current process. This can happen
2511 if we detach from a program we originally forked and then it
2512 exits. */
2513 if (!WIFSTOPPED (status) && !lp)
2514 return NULL;
2515
2516 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2517 CLONE_PTRACE processes which do not use the thread library -
2518 otherwise we wouldn't find the new LWP this way. That doesn't
2519 currently work, and the following code is currently unreachable
2520 due to the two blocks above. If it's fixed some day, this code
2521 should be broken out into a function so that we can also pick up
2522 LWPs from the new interface. */
2523 if (!lp)
2524 {
2525 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2526 if (options & __WCLONE)
2527 lp->cloned = 1;
2528
2529 gdb_assert (WIFSTOPPED (status)
2530 && WSTOPSIG (status) == SIGSTOP);
2531 lp->signalled = 1;
2532
2533 if (!in_thread_list (inferior_ptid))
2534 {
2535 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2536 GET_PID (inferior_ptid));
2537 add_thread (inferior_ptid);
2538 }
2539
2540 add_thread (lp->ptid);
2541 }
2542
2543 /* Save the trap's siginfo in case we need it later. */
2544 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2545 save_siginfo (lp);
2546
2547 /* Handle GNU/Linux's extended waitstatus for trace events. */
2548 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2549 {
2550 if (debug_linux_nat)
2551 fprintf_unfiltered (gdb_stdlog,
2552 "LLW: Handling extended status 0x%06x\n",
2553 status);
2554 if (linux_handle_extended_wait (lp, status, 0))
2555 return NULL;
2556 }
2557
2558 /* Check if the thread has exited. */
2559 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2560 {
2561 /* If this is the main thread, we must stop all threads and
2562 verify if they are still alive. This is because in the nptl
2563 thread model, there is no signal issued for exiting LWPs
2564 other than the main thread. We only get the main thread exit
2565 signal once all child threads have already exited. If we
2566 stop all the threads and use the stop_wait_callback to check
2567 if they have exited we can determine whether this signal
2568 should be ignored or whether it means the end of the debugged
2569 application, regardless of which threading model is being
2570 used. */
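 /* Only the main thread has an LWP id equal to the process id.  */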
2571 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2572 {
2573 lp->stopped = 1;
2574 iterate_over_lwps (stop_and_resume_callback, NULL);
2575 }
2576
2577 if (debug_linux_nat)
2578 fprintf_unfiltered (gdb_stdlog,
2579 "LLW: %s exited.\n",
2580 target_pid_to_str (lp->ptid));
2581
2582 exit_lwp (lp);
2583
2584 /* If there is at least one more LWP, then the exit signal was
2585 not the end of the debugged application and should be
2586 ignored. */
2587 if (num_lwps > 0)
4c28f408 2588 return NULL;
02f3fc28
PA
2589 }
2590
2591 /* Check if the current LWP has previously exited. In the nptl
2592 thread model, LWPs other than the main thread do not issue
2593 signals when they exit so we must check whenever the thread has
2594 stopped. A similar check is made in stop_wait_callback(). */
2595 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2596 {
2597 if (debug_linux_nat)
2598 fprintf_unfiltered (gdb_stdlog,
2599 "LLW: %s exited.\n",
2600 target_pid_to_str (lp->ptid));
2601
2602 exit_lwp (lp);
2603
2604 /* Make sure there is at least one thread running. */
2605 gdb_assert (iterate_over_lwps (running_callback, NULL));
2606
2607 /* Discard the event. */
2608 return NULL;
2609 }
2610
2611 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2612 an attempt to stop an LWP. */
2613 if (lp->signalled
2614 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2615 {
2616 if (debug_linux_nat)
2617 fprintf_unfiltered (gdb_stdlog,
2618 "LLW: Delayed SIGSTOP caught for %s.\n",
2619 target_pid_to_str (lp->ptid));
2620
2621 /* This is a delayed SIGSTOP. */
2622 lp->signalled = 0;
2623
2624 registers_changed ();
2625
2626 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2627 lp->step, TARGET_SIGNAL_0);
2628 if (debug_linux_nat)
2629 fprintf_unfiltered (gdb_stdlog,
2630 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2631 lp->step ?
2632 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2633 target_pid_to_str (lp->ptid));
2634
2635 lp->stopped = 0;
2636 gdb_assert (lp->resumed);
2637
2638 /* Discard the event. */
2639 return NULL;
2640 }
2641
57380f4e
DJ
2642 /* Make sure we don't report a SIGINT that we have already displayed
2643 for another thread. */
2644 if (lp->ignore_sigint
2645 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2646 {
2647 if (debug_linux_nat)
2648 fprintf_unfiltered (gdb_stdlog,
2649 "LLW: Delayed SIGINT caught for %s.\n",
2650 target_pid_to_str (lp->ptid));
2651
2652 /* This is a delayed SIGINT. */
2653 lp->ignore_sigint = 0;
2654
2655 registers_changed ();
2656 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2657 lp->step, TARGET_SIGNAL_0);
2658 if (debug_linux_nat)
2659 fprintf_unfiltered (gdb_stdlog,
2660 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2661 lp->step ?
2662 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2663 target_pid_to_str (lp->ptid));
2664
2665 lp->stopped = 0;
2666 gdb_assert (lp->resumed);
2667
2668 /* Discard the event. */
2669 return NULL;
2670 }
2671
02f3fc28
PA
2672 /* An interesting event. */
2673 gdb_assert (lp);
2674 return lp;
2675}
2676
b84876c2
PA
2677/* Get the events stored in the pipe into the local queue, so they are
2678 accessible to queued_waitpid. We need to do this, since it is not
2679 always the case that the event at the head of the pipe is the event
2680 we want. */
2681
2682static void
2683pipe_to_local_event_queue (void)
2684{
2685 if (debug_linux_nat_async)
2686 fprintf_unfiltered (gdb_stdlog,
2687 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2688 linux_nat_num_queued_events);
2689 while (linux_nat_num_queued_events)
2690 {
2691 int lwpid, status, options;
b84876c2 2692 lwpid = linux_nat_event_pipe_pop (&status, &options);
b84876c2
PA
2693 gdb_assert (lwpid > 0);
2694 push_waitpid (lwpid, status, options);
2695 }
2696}
2697
2698/* Get the unprocessed events stored in the local queue back into the
2699 pipe, so the event loop realizes there's something else to
2700 process. */
2701
2702static void
2703local_event_queue_to_pipe (void)
2704{
2705 struct waitpid_result *w = waitpid_queue;
2706 while (w)
2707 {
2708 struct waitpid_result *next = w->next;
2709 linux_nat_event_pipe_push (w->pid,
2710 w->status,
2711 w->options);
2712 xfree (w);
2713 w = next;
2714 }
2715 waitpid_queue = NULL;
2716
2717 if (debug_linux_nat_async)
2718 fprintf_unfiltered (gdb_stdlog,
2719 "LEQTP: linux_nat_num_queued_events(%d)\n",
2720 linux_nat_num_queued_events);
2721}
2722
d6b0e80f
AC
2723static ptid_t
2724linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2725{
2726 struct lwp_info *lp = NULL;
2727 int options = 0;
2728 int status = 0;
2729 pid_t pid = PIDGET (ptid);
d6b0e80f 2730
b84876c2
PA
2731 if (debug_linux_nat_async)
2732 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2733
f973ed9c
DJ
2734 /* The first time we get here after starting a new inferior, we may
2735 not have added it to the LWP list yet - this is the earliest
2736 moment at which we know its PID. */
2737 if (num_lwps == 0)
2738 {
2739 gdb_assert (!is_lwp (inferior_ptid));
2740
27c9d204
PA
2741 /* Upgrade the main thread's ptid. */
2742 thread_change_ptid (inferior_ptid,
2743 BUILD_LWP (GET_PID (inferior_ptid),
2744 GET_PID (inferior_ptid)));
2745
f973ed9c
DJ
2746 lp = add_lwp (inferior_ptid);
2747 lp->resumed = 1;
2748 }
2749
84e46146
PA
2750 /* Block events while we're here. */
2751 linux_nat_async_events (sigchld_sync);
d6b0e80f
AC
2752
2753retry:
2754
f973ed9c
DJ
2755 /* Make sure there is at least one LWP that has been resumed. */
2756 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
d6b0e80f
AC
2757
2758 /* First check if there is a LWP with a wait status pending. */
2759 if (pid == -1)
2760 {
2761 /* Any LWP that's been resumed will do. */
2762 lp = iterate_over_lwps (status_callback, NULL);
2763 if (lp)
2764 {
710151dd
PA
2765 if (target_can_async_p ())
2766 internal_error (__FILE__, __LINE__,
2767 "Found an LWP with a pending status in async mode.");
2768
d6b0e80f
AC
2769 status = lp->status;
2770 lp->status = 0;
2771
2772 if (debug_linux_nat && status)
2773 fprintf_unfiltered (gdb_stdlog,
2774 "LLW: Using pending wait status %s for %s.\n",
2775 status_to_str (status),
2776 target_pid_to_str (lp->ptid));
2777 }
2778
b84876c2 2779 /* But if we don't find one, we'll have to wait, and check both
d6b0e80f
AC
2780 cloned and uncloned processes. We start with the cloned
2781 processes. */
2782 options = __WCLONE | WNOHANG;
2783 }
2784 else if (is_lwp (ptid))
2785 {
2786 if (debug_linux_nat)
2787 fprintf_unfiltered (gdb_stdlog,
2788 "LLW: Waiting for specific LWP %s.\n",
2789 target_pid_to_str (ptid));
2790
2791 /* We have a specific LWP to check. */
2792 lp = find_lwp_pid (ptid);
2793 gdb_assert (lp);
2794 status = lp->status;
2795 lp->status = 0;
2796
2797 if (debug_linux_nat && status)
2798 fprintf_unfiltered (gdb_stdlog,
2799 "LLW: Using pending wait status %s for %s.\n",
2800 status_to_str (status),
2801 target_pid_to_str (lp->ptid));
2802
2803 /* If we have to wait, take into account whether PID is a cloned
2804 process or not. And we have to convert it to something that
2805 the layer beneath us can understand. */
2806 options = lp->cloned ? __WCLONE : 0;
2807 pid = GET_LWP (ptid);
2808 }
2809
2810 if (status && lp->signalled)
2811 {
2812 /* A pending SIGSTOP may interfere with the normal stream of
2813 events. In a typical case where interference is a problem,
2814 we have a SIGSTOP signal pending for LWP A while
2815 single-stepping it, encounter an event in LWP B, and take the
2816 pending SIGSTOP while trying to stop LWP A. After processing
2817 the event in LWP B, LWP A is continued, and we'll never see
2818 the SIGTRAP associated with the last time we were
2819 single-stepping LWP A. */
2820
2821 /* Resume the thread. It should halt immediately returning the
2822 pending SIGSTOP. */
2823 registers_changed ();
10d6c8cd
DJ
2824 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2825 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
2826 if (debug_linux_nat)
2827 fprintf_unfiltered (gdb_stdlog,
2828 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2829 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2830 target_pid_to_str (lp->ptid));
2831 lp->stopped = 0;
2832 gdb_assert (lp->resumed);
2833
2834 /* This should catch the pending SIGSTOP. */
2835 stop_wait_callback (lp, NULL);
2836 }
2837
b84876c2
PA
2838 if (!target_can_async_p ())
2839 {
2840 /* Causes SIGINT to be passed on to the attached process. */
2841 set_sigint_trap ();
2842 set_sigio_trap ();
2843 }
d6b0e80f
AC
2844
2845 while (status == 0)
2846 {
2847 pid_t lwpid;
2848
b84876c2
PA
2849 if (target_can_async_p ())
2850 /* In async mode, don't ever block. Only look at the locally
2851 queued events. */
2852 lwpid = queued_waitpid (pid, &status, options);
2853 else
2854 lwpid = my_waitpid (pid, &status, options);
2855
d6b0e80f
AC
2856 if (lwpid > 0)
2857 {
2858 gdb_assert (pid == -1 || lwpid == pid);
2859
2860 if (debug_linux_nat)
2861 {
2862 fprintf_unfiltered (gdb_stdlog,
2863 "LLW: waitpid %ld received %s\n",
2864 (long) lwpid, status_to_str (status));
2865 }
2866
02f3fc28 2867 lp = linux_nat_filter_event (lwpid, status, options);
d6b0e80f
AC
2868 if (!lp)
2869 {
02f3fc28 2870 /* A discarded event. */
d6b0e80f
AC
2871 status = 0;
2872 continue;
2873 }
2874
2875 break;
2876 }
2877
2878 if (pid == -1)
2879 {
2880 /* Alternate between checking cloned and uncloned processes. */
2881 options ^= __WCLONE;
2882
b84876c2
PA
2883 /* And every time we have checked both:
2884 In async mode, return to event loop;
2885 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 2886 if (options & __WCLONE)
b84876c2
PA
2887 {
2888 if (target_can_async_p ())
2889 {
2890 /* No interesting event. */
2891 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2892
2893 /* Get ready for the next event. */
2894 target_async (inferior_event_handler, 0);
2895
2896 if (debug_linux_nat_async)
2897 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2898
2899 return minus_one_ptid;
2900 }
2901
2902 sigsuspend (&suspend_mask);
2903 }
d6b0e80f
AC
2904 }
2905
2906 /* We shouldn't end up here unless we want to try again. */
2907 gdb_assert (status == 0);
2908 }
2909
b84876c2
PA
2910 if (!target_can_async_p ())
2911 {
2912 clear_sigio_trap ();
2913 clear_sigint_trap ();
2914 }
d6b0e80f
AC
2915
2916 gdb_assert (lp);
2917
2918 /* Don't report signals that GDB isn't interested in, such as
2919 signals that are neither printed nor stopped upon. Stopping all
2920 threads can be a bit time-consuming so if we want decent
2921 performance with heavily multi-threaded programs, especially when
2922 they're using a high frequency timer, we'd better avoid it if we
2923 can. */
2924
2925 if (WIFSTOPPED (status))
2926 {
2927 int signo = target_signal_from_host (WSTOPSIG (status));
2928
d539ed7e
UW
2929 /* If we get a signal while single-stepping, we may need special
2930 care, e.g. to skip the signal handler. Defer to common code. */
2931 if (!lp->step
2932 && signal_stop_state (signo) == 0
d6b0e80f
AC
2933 && signal_print_state (signo) == 0
2934 && signal_pass_state (signo) == 1)
2935 {
2936 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2937 here? It is not clear we should. GDB may not expect
2938 other threads to run. On the other hand, not resuming
2939 newly attached threads may cause an unwanted delay in
2940 getting them running. */
2941 registers_changed ();
10d6c8cd
DJ
2942 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2943 lp->step, signo);
d6b0e80f
AC
2944 if (debug_linux_nat)
2945 fprintf_unfiltered (gdb_stdlog,
2946 "LLW: %s %s, %s (preempt 'handle')\n",
2947 lp->step ?
2948 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2949 target_pid_to_str (lp->ptid),
2950 signo ? strsignal (signo) : "0");
2951 lp->stopped = 0;
2952 status = 0;
2953 goto retry;
2954 }
2955
2956 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2957 {
2958 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
57380f4e
DJ
2959 forwarded to the entire process group, that is, all LWPs
2960 will receive it - unless they're using CLONE_THREAD to
2961 share signals. Since we only want to report it once, we
2962 mark it as ignored for all LWPs except this one. */
2963 iterate_over_lwps (set_ignore_sigint, NULL);
2964 lp->ignore_sigint = 0;
d6b0e80f 2965 }
57380f4e
DJ
2966 else
2967 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
2968 }
2969
2970 /* This LWP is stopped now. */
2971 lp->stopped = 1;
2972
2973 if (debug_linux_nat)
2974 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2975 status_to_str (status), target_pid_to_str (lp->ptid));
2976
4c28f408
PA
2977 if (!non_stop)
2978 {
2979 /* Now stop all other LWP's ... */
2980 iterate_over_lwps (stop_callback, NULL);
2981
2982 /* ... and wait until all of them have reported back that
2983 they're no longer running. */
57380f4e 2984 iterate_over_lwps (stop_wait_callback, NULL);
4c28f408
PA
2985
2986 /* If we're not waiting for a specific LWP, choose an event LWP
2987 from among those that have had events. Giving equal priority
2988 to all LWPs that have had events helps prevent
2989 starvation. */
2990 if (pid == -1)
2991 select_event_lwp (&lp, &status);
2992 }
d6b0e80f
AC
2993
2994 /* Now that we've selected our final event LWP, cancel any
2995 breakpoints in other LWPs that have hit a GDB breakpoint. See
2996 the comment in cancel_breakpoints_callback to find out why. */
2997 iterate_over_lwps (cancel_breakpoints_callback, lp);
2998
d6b0e80f
AC
2999 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3000 {
d6b0e80f
AC
3001 if (debug_linux_nat)
3002 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3003 "LLW: trap ptid is %s.\n",
3004 target_pid_to_str (lp->ptid));
d6b0e80f 3005 }
d6b0e80f
AC
3006
3007 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3008 {
3009 *ourstatus = lp->waitstatus;
3010 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3011 }
3012 else
3013 store_waitstatus (ourstatus, status);
3014
b84876c2
PA
3015 /* Get ready for the next event. */
3016 if (target_can_async_p ())
3017 target_async (inferior_event_handler, 0);
3018
3019 if (debug_linux_nat_async)
3020 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3021
f973ed9c 3022 return lp->ptid;
d6b0e80f
AC
3023}
3024
3025static int
3026kill_callback (struct lwp_info *lp, void *data)
3027{
3028 errno = 0;
3029 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3030 if (debug_linux_nat)
3031 fprintf_unfiltered (gdb_stdlog,
3032 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3033 target_pid_to_str (lp->ptid),
3034 errno ? safe_strerror (errno) : "OK");
3035
3036 return 0;
3037}
3038
3039static int
3040kill_wait_callback (struct lwp_info *lp, void *data)
3041{
3042 pid_t pid;
3043
3044 /* We must make sure that there are no pending events (delayed
3045 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3046 program doesn't interfere with any following debugging session. */
3047
3048 /* For cloned processes we must check both with __WCLONE and
3049 without, since the exit status of a cloned process isn't reported
3050 with __WCLONE. */
3051 if (lp->cloned)
3052 {
3053 do
3054 {
58aecb61 3055 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3056 if (pid != (pid_t) -1)
d6b0e80f 3057 {
e85a822c
DJ
3058 if (debug_linux_nat)
3059 fprintf_unfiltered (gdb_stdlog,
3060 "KWC: wait %s received unknown.\n",
3061 target_pid_to_str (lp->ptid));
3062 /* The Linux kernel sometimes fails to kill a thread
3063 completely after PTRACE_KILL; that goes from the stop
3064 point in do_fork out to the one in
3065 get_signal_to_deliver and waits again. So kill it
3066 again. */
3067 kill_callback (lp, NULL);
d6b0e80f
AC
3068 }
3069 }
3070 while (pid == GET_LWP (lp->ptid));
3071
3072 gdb_assert (pid == -1 && errno == ECHILD);
3073 }
3074
3075 do
3076 {
58aecb61 3077 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3078 if (pid != (pid_t) -1)
d6b0e80f 3079 {
e85a822c
DJ
3080 if (debug_linux_nat)
3081 fprintf_unfiltered (gdb_stdlog,
3082 "KWC: wait %s received unk.\n",
3083 target_pid_to_str (lp->ptid));
3084 /* See the call to kill_callback above. */
3085 kill_callback (lp, NULL);
d6b0e80f
AC
3086 }
3087 }
3088 while (pid == GET_LWP (lp->ptid));
3089
3090 gdb_assert (pid == -1 && errno == ECHILD);
3091 return 0;
3092}
3093
3094static void
3095linux_nat_kill (void)
3096{
f973ed9c
DJ
3097 struct target_waitstatus last;
3098 ptid_t last_ptid;
3099 int status;
d6b0e80f 3100
b84876c2
PA
3101 if (target_can_async_p ())
3102 target_async (NULL, 0);
3103
f973ed9c
DJ
3104 /* If we're stopped while forking and we haven't followed yet,
3105 kill the other task. We need to do this first because the
3106 parent will be sleeping if this is a vfork. */
d6b0e80f 3107
f973ed9c 3108 get_last_target_status (&last_ptid, &last);
d6b0e80f 3109
f973ed9c
DJ
3110 if (last.kind == TARGET_WAITKIND_FORKED
3111 || last.kind == TARGET_WAITKIND_VFORKED)
3112 {
3a3e9ee3 3113 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3114 wait (&status);
3115 }
3116
3117 if (forks_exist_p ())
b84876c2
PA
3118 {
3119 linux_fork_killall ();
3120 drain_queued_events (-1);
3121 }
f973ed9c
DJ
3122 else
3123 {
4c28f408
PA
3124 /* Stop all threads before killing them, since ptrace requires
3125 that the thread is stopped to successfully PTRACE_KILL. */
3126 iterate_over_lwps (stop_callback, NULL);
3127 /* ... and wait until all of them have reported back that
3128 they're no longer running. */
3129 iterate_over_lwps (stop_wait_callback, NULL);
3130
f973ed9c
DJ
3131 /* Kill all LWP's ... */
3132 iterate_over_lwps (kill_callback, NULL);
3133
3134 /* ... and wait until we've flushed all events. */
3135 iterate_over_lwps (kill_wait_callback, NULL);
3136 }
3137
3138 target_mourn_inferior ();
d6b0e80f
AC
3139}
3140
3141static void
3142linux_nat_mourn_inferior (void)
3143{
d6b0e80f
AC
3144 /* Destroy LWP info; it's no longer valid. */
3145 init_lwp_list ();
3146
f973ed9c 3147 if (! forks_exist_p ())
b84876c2
PA
3148 {
3149 /* Normal case, no other forks available. */
3150 if (target_can_async_p ())
3151 linux_nat_async (NULL, 0);
3152 linux_ops->to_mourn_inferior ();
3153 }
f973ed9c
DJ
3154 else
3155 /* Multi-fork case. The current inferior_ptid has exited, but
3156 there are other viable forks to debug. Delete the exiting
3157 one and context-switch to the first available. */
3158 linux_fork_mourn_inferior ();
d6b0e80f
AC
3159}
3160
10d6c8cd
DJ
3161static LONGEST
3162linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3163 const char *annex, gdb_byte *readbuf,
3164 const gdb_byte *writebuf,
3165 ULONGEST offset, LONGEST len)
d6b0e80f
AC
3166{
3167 struct cleanup *old_chain = save_inferior_ptid ();
10d6c8cd 3168 LONGEST xfer;
d6b0e80f
AC
3169
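 /* The lower layers address ptrace requests by plain process id, so if
    inferior_ptid names an LWP, temporarily put the LWP id into the pid
    slot for the duration of the transfer.  */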
3170 if (is_lwp (inferior_ptid))
3171 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3172
10d6c8cd
DJ
3173 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3174 offset, len);
d6b0e80f
AC
3175
3176 do_cleanups (old_chain);
3177 return xfer;
3178}
3179
3180static int
3181linux_nat_thread_alive (ptid_t ptid)
3182{
4c28f408
PA
3183 int err;
3184
d6b0e80f
AC
3185 gdb_assert (is_lwp (ptid));
3186
4c28f408
PA
3187 /* Send signal 0 instead of any ptrace request, because ptracing a
3188 running thread errors out claiming that the thread doesn't
3189 exist. */
3190 err = kill_lwp (GET_LWP (ptid), 0);
3191
d6b0e80f
AC
3192 if (debug_linux_nat)
3193 fprintf_unfiltered (gdb_stdlog,
4c28f408 3194 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3195 target_pid_to_str (ptid),
4c28f408 3196 err ? safe_strerror (err) : "OK");
9c0dd46b 3197
4c28f408 3198 if (err != 0)
d6b0e80f
AC
3199 return 0;
3200
3201 return 1;
3202}
3203
3204static char *
3205linux_nat_pid_to_str (ptid_t ptid)
3206{
3207 static char buf[64];
3208
a0ef4274
DJ
3209 if (is_lwp (ptid)
3210 && ((lwp_list && lwp_list->next)
3211 || GET_PID (ptid) != GET_LWP (ptid)))
d6b0e80f
AC
3212 {
3213 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3214 return buf;
3215 }
3216
3217 return normal_pid_to_str (ptid);
3218}
3219
d6b0e80f
AC
3220static void
3221sigchld_handler (int signo)
3222{
c6ebd6cf 3223 if (target_async_permitted
84e46146 3224 && linux_nat_async_events_state != sigchld_sync
b84876c2
PA
3225 && signo == SIGCHLD)
3226 /* It is *always* a bug to hit this. */
3227 internal_error (__FILE__, __LINE__,
3228 "sigchld_handler called when async events are enabled");
3229
d6b0e80f
AC
3230 /* Do nothing. The only reason for this handler is that it allows
3231 us to use sigsuspend in linux_nat_wait above to wait for the
3232 arrival of a SIGCHLD. */
3233}
3234
dba24537
AC
3235/* Accepts an integer PID; Returns a string representing a file that
3236 can be opened to get the symbols for the child process. */
3237
6d8fd2b7
UW
3238static char *
3239linux_child_pid_to_exec_file (int pid)
dba24537
AC
3240{
3241 char *name1, *name2;
3242
3243 name1 = xmalloc (MAXPATHLEN);
3244 name2 = xmalloc (MAXPATHLEN);
3245 make_cleanup (xfree, name1);
3246 make_cleanup (xfree, name2);
3247 memset (name2, 0, MAXPATHLEN);
3248
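 /* /proc/PID/exe is a symbolic link to the executable; return its
    target if it is readable, and otherwise fall back to the /proc
    path itself, which can still be opened to reach the executable.  */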
3249 sprintf (name1, "/proc/%d/exe", pid);
3250 if (readlink (name1, name2, MAXPATHLEN) > 0)
3251 return name2;
3252 else
3253 return name1;
3254}
3255
3256/* Service function for corefiles and info proc. */
3257
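/* A /proc/PID/maps line looks like, for example:

     08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm

   i.e. address range, permissions, file offset, device, inode, and an
   optional file name, which is the layout read_mapping parses below.  */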
3258static int
3259read_mapping (FILE *mapfile,
3260 long long *addr,
3261 long long *endaddr,
3262 char *permissions,
3263 long long *offset,
3264 char *device, long long *inode, char *filename)
3265{
3266 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3267 addr, endaddr, permissions, offset, device, inode);
3268
2e14c2ea
MS
3269 filename[0] = '\0';
3270 if (ret > 0 && ret != EOF)
dba24537
AC
3271 {
3272 /* Eat everything up to EOL for the filename. This will prevent
3273 weird filenames (such as one with embedded whitespace) from
3274 confusing this code. It also makes this code more robust with
3275 respect to annotations the kernel may add after the filename.
3276
3277 Note the filename is used for informational purposes
3278 only. */
3279 ret += fscanf (mapfile, "%[^\n]\n", filename);
3280 }
2e14c2ea 3281
dba24537
AC
3282 return (ret != 0 && ret != EOF);
3283}
3284
3285/* Fills the "to_find_memory_regions" target vector. Lists the memory
3286 regions in the inferior for a corefile. */
3287
3288static int
3289linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3290 unsigned long,
3291 int, int, int, void *), void *obfd)
3292{
3293 long long pid = PIDGET (inferior_ptid);
3294 char mapsfilename[MAXPATHLEN];
3295 FILE *mapsfile;
3296 long long addr, endaddr, size, offset, inode;
3297 char permissions[8], device[8], filename[MAXPATHLEN];
3298 int read, write, exec;
3299 int ret;
3300
3301 /* Compose the filename for the /proc memory map, and open it. */
3302 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3303 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 3304 error (_("Could not open %s."), mapsfilename);
dba24537
AC
3305
3306 if (info_verbose)
3307 fprintf_filtered (gdb_stdout,
3308 "Reading memory regions from %s\n", mapsfilename);
3309
3310 /* Now iterate until end-of-file. */
3311 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3312 &offset, &device[0], &inode, &filename[0]))
3313 {
3314 size = endaddr - addr;
3315
3316 /* Get the segment's permissions. */
3317 read = (strchr (permissions, 'r') != 0);
3318 write = (strchr (permissions, 'w') != 0);
3319 exec = (strchr (permissions, 'x') != 0);
3320
3321 if (info_verbose)
3322 {
3323 fprintf_filtered (gdb_stdout,
3324 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3325 size, paddr_nz (addr),
3326 read ? 'r' : ' ',
3327 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 3328 if (filename[0])
dba24537
AC
3329 fprintf_filtered (gdb_stdout, " for %s", filename);
3330 fprintf_filtered (gdb_stdout, "\n");
3331 }
3332
3333 /* Invoke the callback function to create the corefile
3334 segment. */
3335 func (addr, size, read, write, exec, obfd);
3336 }
3337 fclose (mapsfile);
3338 return 0;
3339}
3340
3341/* Records the thread's register state for the corefile note
3342 section. */
3343
3344static char *
3345linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
3346 char *note_data, int *note_size)
3347{
3348 gdb_gregset_t gregs;
3349 gdb_fpregset_t fpregs;
dba24537 3350 unsigned long lwp = ptid_get_lwp (ptid);
594f7785
UW
3351 struct regcache *regcache = get_thread_regcache (ptid);
3352 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 3353 const struct regset *regset;
55e969c1 3354 int core_regset_p;
594f7785 3355 struct cleanup *old_chain;
17ea7499
CES
3356 struct core_regset_section *sect_list;
3357 char *gdb_regset;
594f7785
UW
3358
3359 old_chain = save_inferior_ptid ();
3360 inferior_ptid = ptid;
3361 target_fetch_registers (regcache, -1);
3362 do_cleanups (old_chain);
4f844a66
DM
3363
3364 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
3365 sect_list = gdbarch_core_regset_sections (gdbarch);
3366
55e969c1
DM
3367 if (core_regset_p
3368 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3369 sizeof (gregs))) != NULL
3370 && regset->collect_regset != NULL)
594f7785 3371 regset->collect_regset (regset, regcache, -1,
55e969c1 3372 &gregs, sizeof (gregs));
4f844a66 3373 else
594f7785 3374 fill_gregset (regcache, &gregs, -1);
4f844a66 3375
55e969c1
DM
3376 note_data = (char *) elfcore_write_prstatus (obfd,
3377 note_data,
3378 note_size,
3379 lwp,
3380 stop_signal, &gregs);
3381
17ea7499
CES
3382 /* The loop below uses the new struct core_regset_section, which stores
3383 the supported section names and sizes for the core file. Note that
3384 note PRSTATUS needs to be treated specially. But the other notes are
3385 structurally the same, so they can benefit from the new struct. */
3386 if (core_regset_p && sect_list != NULL)
3387 while (sect_list->sect_name != NULL)
3388 {
3389 /* .reg was already handled above. */
3390 if (strcmp (sect_list->sect_name, ".reg") == 0)
3391 {
3392 sect_list++;
3393 continue;
3394 }
3395 regset = gdbarch_regset_from_core_section (gdbarch,
3396 sect_list->sect_name,
3397 sect_list->size);
3398 gdb_assert (regset && regset->collect_regset);
3399 gdb_regset = xmalloc (sect_list->size);
3400 regset->collect_regset (regset, regcache, -1,
3401 gdb_regset, sect_list->size);
3402 note_data = (char *) elfcore_write_register_note (obfd,
3403 note_data,
3404 note_size,
3405 sect_list->sect_name,
3406 gdb_regset,
3407 sect_list->size);
3408 xfree (gdb_regset);
3409 sect_list++;
3410 }
dba24537 3411
17ea7499
CES
3412 /* For architectures that do not have the struct core_regset_section
3413 implemented, we use the old method. When all the architectures have
3414 the new support, the code below should be deleted. */
4f844a66 3415 else
17ea7499
CES
3416 {
3417 if (core_regset_p
3418 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3419 sizeof (fpregs))) != NULL
3420 && regset->collect_regset != NULL)
3421 regset->collect_regset (regset, regcache, -1,
3422 &fpregs, sizeof (fpregs));
3423 else
3424 fill_fpregset (regcache, &fpregs, -1);
3425
3426 note_data = (char *) elfcore_write_prfpreg (obfd,
3427 note_data,
3428 note_size,
3429 &fpregs, sizeof (fpregs));
3430 }
4f844a66 3431
dba24537
AC
3432 return note_data;
3433}
3434
3435struct linux_nat_corefile_thread_data
3436{
3437 bfd *obfd;
3438 char *note_data;
3439 int *note_size;
3440 int num_notes;
3441};
3442
3443/* Called by gdbthread.c once per thread. Records the thread's
3444 register state for the corefile note section. */
3445
3446static int
3447linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3448{
3449 struct linux_nat_corefile_thread_data *args = data;
dba24537 3450
dba24537
AC
3451 args->note_data = linux_nat_do_thread_registers (args->obfd,
3452 ti->ptid,
3453 args->note_data,
3454 args->note_size);
3455 args->num_notes++;
56be3814 3456
dba24537
AC
3457 return 0;
3458}
3459
3460/* Records the register state for the corefile note section. */
3461
3462static char *
3463linux_nat_do_registers (bfd *obfd, ptid_t ptid,
3464 char *note_data, int *note_size)
3465{
dba24537
AC
3466 return linux_nat_do_thread_registers (obfd,
3467 ptid_build (ptid_get_pid (inferior_ptid),
3468 ptid_get_pid (inferior_ptid),
3469 0),
3470 note_data, note_size);
dba24537
AC
3471}
3472
3473/* Fills the "to_make_corefile_note" target vector. Builds the note
3474 section for a corefile, and returns it in a malloc buffer. */
3475
3476static char *
3477linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3478{
3479 struct linux_nat_corefile_thread_data thread_args;
3480 struct cleanup *old_chain;
d99148ef 3481 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3482 char fname[16] = { '\0' };
d99148ef 3483 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3484 char psargs[80] = { '\0' };
3485 char *note_data = NULL;
3486 ptid_t current_ptid = inferior_ptid;
c6826062 3487 gdb_byte *auxv;
dba24537
AC
3488 int auxv_len;
3489
3490 if (get_exec_file (0))
3491 {
3492 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3493 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3494 if (get_inferior_args ())
3495 {
d99148ef
JK
3496 char *string_end;
3497 char *psargs_end = psargs + sizeof (psargs);
3498
3499 /* linux_elfcore_write_prpsinfo () handles strings that are not
3500 zero-terminated fine. */
3501 string_end = memchr (psargs, 0, sizeof (psargs));
3502 if (string_end != NULL)
3503 {
3504 *string_end++ = ' ';
3505 strncpy (string_end, get_inferior_args (),
3506 psargs_end - string_end);
3507 }
dba24537
AC
3508 }
3509 note_data = (char *) elfcore_write_prpsinfo (obfd,
3510 note_data,
3511 note_size, fname, psargs);
3512 }
3513
3514 /* Dump information for threads. */
3515 thread_args.obfd = obfd;
3516 thread_args.note_data = note_data;
3517 thread_args.note_size = note_size;
3518 thread_args.num_notes = 0;
3519 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3520 if (thread_args.num_notes == 0)
3521 {
3522 /* iterate_over_lwps didn't come up with any threads; just
3523 use inferior_ptid. */
3524 note_data = linux_nat_do_registers (obfd, inferior_ptid,
3525 note_data, note_size);
3526 }
3527 else
3528 {
3529 note_data = thread_args.note_data;
3530 }
3531
13547ab6
DJ
3532 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3533 NULL, &auxv);
dba24537
AC
3534 if (auxv_len > 0)
3535 {
3536 note_data = elfcore_write_note (obfd, note_data, note_size,
3537 "CORE", NT_AUXV, auxv, auxv_len);
3538 xfree (auxv);
3539 }
3540
3541 make_cleanup (xfree, note_data);
3542 return note_data;
3543}
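
/* Illustrative note layout (a sketch of the usual result, not extra
   code): for a two-thread inferior the malloc'd buffer assembled above
   typically contains, in order:

       NT_PRPSINFO                    -- command name and argument string
       NT_PRSTATUS + FP/regset notes     (thread 1)
       NT_PRSTATUS + FP/regset notes     (thread 2)
       NT_AUXV                        -- auxiliary vector, if readable  */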
3544
3545/* Implement the "info proc" command. */
3546
3547static void
3548linux_nat_info_proc_cmd (char *args, int from_tty)
3549{
3550 long long pid = PIDGET (inferior_ptid);
3551 FILE *procfile;
3552 char **argv = NULL;
3553 char buffer[MAXPATHLEN];
3554 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3555 int cmdline_f = 1;
3556 int cwd_f = 1;
3557 int exe_f = 1;
3558 int mappings_f = 0;
3559 int environ_f = 0;
3560 int status_f = 0;
3561 int stat_f = 0;
3562 int all = 0;
3563 struct stat dummy;
3564
3565 if (args)
3566 {
3567 /* Break up 'args' into an argv array. */
3568 if ((argv = buildargv (args)) == NULL)
3569 nomem (0);
3570 else
3571 make_cleanup_freeargv (argv);
3572 }
3573 while (argv != NULL && *argv != NULL)
3574 {
3575 if (isdigit (argv[0][0]))
3576 {
3577 pid = strtoul (argv[0], NULL, 10);
3578 }
3579 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3580 {
3581 mappings_f = 1;
3582 }
3583 else if (strcmp (argv[0], "status") == 0)
3584 {
3585 status_f = 1;
3586 }
3587 else if (strcmp (argv[0], "stat") == 0)
3588 {
3589 stat_f = 1;
3590 }
3591 else if (strcmp (argv[0], "cmd") == 0)
3592 {
3593 cmdline_f = 1;
3594 }
3595 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3596 {
3597 exe_f = 1;
3598 }
3599 else if (strcmp (argv[0], "cwd") == 0)
3600 {
3601 cwd_f = 1;
3602 }
3603 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3604 {
3605 all = 1;
3606 }
3607 else
3608 {
3609 /* [...] (future options here) */
3610 }
3611 argv++;
3612 }
3613 if (pid == 0)
8a3fe4f8 3614 error (_("No current process: you must name one."));
dba24537
AC
3615
3616 sprintf (fname1, "/proc/%lld", pid);
3617 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3618 error (_("No /proc directory: '%s'"), fname1);
dba24537 3619
a3f17187 3620 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3621 if (cmdline_f || all)
3622 {
3623 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3624 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3625 {
3626 fgets (buffer, sizeof (buffer), procfile);
3627 printf_filtered ("cmdline = '%s'\n", buffer);
3628 fclose (procfile);
3629 }
3630 else
8a3fe4f8 3631 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3632 }
3633 if (cwd_f || all)
3634 {
3635 sprintf (fname1, "/proc/%lld/cwd", pid);
3636 memset (fname2, 0, sizeof (fname2));
3637 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3638 printf_filtered ("cwd = '%s'\n", fname2);
3639 else
8a3fe4f8 3640 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3641 }
3642 if (exe_f || all)
3643 {
3644 sprintf (fname1, "/proc/%lld/exe", pid);
3645 memset (fname2, 0, sizeof (fname2));
3646 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3647 printf_filtered ("exe = '%s'\n", fname2);
3648 else
8a3fe4f8 3649 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3650 }
3651 if (mappings_f || all)
3652 {
3653 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3654 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3655 {
3656 long long addr, endaddr, size, offset, inode;
3657 char permissions[8], device[8], filename[MAXPATHLEN];
3658
a3f17187 3659 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3660 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3661 {
3662 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3663 "Start Addr",
3664 " End Addr",
3665 " Size", " Offset", "objfile");
3666 }
3667 else
3668 {
3669 printf_filtered (" %18s %18s %10s %10s %7s\n",
3670 "Start Addr",
3671 " End Addr",
3672 " Size", " Offset", "objfile");
3673 }
3674
3675 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3676 &offset, &device[0], &inode, &filename[0]))
3677 {
3678 size = endaddr - addr;
3679
3680 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3681 calls here (and possibly above) should be abstracted
3682 out into their own functions? Andrew suggests using
3683 a generic local_address_string instead to print out
3684 the addresses; that makes sense to me, too. */
3685
17a912b6 3686 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3687 {
3688 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3689 (unsigned long) addr, /* FIXME: pr_addr */
3690 (unsigned long) endaddr,
3691 (int) size,
3692 (unsigned int) offset,
3693 filename[0] ? filename : "");
3694 }
3695 else
3696 {
3697 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3698 (unsigned long) addr, /* FIXME: pr_addr */
3699 (unsigned long) endaddr,
3700 (int) size,
3701 (unsigned int) offset,
3702 filename[0] ? filename : "");
3703 }
3704 }
3705
3706 fclose (procfile);
3707 }
3708 else
8a3fe4f8 3709 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3710 }
3711 if (status_f || all)
3712 {
3713 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3714 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3715 {
3716 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3717 puts_filtered (buffer);
3718 fclose (procfile);
3719 }
3720 else
8a3fe4f8 3721 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3722 }
3723 if (stat_f || all)
3724 {
3725 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3726 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3727 {
3728 int itmp;
3729 char ctmp;
a25694b4 3730 long ltmp;
dba24537
AC
3731
3732 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3733 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3734 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3735 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3736 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 3737 printf_filtered (_("State: %c\n"), ctmp);
dba24537 3738 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3739 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 3740 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3741 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 3742 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3743 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 3744 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3745 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 3746 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3747 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
3748 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3749 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3750 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3751 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3752 (unsigned long) ltmp);
3753 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3754 printf_filtered (_("Minor faults, children: %lu\n"),
3755 (unsigned long) ltmp);
3756 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3757 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3758 (unsigned long) ltmp);
3759 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3760 printf_filtered (_("Major faults, children: %lu\n"),
3761 (unsigned long) ltmp);
3762 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3763 printf_filtered (_("utime: %ld\n"), ltmp);
3764 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3765 printf_filtered (_("stime: %ld\n"), ltmp);
3766 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3767 printf_filtered (_("utime, children: %ld\n"), ltmp);
3768 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3769 printf_filtered (_("stime, children: %ld\n"), ltmp);
3770 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3771 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3772 ltmp);
3773 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3774 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3775 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3776 printf_filtered (_("jiffies until next timeout: %lu\n"),
3777 (unsigned long) ltmp);
3778 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3779 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3780 (unsigned long) ltmp);
3781 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3782 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3783 ltmp);
3784 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3785 printf_filtered (_("Virtual memory size: %lu\n"),
3786 (unsigned long) ltmp);
3787 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3788 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3789 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3790 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3791 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3792 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3793 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3794 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3795 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3796 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
3797#if 0 /* Don't know how architecture-dependent the rest is...
3798 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
3799 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3800 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3801 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3802 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3803 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3804 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3805 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3806 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3807 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3808 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3809 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3810 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3811 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3812 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537
AC
3813#endif
3814 fclose (procfile);
3815 }
3816 else
8a3fe4f8 3817 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3818 }
3819}
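
/* Illustrative usage (not additional code): the command above is driven
   from the GDB prompt, for example:

       (gdb) info proc                  -- cmdline, cwd and exe of the inferior
       (gdb) info proc 1234 mappings    -- memory map of process 1234
       (gdb) info proc all              -- every report this command knows  */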
3820
10d6c8cd
DJ
3821/* Implement the to_xfer_partial interface for memory reads using the /proc
3822 filesystem. Because we can use a single read() call for /proc, this
3823 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3824 but it doesn't support writes. */
3825
3826static LONGEST
3827linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3828 const char *annex, gdb_byte *readbuf,
3829 const gdb_byte *writebuf,
3830 ULONGEST offset, LONGEST len)
dba24537 3831{
10d6c8cd
DJ
3832 LONGEST ret;
3833 int fd;
dba24537
AC
3834 char filename[64];
3835
10d6c8cd 3836 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3837 return 0;
3838
3839 /* Don't bother for one word. */
3840 if (len < 3 * sizeof (long))
3841 return 0;
3842
3843 /* We could keep this file open and cache it - possibly one per
3844 thread. That requires some juggling, but is even faster. */
3845 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3846 fd = open (filename, O_RDONLY | O_LARGEFILE);
3847 if (fd == -1)
3848 return 0;
3849
3850 /* If pread64 is available, use it. It's faster if the kernel
3851 supports it (only one syscall), and it's 64-bit safe even on
3852 32-bit platforms (for instance, SPARC debugging a SPARC64
3853 application). */
3854#ifdef HAVE_PREAD64
10d6c8cd 3855 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3856#else
10d6c8cd 3857 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3858#endif
3859 ret = 0;
3860 else
3861 ret = len;
3862
3863 close (fd);
3864 return ret;
3865}
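
/* Illustrative sketch (hypothetical, guarded out): the slow path that
   the single read () above avoids.  Fetching LEN bytes over ptrace
   costs one syscall per word, roughly as below; a real loop would also
   handle a trailing partial word.  */
#if 0 /* Example only; the function name is hypothetical. */
static void
example_peektext_read (gdb_byte *readbuf, ULONGEST offset, LONGEST len)
{
  LONGEST i;

  for (i = 0; i + (LONGEST) sizeof (long) <= len; i += sizeof (long))
    {
      long word;

      errno = 0;
      word = ptrace (PTRACE_PEEKTEXT, PIDGET (inferior_ptid),
		     (void *) (unsigned long) (offset + i), NULL);
      if (errno != 0)
	break;
      memcpy (readbuf + i, &word, sizeof (long));
    }
}
#endif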
3866
3867/* Parse LINE as a signal set and add its set bits to SIGS. */
3868
3869static void
3870add_line_to_sigset (const char *line, sigset_t *sigs)
3871{
3872 int len = strlen (line) - 1;
3873 const char *p;
3874 int signum;
3875
3876 if (line[len] != '\n')
8a3fe4f8 3877 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3878
3879 p = line;
3880 signum = len * 4;
3881 while (len-- > 0)
3882 {
3883 int digit;
3884
3885 if (*p >= '0' && *p <= '9')
3886 digit = *p - '0';
3887 else if (*p >= 'a' && *p <= 'f')
3888 digit = *p - 'a' + 10;
3889 else
8a3fe4f8 3890 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3891
3892 signum -= 4;
3893
3894 if (digit & 1)
3895 sigaddset (sigs, signum + 1);
3896 if (digit & 2)
3897 sigaddset (sigs, signum + 2);
3898 if (digit & 4)
3899 sigaddset (sigs, signum + 3);
3900 if (digit & 8)
3901 sigaddset (sigs, signum + 4);
3902
3903 p++;
3904 }
3905}
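
/* Illustrative example (a sketch, not part of GDB proper): decoding a
   status line by hand.  Given the fragment "0000000000002002\n" -- the
   text after "SigPnd:\t" -- the rightmost hex digit covers signals 1-4
   and each digit to its left the next four, so the trailing '2' marks
   signal 2 (SIGINT) and the '2' three digits further left marks signal
   14 (SIGALRM).  */
#if 0 /* Example only; the function name is hypothetical. */
static void
example_decode_sigset (void)
{
  sigset_t sigs;

  sigemptyset (&sigs);
  add_line_to_sigset ("0000000000002002\n", &sigs);
  gdb_assert (sigismember (&sigs, SIGINT));
  gdb_assert (sigismember (&sigs, SIGALRM));
}
#endif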
3906
3907/* Find process PID's pending signals from /proc/pid/status and set
3908 SIGS to match. */
3909
3910void
3911linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3912{
3913 FILE *procfile;
3914 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3915 int signum;
3916
3917 sigemptyset (pending);
3918 sigemptyset (blocked);
3919 sigemptyset (ignored);
3920 sprintf (fname, "/proc/%d/status", pid);
3921 procfile = fopen (fname, "r");
3922 if (procfile == NULL)
8a3fe4f8 3923 error (_("Could not open %s"), fname);
dba24537
AC
3924
3925 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3926 {
3927 /* Normal queued signals are on the SigPnd line in the status
3928 file. However, 2.6 kernels also have a "shared" pending
3929 queue for delivering signals to a thread group, so check for
3930 a ShdPnd line also.
3931
3932 Unfortunately some Red Hat kernels include the shared pending
3933 queue but not the ShdPnd status field. */
3934
3935 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3936 add_line_to_sigset (buffer + 8, pending);
3937 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3938 add_line_to_sigset (buffer + 8, pending);
3939 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3940 add_line_to_sigset (buffer + 8, blocked);
3941 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3942 add_line_to_sigset (buffer + 8, ignored);
3943 }
3944
3945 fclose (procfile);
3946}
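
/* Illustrative usage sketch (hypothetical caller, not part of GDB
   proper): a caller that only cares whether the inferior already has a
   SIGINT queued might do something like this.  */
#if 0 /* Example only. */
static void
example_check_pending_sigint (void)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (PIDGET (inferior_ptid),
			      &pending, &blocked, &ignored);
  if (sigismember (&pending, SIGINT))
    printf_filtered (_("SIGINT is already pending\n"));
}
#endif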
3947
10d6c8cd
DJ
3948static LONGEST
3949linux_xfer_partial (struct target_ops *ops, enum target_object object,
3950 const char *annex, gdb_byte *readbuf,
3951 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3952{
3953 LONGEST xfer;
3954
3955 if (object == TARGET_OBJECT_AUXV)
3956 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3957 offset, len);
3958
3959 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3960 offset, len);
3961 if (xfer != 0)
3962 return xfer;
3963
3964 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3965 offset, len);
3966}
3967
e9efe249 3968/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
3969 it with local methods. */
3970
910122bf
UW
3971static void
3972linux_target_install_ops (struct target_ops *t)
10d6c8cd 3973{
6d8fd2b7
UW
3974 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3975 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3976 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3977 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 3978 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
3979 t->to_post_attach = linux_child_post_attach;
3980 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
3981 t->to_find_memory_regions = linux_nat_find_memory_regions;
3982 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3983
3984 super_xfer_partial = t->to_xfer_partial;
3985 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
3986}
3987
3988struct target_ops *
3989linux_target (void)
3990{
3991 struct target_ops *t;
3992
3993 t = inf_ptrace_target ();
3994 linux_target_install_ops (t);
3995
3996 return t;
3997}
3998
3999struct target_ops *
7714d83a 4000linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4001{
4002 struct target_ops *t;
4003
4004 t = inf_ptrace_trad_target (register_u_offset);
4005 linux_target_install_ops (t);
10d6c8cd 4006
10d6c8cd
DJ
4007 return t;
4008}
4009
b84876c2
PA
4010/* target_is_async_p implementation. */
4011
4012static int
4013linux_nat_is_async_p (void)
4014{
4015 /* NOTE: palves 2008-03-21: We're only async when the user requests
c6ebd6cf 4016 it explicitly with the "maintenance set target-async" command.
b84876c2 4017 Someday, Linux will always be async. */
c6ebd6cf 4018 if (!target_async_permitted)
b84876c2
PA
4019 return 0;
4020
4021 return 1;
4022}
4023
4024/* target_can_async_p implementation. */
4025
4026static int
4027linux_nat_can_async_p (void)
4028{
4029 /* NOTE: palves 2008-03-21: We're only async when the user requests
c6ebd6cf 4030 it explicitly with the "maintenance set target-async" command.
b84876c2 4031 Someday, Linux will always be async. */
c6ebd6cf 4032 if (!target_async_permitted)
b84876c2
PA
4033 return 0;
4034
4035 /* See target.h/target_async_mask. */
4036 return linux_nat_async_mask_value;
4037}
4038
9908b566
VP
4039static int
4040linux_nat_supports_non_stop (void)
4041{
4042 return 1;
4043}
4044
b84876c2
PA
4045/* target_async_mask implementation. */
4046
4047static int
4048linux_nat_async_mask (int mask)
4049{
4050 int current_state;
4051 current_state = linux_nat_async_mask_value;
4052
4053 if (current_state != mask)
4054 {
4055 if (mask == 0)
4056 {
4057 linux_nat_async (NULL, 0);
4058 linux_nat_async_mask_value = mask;
b84876c2
PA
4059 }
4060 else
4061 {
b84876c2
PA
4062 linux_nat_async_mask_value = mask;
4063 linux_nat_async (inferior_event_handler, 0);
4064 }
4065 }
4066
4067 return current_state;
4068}
4069
4070/* Pop an event from the event pipe. */
4071
4072static int
4073linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options)
4074{
4075 struct waitpid_result event = {0};
4076 int ret;
4077
4078 do
4079 {
4080 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
4081 }
4082 while (ret == -1 && errno == EINTR);
4083
4084 gdb_assert (ret == sizeof (event));
4085
4086 *ptr_status = event.status;
4087 *ptr_options = event.options;
4088
4089 linux_nat_num_queued_events--;
4090
4091 return event.pid;
4092}
4093
4094/* Push an event into the event pipe. */
4095
4096static void
4097linux_nat_event_pipe_push (int pid, int status, int options)
4098{
4099 int ret;
4100 struct waitpid_result event = {0};
4101 event.pid = pid;
4102 event.status = status;
4103 event.options = options;
4104
4105 do
4106 {
4107 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
4108 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
4109 } while (ret == -1 && errno == EINTR);
4110
4111 linux_nat_num_queued_events++;
4112}
4113
4114static void
4115get_pending_events (void)
4116{
4117 int status, options, pid;
4118
c6ebd6cf 4119 if (!target_async_permitted
84e46146 4120 || linux_nat_async_events_state != sigchld_async)
b84876c2
PA
4121 internal_error (__FILE__, __LINE__,
4122 "get_pending_events called with async masked");
4123
4124 while (1)
4125 {
4126 status = 0;
4127 options = __WCLONE | WNOHANG;
4128
4129 do
4130 {
4131 pid = waitpid (-1, &status, options);
4132 }
4133 while (pid == -1 && errno == EINTR);
4134
4135 if (pid <= 0)
4136 {
4137 options = WNOHANG;
4138 do
4139 {
4140 pid = waitpid (-1, &status, options);
4141 }
4142 while (pid == -1 && errno == EINTR);
4143 }
4144
4145 if (pid <= 0)
4146 /* No more children reporting events. */
4147 break;
4148
4149 if (debug_linux_nat_async)
4150 fprintf_unfiltered (gdb_stdlog, "\
4151get_pending_events: pid(%d), status(%x), options (%x)\n",
4152 pid, status, options);
4153
4154 linux_nat_event_pipe_push (pid, status, options);
4155 }
4156
4157 if (debug_linux_nat_async)
4158 fprintf_unfiltered (gdb_stdlog, "\
4159get_pending_events: linux_nat_num_queued_events(%d)\n",
4160 linux_nat_num_queued_events);
4161}
4162
4163/* SIGCHLD handler for async mode. */
4164
4165static void
4166async_sigchld_handler (int signo)
4167{
4168 if (debug_linux_nat_async)
4169 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
4170
4171 get_pending_events ();
4172}
4173
84e46146 4174/* Set SIGCHLD handling state to STATE. Returns previous state. */
b84876c2 4175
84e46146
PA
4176static enum sigchld_state
4177linux_nat_async_events (enum sigchld_state state)
b84876c2 4178{
84e46146 4179 enum sigchld_state current_state = linux_nat_async_events_state;
b84876c2
PA
4180
4181 if (debug_linux_nat_async)
4182 fprintf_unfiltered (gdb_stdlog,
84e46146 4183 "LNAE: state(%d): linux_nat_async_events_state(%d), "
b84876c2 4184 "linux_nat_num_queued_events(%d)\n",
84e46146 4185 state, linux_nat_async_events_state,
b84876c2
PA
4186 linux_nat_num_queued_events);
4187
84e46146 4188 if (current_state != state)
b84876c2
PA
4189 {
4190 sigset_t mask;
4191 sigemptyset (&mask);
4192 sigaddset (&mask, SIGCHLD);
84e46146
PA
4193
4194 /* Always block before changing state. */
4195 sigprocmask (SIG_BLOCK, &mask, NULL);
4196
4197 /* Set new state. */
4198 linux_nat_async_events_state = state;
4199
4200 switch (state)
b84876c2 4201 {
84e46146
PA
4202 case sigchld_sync:
4203 {
4204 /* Block target events. */
4205 sigprocmask (SIG_BLOCK, &mask, NULL);
4206 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4207 /* Get events out of queue, and make them available to
4208 queued_waitpid / my_waitpid. */
4209 pipe_to_local_event_queue ();
4210 }
4211 break;
4212 case sigchld_async:
4213 {
4214 /* Unblock target events for async mode. */
4215
4216 sigprocmask (SIG_BLOCK, &mask, NULL);
4217
4218 /* Put events we already waited on, in the pipe first, so
4219 events are FIFO. */
4220 local_event_queue_to_pipe ();
4221 /* While in masked async, we may have not collected all
4222 the pending events. Get them out now. */
4223 get_pending_events ();
4224
4225 /* Let'em come. */
4226 sigaction (SIGCHLD, &async_sigchld_action, NULL);
4227 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4228 }
4229 break;
4230 case sigchld_default:
4231 {
4232 /* SIGCHLD default mode. */
4233 sigaction (SIGCHLD, &sigchld_default_action, NULL);
4234
4235 /* Get events out of queue, and make them available to
4236 queued_waitpid / my_waitpid. */
4237 pipe_to_local_event_queue ();
4238
4239 /* Unblock SIGCHLD. */
4240 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4241 }
4242 break;
b84876c2
PA
4243 }
4244 }
4245
4246 return current_state;
4247}
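
/* Illustrative sketch (hypothetical caller): the usual pattern around
   the state switch above is to drop to synchronous SIGCHLD handling for
   a stretch of blocking waitpid work, then restore the previous state.  */
#if 0 /* Example only; the function name is hypothetical. */
static void
example_sync_waitpid_section (void)
{
  enum sigchld_state saved = linux_nat_async_events (sigchld_sync);

  /* ... blocking queued_waitpid / my_waitpid calls go here ... */

  linux_nat_async_events (saved);
}
#endif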
4248
4249static int async_terminal_is_ours = 1;
4250
4251/* target_terminal_inferior implementation. */
4252
4253static void
4254linux_nat_terminal_inferior (void)
4255{
4256 if (!target_is_async_p ())
4257 {
4258 /* Async mode is disabled. */
4259 terminal_inferior ();
4260 return;
4261 }
4262
4263 /* GDB should never give the terminal to the inferior, if the
4264 inferior is running in the background (run&, continue&, etc.).
4265 This check can be removed when the common code is fixed. */
4266 if (!sync_execution)
4267 return;
4268
4269 terminal_inferior ();
4270
4271 if (!async_terminal_is_ours)
4272 return;
4273
4274 delete_file_handler (input_fd);
4275 async_terminal_is_ours = 0;
4276 set_sigint_trap ();
4277}
4278
4279/* target_terminal_ours implementation. */
4280
4281void
4282linux_nat_terminal_ours (void)
4283{
4284 if (!target_is_async_p ())
4285 {
4286 /* Async mode is disabled. */
4287 terminal_ours ();
4288 return;
4289 }
4290
4291 /* GDB should never give the terminal to the inferior if the
4292 inferior is running in the background (run&, continue&, etc.),
4293 but claiming it sure should. */
4294 terminal_ours ();
4295
4296 if (!sync_execution)
4297 return;
4298
4299 if (async_terminal_is_ours)
4300 return;
4301
4302 clear_sigint_trap ();
4303 add_file_handler (input_fd, stdin_event_handler, 0);
4304 async_terminal_is_ours = 1;
4305}
4306
4307static void (*async_client_callback) (enum inferior_event_type event_type,
4308 void *context);
4309static void *async_client_context;
4310
4311static void
4312linux_nat_async_file_handler (int error, gdb_client_data client_data)
4313{
4314 async_client_callback (INF_REG_EVENT, async_client_context);
4315}
4316
4317/* target_async implementation. */
4318
4319static void
4320linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4321 void *context), void *context)
4322{
c6ebd6cf 4323 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
4324 internal_error (__FILE__, __LINE__,
4325 "Calling target_async when async is masked");
4326
4327 if (callback != NULL)
4328 {
4329 async_client_callback = callback;
4330 async_client_context = context;
4331 add_file_handler (linux_nat_event_pipe[0],
4332 linux_nat_async_file_handler, NULL);
4333
84e46146 4334 linux_nat_async_events (sigchld_async);
b84876c2
PA
4335 }
4336 else
4337 {
4338 async_client_callback = callback;
4339 async_client_context = context;
4340
84e46146 4341 linux_nat_async_events (sigchld_sync);
b84876c2
PA
4342 delete_file_handler (linux_nat_event_pipe[0]);
4343 }
4344 return;
4345}
4346
4c28f408
PA
4347static int
4348send_sigint_callback (struct lwp_info *lp, void *data)
4349{
4350 /* Use is_running instead of !lp->stopped, because the lwp may be
4351 stopped due to an internal event, and we want to interrupt it in
4352 that case too. What we want is to check if the thread is stopped
4353 from the point of view of the user. */
4354 if (is_running (lp->ptid))
4355 kill_lwp (GET_LWP (lp->ptid), SIGINT);
4356 return 0;
4357}
4358
4359static void
4360linux_nat_stop (ptid_t ptid)
4361{
4362 if (non_stop)
4363 {
4364 if (ptid_equal (ptid, minus_one_ptid))
4365 iterate_over_lwps (send_sigint_callback, &ptid);
4366 else
4367 {
4368 struct lwp_info *lp = find_lwp_pid (ptid);
4369 send_sigint_callback (lp, NULL);
4370 }
4371 }
4372 else
4373 linux_ops->to_stop (ptid);
4374}
4375
f973ed9c
DJ
4376void
4377linux_nat_add_target (struct target_ops *t)
4378{
f973ed9c
DJ
4379 /* Save the provided single-threaded target. We save this in a separate
4380 variable because another target we've inherited from (e.g. inf-ptrace)
4381 may have saved a pointer to T; we want to use it for the final
4382 process stratum target. */
4383 linux_ops_saved = *t;
4384 linux_ops = &linux_ops_saved;
4385
4386 /* Override some methods for multithreading. */
b84876c2 4387 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4388 t->to_attach = linux_nat_attach;
4389 t->to_detach = linux_nat_detach;
4390 t->to_resume = linux_nat_resume;
4391 t->to_wait = linux_nat_wait;
4392 t->to_xfer_partial = linux_nat_xfer_partial;
4393 t->to_kill = linux_nat_kill;
4394 t->to_mourn_inferior = linux_nat_mourn_inferior;
4395 t->to_thread_alive = linux_nat_thread_alive;
4396 t->to_pid_to_str = linux_nat_pid_to_str;
4397 t->to_has_thread_control = tc_schedlock;
4398
b84876c2
PA
4399 t->to_can_async_p = linux_nat_can_async_p;
4400 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4401 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
4402 t->to_async = linux_nat_async;
4403 t->to_async_mask = linux_nat_async_mask;
4404 t->to_terminal_inferior = linux_nat_terminal_inferior;
4405 t->to_terminal_ours = linux_nat_terminal_ours;
4406
4c28f408
PA
4407 /* Methods for non-stop support. */
4408 t->to_stop = linux_nat_stop;
4409
f973ed9c
DJ
4410 /* We don't change the stratum; this target will sit at
4411 process_stratum and thread_db will set at thread_stratum. This
4412 is a little strange, since this is a multi-threaded-capable
4413 target, but we want to be on the stack below thread_db, and we
4414 also want to be used for single-threaded processes. */
4415
4416 add_target (t);
4417
4418 /* TODO: Eliminate this and have libthread_db use
4419 find_target_beneath. */
4420 thread_db_init (t);
4421}
4422
9f0bdab8
DJ
4423/* Register a method to call whenever a new thread is attached. */
4424void
4425linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4426{
4427 /* Save the pointer. We only support a single registered instance
4428 of the GNU/Linux native target, so we do not need to map this to
4429 T. */
4430 linux_nat_new_thread = new_thread;
4431}
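
/* Illustrative sketch (all "example_*" names are hypothetical): an
   architecture's native file typically builds on the generic target
   from its own _initialize routine, overriding the register access
   methods and then registering the result.  */
#if 0 /* Example only. */
static void
example_linux_new_thread (ptid_t ptid)
{
  /* Arch-specific per-thread setup would go here.  */
}

void
_initialize_example_linux_nat (void)
{
  struct target_ops *t = linux_target ();

  t->to_fetch_registers = example_fetch_registers;
  t->to_store_registers = example_store_registers;

  linux_nat_add_target (t);
  linux_nat_set_new_thread (t, example_linux_new_thread);
}
#endif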
4432
4433/* Return the saved siginfo associated with PTID. */
4434struct siginfo *
4435linux_nat_get_siginfo (ptid_t ptid)
4436{
4437 struct lwp_info *lp = find_lwp_pid (ptid);
4438
4439 gdb_assert (lp != NULL);
4440
4441 return &lp->siginfo;
4442}
4443
c6ebd6cf
VP
4444/* Enable/Disable async mode. */
4445
4446static void
4447linux_nat_setup_async (void)
4448{
4449 if (pipe (linux_nat_event_pipe) == -1)
4450 internal_error (__FILE__, __LINE__,
4451 "creating event pipe failed.");
4452 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4453 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4454}
4455
d6b0e80f
AC
4456void
4457_initialize_linux_nat (void)
4458{
b84876c2 4459 sigset_t mask;
dba24537 4460
1bedd215
AC
4461 add_info ("proc", linux_nat_info_proc_cmd, _("\
4462Show /proc process information about any running process.\n\
dba24537
AC
4463Specify any process id, or use the program being debugged by default.\n\
4464Specify any of the following keywords for detailed info:\n\
4465 mappings -- list of mapped memory regions.\n\
4466 stat -- list a bunch of random process info.\n\
4467 status -- list a different bunch of random process info.\n\
1bedd215 4468 all -- list all available /proc info."));
d6b0e80f 4469
b84876c2
PA
4470 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4471 &debug_linux_nat, _("\
4472Set debugging of GNU/Linux lwp module."), _("\
4473Show debugging of GNU/Linux lwp module."), _("\
4474Enables printf debugging output."),
4475 NULL,
4476 show_debug_linux_nat,
4477 &setdebuglist, &showdebuglist);
4478
4479 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4480 &debug_linux_nat_async, _("\
4481Set debugging of GNU/Linux async lwp module."), _("\
4482Show debugging of GNU/Linux async lwp module."), _("\
4483Enables printf debugging output."),
4484 NULL,
4485 show_debug_linux_nat_async,
4486 &setdebuglist, &showdebuglist);
4487
84e46146
PA
4488 /* Get the default SIGCHLD action. Used while forking an inferior
4489 (see linux_nat_create_inferior/linux_nat_async_events). */
4490 sigaction (SIGCHLD, NULL, &sigchld_default_action);
4491
b84876c2
PA
4492 /* Block SIGCHLD by default. Doing this early prevents it getting
4493 unblocked if an exception is thrown due to an error while the
4494 inferior is starting (sigsetjmp/siglongjmp). */
4495 sigemptyset (&mask);
4496 sigaddset (&mask, SIGCHLD);
4497 sigprocmask (SIG_BLOCK, &mask, NULL);
4498
4499 /* Save this mask as the default. */
d6b0e80f
AC
4500 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4501
b84876c2
PA
4502 /* The synchronous SIGCHLD handler. */
4503 sync_sigchld_action.sa_handler = sigchld_handler;
4504 sigemptyset (&sync_sigchld_action.sa_mask);
4505 sync_sigchld_action.sa_flags = SA_RESTART;
4506
4507 /* Make it the default. */
4508 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
d6b0e80f
AC
4509
4510 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4511 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4512 sigdelset (&suspend_mask, SIGCHLD);
4513
b84876c2
PA
4514 /* SIGCHLD handler for async mode. */
4515 async_sigchld_action.sa_handler = async_sigchld_handler;
4516 sigemptyset (&async_sigchld_action.sa_mask);
4517 async_sigchld_action.sa_flags = SA_RESTART;
d6b0e80f 4518
c6ebd6cf 4519 linux_nat_setup_async ();
10568435
JK
4520
4521 add_setshow_boolean_cmd ("disable-randomization", class_support,
4522 &disable_randomization, _("\
4523Set disabling of debuggee's virtual address space randomization."), _("\
4524Show disabling of debuggee's virtual address space randomization."), _("\
4525When this mode is on (which is the default), randomization of the virtual\n\
4526address space is disabled. Standalone programs run with the randomization\n\
4527enabled by default on some platforms."),
4528 &set_disable_randomization,
4529 &show_disable_randomization,
4530 &setlist, &showlist);
d6b0e80f
AC
4531}
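
/* Illustrative usage (not additional code): the knobs registered above
   are driven from the GDB prompt, for example:

       (gdb) set debug lin-lwp 1
       (gdb) set debug lin-lwp-async 1
       (gdb) set disable-randomization off
       (gdb) maintenance set target-async on  */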
4532\f
4533
4534/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4535 the GNU/Linux Threads library and therefore doesn't really belong
4536 here. */
4537
4538/* Read variable NAME in the target and return its value if found.
4539 Otherwise return zero. It is assumed that the type of the variable
4540 is `int'. */
4541
4542static int
4543get_signo (const char *name)
4544{
4545 struct minimal_symbol *ms;
4546 int signo;
4547
4548 ms = lookup_minimal_symbol (name, NULL, NULL);
4549 if (ms == NULL)
4550 return 0;
4551
8e70166d 4552 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4553 sizeof (signo)) != 0)
4554 return 0;
4555
4556 return signo;
4557}
4558
4559/* Return the set of signals used by the threads library in *SET. */
4560
4561void
4562lin_thread_get_thread_signals (sigset_t *set)
4563{
4564 struct sigaction action;
4565 int restart, cancel;
b84876c2 4566 sigset_t blocked_mask;
d6b0e80f 4567
b84876c2 4568 sigemptyset (&blocked_mask);
d6b0e80f
AC
4569 sigemptyset (set);
4570
4571 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4572 cancel = get_signo ("__pthread_sig_cancel");
4573
4574 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4575 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4576 not provide any way for the debugger to query the signal numbers -
4577 fortunately they don't change! */
4578
d6b0e80f 4579 if (restart == 0)
17fbb0bd 4580 restart = __SIGRTMIN;
d6b0e80f 4581
d6b0e80f 4582 if (cancel == 0)
17fbb0bd 4583 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
4584
4585 sigaddset (set, restart);
4586 sigaddset (set, cancel);
4587
4588 /* The GNU/Linux Threads library makes terminating threads send a
4589 special "cancel" signal instead of SIGCHLD. Make sure we catch
4590 those (to prevent them from terminating GDB itself, which is
4591 likely to be their default action) and treat them the same way as
4592 SIGCHLD. */
4593
4594 action.sa_handler = sigchld_handler;
4595 sigemptyset (&action.sa_mask);
58aecb61 4596 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4597 sigaction (cancel, &action, NULL);
4598
4599 /* We block the "cancel" signal throughout this code ... */
4600 sigaddset (&blocked_mask, cancel);
4601 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4602
4603 /* ... except during a sigsuspend. */
4604 sigdelset (&suspend_mask, cancel);
4605}