/* gdb/linux-nat.c */
/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc. */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc. */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"

/* Note on this file's use of signals:

   We stop threads by sending a SIGSTOP. The use of SIGSTOP instead
   of another signal is not entirely significant; we just need for a
   signal to be delivered, so that we can intercept it. SIGSTOP's
   advantage is that it can not be blocked. A disadvantage is that it
   is not a real-time signal, so it can only be queued once; we do not
   keep track of other sources of SIGSTOP.

   Two other signals that can't be blocked are SIGCONT and SIGKILL.
   But we can't use them, because they have special behavior when the
   signal is generated - not when it is delivered. SIGCONT resumes
   the entire thread group and SIGKILL kills the entire thread group.

   A delivered SIGSTOP would stop the entire thread group, not just the
   thread we tkill'd. But we never let the SIGSTOP deliver; we always
   intercept and cancel it (by PTRACE_CONT without passing SIGSTOP).

   We could use a real-time signal instead. This would solve those
   problems; we could use PTRACE_GETSIGINFO to locate the specific
   stop signals sent by GDB. But we would still have to have some
   support for SIGSTOP, since PTRACE_ATTACH generates it, and there
   are races with trying to find a signal that is not blocked. */

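/* Illustrative sketch (not part of the original file): the stop/cancel
   cycle described above boils down to roughly this sequence, which the
   real code spreads across kill_lwp, stop_callback, wait_lwp and
   stop_wait_callback further below:

     kill_lwp (lwpid, SIGSTOP);            -- queue a stop signal
     waitpid (lwpid, &status, __WALL);     -- observe WIFSTOPPED / SIGSTOP
     ptrace (PTRACE_CONT, lwpid, 0, 0);    -- resume without delivering it

   The real implementation additionally distinguishes cloned from
   non-cloned LWPs (__WCLONE) and copes with other signals arriving
   before the SIGSTOP. */
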
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values. */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here. */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child. */
#endif

#ifndef PTRACE_GETSIGINFO
#define PTRACE_GETSIGINFO    0x4202
#endif

/* The single-threaded native GNU/Linux target_ops. We save a pointer for
   the use of the multi-threaded target. */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached. */
static void (*linux_nat_new_thread) (ptid_t);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial. */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

static int debug_linux_nat_async = 0;
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
                    value);
}

static int linux_parent_pid;

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can. */

static int linux_supports_tracefork_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE. */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support */

/* To listen to target events asynchronously, we install a SIGCHLD
   handler whose duty is to call waitpid (-1, ..., WNOHANG) to get all
   the pending events into a pipe. Whenever we're ready to handle
   events asynchronously, this pipe is registered as the waitable file
   handle in the event loop. When we get to entry target points
   coming out of the common code (target_wait, target_resume, ...),
   that are going to call waitpid, we block SIGCHLD signals, and
   remove all the events placed in the pipe into a local queue. All
   the subsequent calls to my_waitpid (a waitpid wrapper) check this
   local queue first. */

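/* Illustrative sketch (not part of the original file) of the event flow
   described above; the helper names are the ones declared below, and
   the exact wiring is an assumption based on this comment:

     SIGCHLD handler:   waitpid (-1, &status, WNOHANG | ...) in a loop,
                        pushing each result into the event pipe;
     target entry:      block SIGCHLD and drain the pipe into the local
                        queue (pipe_to_local_event_queue, push_waitpid);
     my_waitpid:        checks queued_waitpid () first and only then
                        falls back to the real waitpid (). */
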
/* True if async mode is currently on. */
static int linux_nat_async_enabled;

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off. */
static int linux_nat_async_mask_value = 1;

/* The read/write ends of the pipe registered as waitable file in the
   event loop. */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Number of queued events in the pipe. */
static volatile int linux_nat_num_queued_events;

/* If async mode is on, true if we're listening for events; false if
   target events are blocked. */
static int linux_nat_async_events_enabled;

static int linux_nat_async_events (int enable);
static void pipe_to_local_event_queue (void);
static void local_event_queue_to_pipe (void);
static void linux_nat_event_pipe_push (int pid, int status, int options);
static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
static void linux_nat_set_async_mode (int on);
static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type, void *context),
                             void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

/* Captures the result of a successful waitpid call, along with the
   options used in that call. */
struct waitpid_result
{
  int pid;
  int status;
  int options;
  struct waitpid_result *next;
};

/* A singly-linked list of the results of the waitpid calls performed
   in the async SIGCHLD handler. */
static struct waitpid_result *waitpid_queue = NULL;

static int
queued_waitpid (int pid, int *status, int flags)
{
  struct waitpid_result *msg = waitpid_queue, *prev = NULL;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog,
                        "\
QWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n",
                        linux_nat_async_events_enabled,
                        linux_nat_num_queued_events);

  if (flags & __WALL)
    {
      for (; msg; prev = msg, msg = msg->next)
        if (pid == -1 || pid == msg->pid)
          break;
    }
  else if (flags & __WCLONE)
    {
      for (; msg; prev = msg, msg = msg->next)
        if (msg->options & __WCLONE
            && (pid == -1 || pid == msg->pid))
          break;
    }
  else
    {
      for (; msg; prev = msg, msg = msg->next)
        if ((msg->options & __WCLONE) == 0
            && (pid == -1 || pid == msg->pid))
          break;
    }

  if (msg)
    {
      int pid;

      if (prev)
        prev->next = msg->next;
      else
        waitpid_queue = msg->next;

      msg->next = NULL;
      if (status)
        *status = msg->status;
      pid = msg->pid;

      if (debug_linux_nat_async)
        fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
                            pid, msg->status);
      xfree (msg);

      return pid;
    }

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");

  if (status)
    *status = 0;
  return -1;
}

static void
push_waitpid (int pid, int status, int options)
{
  struct waitpid_result *event, *new_event;

  new_event = xmalloc (sizeof (*new_event));
  new_event->pid = pid;
  new_event->status = status;
  new_event->options = options;
  new_event->next = NULL;

  if (waitpid_queue)
    {
      for (event = waitpid_queue;
           event && event->next;
           event = event->next)
        ;

      event->next = new_event;
    }
  else
    waitpid_queue = new_event;
}

/* Drain all queued events of PID. If PID is -1, the effect is of
   draining all events. */
static void
drain_queued_events (int pid)
{
  while (queued_waitpid (pid, NULL, __WALL) != -1)
    ;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes. */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;
        *status = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}

\f
/* A helper function for linux_test_for_tracefork, called after fork (). */

static void
linux_tracefork_child (void)
{
  int ret;

  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR, and checks for
   locally queued events. */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret;

  /* There should be no concurrent calls to waitpid. */
  gdb_assert (!linux_nat_async_events_enabled);

  ret = queued_waitpid (pid, status, flags);
  if (ret != -1)
    return ret;

  do
    {
      ret = waitpid (pid, status, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
   we know that the feature is not available. This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork. If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can. */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
                   "killed child"), status);

      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag. */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);
  int options;

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
    | PTRACE_O_TRACECLONE;
  if (linux_supports_tracevforkdone (pid))
    options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state. */

  ptrace (PTRACE_SETOPTIONS, pid, 0, options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  ptid_t last_ptid;
  struct target_waitstatus last_status;
  int has_vforked;
  int parent_pid, child_pid;

  if (target_can_async_p ())
    target_async (NULL, 0);

  get_last_target_status (&last_ptid, &last_status);
  has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (last_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (last_ptid);
  child_pid = last_status.value.related_pid;

  if (! follow_child)
    {
      /* We're already attached to the parent, by default. */

      /* Before detaching from the child, remove all breakpoints from
         it. (This won't actually modify the breakpoint list, but will
         physically remove the breakpoints from the child.) */
      /* If we vforked this will remove the breakpoints from the parent
         also, but they'll be reinserted below. */
      detach_breakpoints (child_pid);

      /* Detach new forked process? */
      if (detach_fork)
        {
          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from child process %d.\n",
                                child_pid);
            }

          ptrace (PTRACE_DETACH, child_pid, 0, 0);
        }
      else
        {
          struct fork_info *fp;
          /* Retain child fork in ptrace (stopped) state. */
          fp = find_fork_pid (child_pid);
          if (!fp)
            fp = add_fork (child_pid);
          fork_save_infrun_state (fp, 0);
        }

      if (has_vforked)
        {
          gdb_assert (linux_supports_tracefork_flag >= 0);
          if (linux_supports_tracevforkdone (0))
            {
              int status;

              ptrace (PTRACE_CONT, parent_pid, 0, 0);
              my_waitpid (parent_pid, &status, __WALL);
              if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
                warning (_("Unexpected waitpid result %06x when waiting for "
                           "vfork-done"), status);
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region. We need to
                 wait until that happens. Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP. One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory. We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once. When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent. Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while. Hopefully it will be out of
                 range of any breakpoints we reinsert. Usually this
                 is only the single-step breakpoint at vfork's return
                 point. */

              usleep (10000);
            }

          /* Since we vforked, breakpoints were removed in the parent
             too. Put them back. */
          reattach_breakpoints (parent_pid);
        }
    }
  else
    {
      char child_pid_spelling[40];

      /* Needed to keep the breakpoint lists in sync. */
      if (! has_vforked)
        detach_breakpoints (child_pid);

      /* Before detaching from the parent, remove all breakpoints from it. */
      remove_breakpoints ();

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          fprintf_filtered (gdb_stdlog,
                            "Attaching after fork to child process %d.\n",
                            child_pid);
        }

      /* If we're vforking, we may want to hold on to the parent until
         the child exits or execs. At exec time we can remove the old
         breakpoints from the parent and detach it; at exit time we
         could do the same (or even, sneakily, resume debugging it - the
         child's exec has failed, or something similar).

         This doesn't clean up "properly", because we can't call
         target_detach, but that's OK; if the current target is "child",
         then it doesn't need any further cleanups, and lin_lwp will
         generally not encounter vfork (vfork is defined to fork
         in libpthread.so).

         The holding part is very easy if we have VFORKDONE events;
         but keeping track of both processes is beyond GDB at the
         moment. So we don't expose the parent to the rest of GDB.
         Instead we quietly hold onto it until such time as we can
         safely resume it. */

      if (has_vforked)
        linux_parent_pid = parent_pid;
      else if (!detach_fork)
        {
          struct fork_info *fp;
          /* Retain parent fork in ptrace (stopped) state. */
          fp = find_fork_pid (parent_pid);
          if (!fp)
            fp = add_fork (parent_pid);
          fork_save_infrun_state (fp, 0);
        }
      else
        target_detach (NULL, 0);

      inferior_ptid = ptid_build (child_pid, child_pid, 0);

      /* Reinstall ourselves, since we might have been removed in
         target_detach (which does other necessary cleanup). */

      push_target (ops);
      linux_nat_switch_fork (inferior_ptid);
      check_for_thread_db ();

      /* Reset breakpoints in the child as appropriate. */
      follow_inferior_reset_breakpoints ();
    }

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  return 0;
}

\f
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}

static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}

static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}

/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
   are processes sharing the same VM space. A multi-threaded process
   is basically a group of such processes. However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future). In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly well possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly. This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process). However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie". Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around. */

/* List of known LWPs. */
struct lwp_info *lwp_list;

/* Number of LWPs in the list. */
static int num_lwps;
\f

/* If the last reported event was a SIGTRAP, this variable is set to
   the process id of the LWP/thread that got it. */
ptid_t trap_ptid;
\f

/* Since we cannot wait (in linux_nat_wait) for the initial process and
   any cloned processes with a single call to waitpid, we have to use
   the WNOHANG flag and call waitpid in a loop. To optimize
   things a bit we use `sigsuspend' to wake us up when a process has
   something to report (it will send us a SIGCHLD if it has). To make
   this work we have to juggle with the signal mask. We save the
   original signal mask such that we can restore it before creating a
   new process in order to avoid blocking certain signals in the
   inferior. We then block SIGCHLD during the waitpid/sigsuspend
   loop. */

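/* Illustrative sketch (not part of the original file) of the mask
   juggling described above, in terms of the masks defined just below:

     sigprocmask (SIG_BLOCK, ..., &normal_mask);   -- save the original mask
     while (no event reported yet)
       {
         pid = waitpid (..., WNOHANG);             -- poll for an event
         if (pid <= 0)
           sigsuspend (&suspend_mask);             -- sleep until SIGCHLD
       }

   Before creating a new inferior, the original mask is restored (see
   linux_nat_create_inferior) so the child does not start with SIGCHLD
   blocked. */
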
/* Original signal mask. */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat. */
static sigset_t suspend_mask;

/* SIGCHLD action for synchronous mode. */
struct sigaction sync_sigchld_action;

/* SIGCHLD action for asynchronous mode. */
static struct sigaction async_sigchld_action;
\f

/* Prototypes for local functions. */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_nat_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);
static int cancel_breakpoint (struct lwp_info *lp);

\f
/* Convert wait status STATUS to a string. Used for printing debug
   messages only. */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    snprintf (buf, sizeof (buf), "%s (stopped)",
              strsignal (WSTOPSIG (status)));
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WSTOPSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Initialize the list of LWPs. Note that this module, contrary to
   what GDB's generic threads layer does for its thread list,
   re-initializes the LWP lists whenever we mourn or detach (which
   doesn't involve mourning) the inferior. */

static void
init_lwp_list (void)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      xfree (lp);
    }

  lwp_list = NULL;
  num_lwps = 0;
}

/* Add the LWP specified by PID to the list. Return a pointer to the
   structure describing the new LWP. The LWP should already be stopped
   (with an exception for the very first LWP). */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;

  lp->next = lwp_list;
  lwp_list = lp;
  ++num_lwps;

  if (num_lwps > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PID from the list. */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  num_lwps--;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID. If no corresponding LWP could be found, return NULL. */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list. If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL. */

struct lwp_info *
iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      if ((*callback) (lp, data))
        return lp;
    }

  return NULL;
}

/* Update our internal state when changing from one fork (checkpoint,
   et cetera) to another indicated by NEW_PTID. We can only switch
   single-threaded applications, so we only create one new LWP, and
   the previous list is discarded. */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  init_thread_list ();
  init_lwp_list ();
  lp = add_lwp (new_ptid);
  add_thread_silent (new_ptid);
  lp->stopped = 1;
}

/* Record a PTID for later deletion. */

struct saved_ptids
{
  ptid_t ptid;
  struct saved_ptids *next;
};
static struct saved_ptids *threads_to_delete;

static void
record_dead_thread (ptid_t ptid)
{
  struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
  p->ptid = ptid;
  p->next = threads_to_delete;
  threads_to_delete = p;
}

/* Delete any dead threads which are not the current thread. */

static void
prune_lwps (void)
{
  struct saved_ptids **p = &threads_to_delete;

  while (*p)
    if (! ptid_equal ((*p)->ptid, inferior_ptid))
      {
        struct saved_ptids *tmp = *p;
        delete_thread (tmp->ptid);
        *p = tmp->next;
        xfree (tmp);
      }
    else
      p = &(*p)->next;
}

/* Handle the exit of a single thread LP. */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_pid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      /* Core GDB cannot deal with us deleting the current thread. */
      if (!ptid_equal (lp->ptid, inferior_ptid))
        delete_thread (lp->ptid);
      else
        record_dead_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false. */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      while (fgets (buf, sizeof (buf), status_file))
        {
          if (strncmp (buf, "State:", 6) == 0)
            {
              have_state = 1;
              break;
            }
        }
      if (have_state && strstr (buf, "T (stopped)") != NULL)
        retval = 1;
      fclose (status_file);
    }
  return retval;
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache. */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
                            int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped. It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop. Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP. Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once. */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process. This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH). */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped. The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet. */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
        warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes. */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid && WIFSTOPPED (status));

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID. Return 0 if successful or -1
   if the new LWP could not be attached. */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int async_events_were_enabled = 0;

  gdb_assert (is_lwp (ptid));

  if (target_can_async_p ())
    async_events_were_enabled = linux_nat_async_events (0);

  lp = find_lwp_pid (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs. If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen. */
  if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
        {
          /* If we fail to attach to the thread, issue a warning,
             but continue. One way this can happen is if thread
             creation is interrupted; as of Linux kernel 2.6.19, a
             bug may place threads in the thread list and then fail
             to create them. */
          warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
                   safe_strerror (errno));
          return -1;
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
                            target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
        {
          lp->resumed = 1;
          lp->status = status;
        }

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "LLAL: waitpid %s received %s\n",
                              target_pid_to_str (ptid),
                              status_to_str (status));
        }
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped. Mark it as stopped in the data structure
         that the GNU/linux ptrace layer uses to keep track of
         threads. Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer. */
      if (lp == NULL)
        lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  if (async_events_were_enabled)
    linux_nat_async_events (1);

  return 0;
}

static void
linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
                           int from_tty)
{
  int saved_async = 0;

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode. */

  if (target_can_async_p ())
    saved_async = linux_nat_async_mask (0);
  else
    {
      /* Restore the original signal mask. */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend. */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);

  if (saved_async)
    linux_nat_async_mask (saved_async);
}

static void
linux_nat_attach (char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;

  /* FIXME: We should probably accept a list of process id's, and
     attach all of them. */
  linux_ops->to_attach (args, from_tty);

  if (!target_can_async_p ())
    {
      /* Restore the original signal mask. */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend. */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  /* Add the initial process as the first LWP to the list. */
  inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  lp = add_lwp (inferior_ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
                                       &lp->signalled);
  lp->stopped = 1;

  /* If this process is not using thread_db, then we still don't
     detect any other threads, but add at least this one. */
  add_thread_silent (lp->ptid);

  /* Save the wait status to report later. */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LNA: waitpid %ld, saving status %s\n",
                        (long) GET_PID (lp->ptid), status_to_str (status));

  if (!target_can_async_p ())
    lp->status = status;
  else
    {
      /* We already waited for this LWP, so put the wait result on the
         pipe. The event loop will wake up and get us to handle
         this event. */
      linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
                                 lp->cloned ? __WCLONE : 0);
      /* Register in the event loop. */
      target_async (inferior_event_handler, 0);
    }
}

/* Get pending status of LP. */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  struct target_waitstatus last;
  ptid_t last_ptid;

  get_last_target_status (&last_ptid, &last);

  /* If this lwp is the ptid that GDB is processing an event from, the
     signal will be in stop_signal. Otherwise, in all-stop + sync
     mode, we may cache pending events in lp->status while trying to
     stop all threads (see stop_wait_callback). In async mode, the
     events are always cached in waitpid_queue. */

  *status = 0;
  if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
    {
      if (stop_signal != TARGET_SIGNAL_0
          && signal_pass_state (stop_signal))
        *status = W_STOPCODE (target_signal_to_host (stop_signal));
    }
  else if (target_can_async_p ())
    queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
  else
    *status = lp->status;

  return 0;
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
                        strsignal (WSTOPSIG (lp->status)),
                        target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it. */
  if (lp->signalled)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "DC: Sending SIGCONT to %s\n",
                            target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet. */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP. */
      get_pending_status (lp, &status);

      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
                  WSTOPSIG (status)) < 0)
        error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
               safe_strerror (errno));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "PTRACE_DETACH (%s, %s, 0) (OK)\n",
                            target_pid_to_str (lp->ptid),
                            strsignal (WSTOPSIG (lp->status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

static void
linux_nat_detach (char *args, int from_tty)
{
  int pid;
  int status;
  enum target_signal sig;

  if (target_can_async_p ())
    linux_nat_async (NULL, 0);

  iterate_over_lwps (detach_callback, NULL);

  /* Only the initial process should be left right now. */
  gdb_assert (num_lwps == 1);

  /* Pass on any pending signal for the last LWP. */
  if ((args == NULL || *args == '\0')
      && get_pending_status (lwp_list, &status) != -1
      && WIFSTOPPED (status))
    {
      /* Put the signal number in ARGS so that inf_ptrace_detach will
         pass it along with PTRACE_DETACH. */
      args = alloca (8);
      sprintf (args, "%d", (int) WSTOPSIG (status));
      fprintf_unfiltered (gdb_stdlog,
                          "LND: Sending signal %s to %s\n",
                          args,
                          target_pid_to_str (lwp_list->ptid));
    }

  trap_ptid = null_ptid;

  /* Destroy LWP info; it's no longer valid. */
  init_lwp_list ();

  pid = GET_PID (inferior_ptid);
  inferior_ptid = pid_to_ptid (pid);
  linux_ops->to_detach (args, from_tty);

  if (target_can_async_p ())
    drain_queued_events (pid);
}

/* Resume LP. */

static int
resume_callback (struct lwp_info *lp, void *data)
{
  if (lp->stopped && lp->status == 0)
    {
      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
                            0, TARGET_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
                            target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      lp->step = 0;
      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
    }

  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  return 0;
}

static void
linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
{
  struct lwp_info *lp;
  int resume_all;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
                        step ? "step" : "resume",
                        target_pid_to_str (ptid),
                        signo ? strsignal (signo) : "0",
                        target_pid_to_str (inferior_ptid));

  prune_lwps ();

  if (target_can_async_p ())
    /* Block events while we're here. */
    linux_nat_async_events (0);

  /* A specific PTID means `step only this process id'. */
  resume_all = (PIDGET (ptid) == -1);

  if (resume_all)
    iterate_over_lwps (resume_set_callback, NULL);
  else
    iterate_over_lwps (resume_clear_callback, NULL);

  /* If PID is -1, it's the current inferior that should be
     handled specially. */
  if (PIDGET (ptid) == -1)
    ptid = inferior_ptid;

  lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  /* Remember if we're stepping. */
  lp->step = step;

  /* Mark this LWP as resumed. */
  lp->resumed = 1;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process. But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads. This bit of code needs to be synchronized
     with linux_nat_wait. */

  /* In async mode, we never have pending wait status. */
  if (target_can_async_p () && lp->status)
    internal_error (__FILE__, __LINE__, "Pending status in async mode");

  if (lp->status && WIFSTOPPED (lp->status))
    {
      int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));

      if (signal_stop_state (saved_signo) == 0
          && signal_print_state (saved_signo) == 0
          && signal_pass_state (saved_signo) == 1)
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "LLR: Not short circuiting for ignored "
                                "status 0x%x\n", lp->status);

          /* FIXME: What should we do if we are supposed to continue
             this thread with a signal? */
          gdb_assert (signo == TARGET_SIGNAL_0);
          signo = saved_signo;
          lp->status = 0;
        }
    }

  if (lp->status)
    {
      /* FIXME: What should we do if we are supposed to continue
         this thread with a signal? */
      gdb_assert (signo == TARGET_SIGNAL_0);

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLR: Short circuiting for status 0x%x\n",
                            lp->status);

      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     resume_callback. */
  lp->stopped = 0;

  if (resume_all)
    iterate_over_lwps (resume_callback, NULL);

  linux_ops->to_resume (ptid, step, signo);
  memset (&lp->siginfo, 0, sizeof (lp->siginfo));

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LLR: %s %s, %s (resume event thread)\n",
                        step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                        target_pid_to_str (ptid),
                        signo ? strsignal (signo) : "0");

  if (target_can_async_p ())
    {
      target_executing = 1;
      target_async (inferior_event_handler, 0);
    }
}

/* Issue kill to specified lwp. */

static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

/* Use tkill, if possible, in case we are using nptl threads. If tkill
   fails, then we are not using nptl threads and we should be using kill. */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int ret = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
        return ret;
      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}

/* Handle a GNU/Linux extended wait response. If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers). This function returns non-zero if the
   event should be ignored and we should wait again. If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued. */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
                            int stopping)
{
  int pid = GET_LWP (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct lwp_info *new_lp = NULL;
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now. */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP. We can't affect it until it
             hits the SIGSTOP, but we're already attached. */
          ret = my_waitpid (new_pid, &status,
                            (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
          if (ret == -1)
            perror_with_name (_("waiting for new child"));
          else if (ret != new_pid)
            internal_error (__FILE__, __LINE__,
                            _("wait returned unexpected PID %d"), ret);
          else if (!WIFSTOPPED (status))
            internal_error (__FILE__, __LINE__,
                            _("wait returned unexpected status 0x%x"), status);
        }

      ourstatus->value.related_pid = new_pid;

      if (event == PTRACE_EVENT_FORK)
        ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
        ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
        {
          ourstatus->kind = TARGET_WAITKIND_IGNORE;
          new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
          new_lp->cloned = 1;

          if (WSTOPSIG (status) != SIGSTOP)
            {
              /* This can happen if someone starts sending signals to
                 the new thread before it gets a chance to run, which
                 have a lower number than SIGSTOP (e.g. SIGUSR1).
                 This is an unlikely case, and harder to handle for
                 fork / vfork than for clone, so we do not try - but
                 we handle it for clone events here. We'll send
                 the other signal on to the thread below. */

              new_lp->signalled = 1;
            }
          else
            status = 0;

          if (stopping)
            new_lp->stopped = 1;
          else
            {
              new_lp->resumed = 1;
              ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
                      status ? WSTOPSIG (status) : 0);
            }

          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "LHEW: Got clone event from LWP %ld, resuming\n",
                                GET_LWP (lp->ptid));
          ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);

          return 1;
        }

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
        = xstrdup (linux_child_pid_to_exec_file (pid));

      if (linux_parent_pid)
        {
          detach_breakpoints (linux_parent_pid);
          ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);

          linux_parent_pid = 0;
        }

      return 0;
    }

  internal_error (__FILE__, __LINE__,
                  _("unknown ptrace event %d"), event);
}
1704
1705/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1706 exited. */
1707
1708static int
1709wait_lwp (struct lwp_info *lp)
1710{
1711 pid_t pid;
1712 int status;
1713 int thread_dead = 0;
1714
1715 gdb_assert (!lp->stopped);
1716 gdb_assert (lp->status == 0);
1717
58aecb61 1718 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1719 if (pid == -1 && errno == ECHILD)
1720 {
58aecb61 1721 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1722 if (pid == -1 && errno == ECHILD)
1723 {
1724 /* The thread has previously exited. We need to delete it
1725 now because, for some vendor 2.4 kernels with NPTL
1726 support backported, there won't be an exit event unless
1727 it is the main thread. 2.6 kernels will report an exit
1728 event for each thread that exits, as expected. */
1729 thread_dead = 1;
1730 if (debug_linux_nat)
1731 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1732 target_pid_to_str (lp->ptid));
1733 }
1734 }
1735
1736 if (!thread_dead)
1737 {
1738 gdb_assert (pid == GET_LWP (lp->ptid));
1739
1740 if (debug_linux_nat)
1741 {
1742 fprintf_unfiltered (gdb_stdlog,
1743 "WL: waitpid %s received %s\n",
1744 target_pid_to_str (lp->ptid),
1745 status_to_str (status));
1746 }
1747 }
1748
1749 /* Check if the thread has exited. */
1750 if (WIFEXITED (status) || WIFSIGNALED (status))
1751 {
1752 thread_dead = 1;
1753 if (debug_linux_nat)
1754 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1755 target_pid_to_str (lp->ptid));
1756 }
1757
1758 if (thread_dead)
1759 {
e26af52f 1760 exit_lwp (lp);
d6b0e80f
AC
1761 return 0;
1762 }
1763
1764 gdb_assert (WIFSTOPPED (status));
1765
1766 /* Handle GNU/Linux's extended waitstatus for trace events. */
1767 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1768 {
1769 if (debug_linux_nat)
1770 fprintf_unfiltered (gdb_stdlog,
1771 "WL: Handling extended status 0x%06x\n",
1772 status);
3d799a95 1773 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
1774 return wait_lwp (lp);
1775 }
1776
1777 return status;
1778}
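
   (A minimal sketch, not from this file, of how the kernel encodes the
   extended ptrace events tested for above and again in
   linux_nat_filter_event below with "status >> 16 != 0".  When
   PTRACE_O_TRACEFORK/VFORK/CLONE/EXEC are enabled, the tracee stops with
   WSTOPSIG (status) == SIGTRAP and the event code in bits 16-23 of the
   wait status.)

   #include <sys/wait.h>
   #include <sys/ptrace.h>
   #include <signal.h>

   /* Return the extended ptrace event carried by a waitpid status, or 0
      if this is an ordinary SIGTRAP stop.  PTRACE_EVENT_FORK is 1,
      PTRACE_EVENT_VFORK 2, PTRACE_EVENT_CLONE 3, PTRACE_EVENT_EXEC 4.  */
   static int
   extended_ptrace_event (int status)
   {
     if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
       return (status >> 16) & 0xff;
     return 0;
   }
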
1779
9f0bdab8
DJ
1780/* Save the most recent siginfo for LP. This is currently only called
1781 for SIGTRAP; some ports use the si_addr field for
1782 target_stopped_data_address. In the future, it may also be used to
1783 restore the siginfo of requeued signals. */
1784
1785static void
1786save_siginfo (struct lwp_info *lp)
1787{
1788 errno = 0;
1789 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
1790 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
1791
1792 if (errno != 0)
1793 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1794}
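
   (A hedged sketch of how the saved siginfo might be consumed, assuming
   the usual GDB types CORE_ADDR and struct lwp_info are in scope; the
   helper name is illustrative and not part of this file.  On many
   targets a watchpoint trap records the faulting data address in
   si_addr, which is what target_stopped_data_address wants.)

   #include <signal.h>
   #include <stdint.h>

   /* Report the data address of the last trap for LP, if any.  */
   static int
   example_stopped_data_address (struct lwp_info *lp, CORE_ADDR *addr_p)
   {
     if (lp->siginfo.si_signo != SIGTRAP)
       return 0;
     *addr_p = (CORE_ADDR) (uintptr_t) lp->siginfo.si_addr;
     return 1;
   }
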
1795
d6b0e80f
AC
1796/* Send a SIGSTOP to LP. */
1797
1798static int
1799stop_callback (struct lwp_info *lp, void *data)
1800{
1801 if (!lp->stopped && !lp->signalled)
1802 {
1803 int ret;
1804
1805 if (debug_linux_nat)
1806 {
1807 fprintf_unfiltered (gdb_stdlog,
1808 "SC: kill %s **<SIGSTOP>**\n",
1809 target_pid_to_str (lp->ptid));
1810 }
1811 errno = 0;
1812 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1813 if (debug_linux_nat)
1814 {
1815 fprintf_unfiltered (gdb_stdlog,
1816 "SC: lwp kill %d %s\n",
1817 ret,
1818 errno ? safe_strerror (errno) : "ERRNO-OK");
1819 }
1820
1821 lp->signalled = 1;
1822 gdb_assert (lp->status == 0);
1823 }
1824
1825 return 0;
1826}
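
   (A sketch under the assumption that kill_lwp delivers the signal to a
   single kernel thread, for instance via the tkill syscall where it is
   available and plain kill otherwise; the helper name is illustrative.)

   #include <signal.h>
   #include <unistd.h>
   #include <sys/syscall.h>

   /* Send SIG to one LWP rather than to its whole thread group.  */
   static int
   send_lwp_signal (pid_t lwpid, int sig)
   {
   #ifdef __NR_tkill
     return syscall (__NR_tkill, lwpid, sig);
   #else
     return kill (lwpid, sig);
   #endif
   }
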
1827
1828/* Wait until LP is stopped. If DATA is non-null it is interpreted as
1829 a pointer to a set of signals to be flushed immediately. */
1830
1831static int
1832stop_wait_callback (struct lwp_info *lp, void *data)
1833{
1834 sigset_t *flush_mask = data;
1835
1836 if (!lp->stopped)
1837 {
1838 int status;
1839
1840 status = wait_lwp (lp);
1841 if (status == 0)
1842 return 0;
1843
1844 /* Ignore any signals in FLUSH_MASK. */
1845 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1846 {
1847 if (!lp->signalled)
1848 {
1849 lp->stopped = 1;
1850 return 0;
1851 }
1852
1853 errno = 0;
1854 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1855 if (debug_linux_nat)
1856 fprintf_unfiltered (gdb_stdlog,
1857 "PTRACE_CONT %s, 0, 0 (%s)\n",
1858 target_pid_to_str (lp->ptid),
1859 errno ? safe_strerror (errno) : "OK");
1860
1861 return stop_wait_callback (lp, flush_mask);
1862 }
1863
1864 if (WSTOPSIG (status) != SIGSTOP)
1865 {
1866 if (WSTOPSIG (status) == SIGTRAP)
1867 {
1868 /* If a LWP other than the LWP that we're reporting an
1869 event for has hit a GDB breakpoint (as opposed to
1870 some random trap signal), then just arrange for it to
1871 hit it again later. We don't keep the SIGTRAP status
1872 and don't forward the SIGTRAP signal to the LWP. We
1873 will handle the current event, eventually we will
1874 resume all LWPs, and this one will get its breakpoint
1875 trap again.
1876
1877 If we do not do this, then we run the risk that the
1878 user will delete or disable the breakpoint, but the
1879 thread will have already tripped on it. */
1880
9f0bdab8
DJ
1881 /* Save the trap's siginfo in case we need it later. */
1882 save_siginfo (lp);
1883
d6b0e80f
AC
1884 /* Now resume this LWP and get the SIGSTOP event. */
1885 errno = 0;
1886 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1887 if (debug_linux_nat)
1888 {
1889 fprintf_unfiltered (gdb_stdlog,
1890 "PTRACE_CONT %s, 0, 0 (%s)\n",
1891 target_pid_to_str (lp->ptid),
1892 errno ? safe_strerror (errno) : "OK");
1893
1894 fprintf_unfiltered (gdb_stdlog,
1895 "SWC: Candidate SIGTRAP event in %s\n",
1896 target_pid_to_str (lp->ptid));
1897 }
710151dd
PA
1898 /* Hold this event/waitstatus while we check to see if
1899 there are any more (we still want to get that SIGSTOP). */
d6b0e80f 1900 stop_wait_callback (lp, data);
710151dd
PA
1901
1902 if (target_can_async_p ())
d6b0e80f 1903 {
710151dd
PA
1904 /* Don't leave a pending wait status in async mode.
1905 Retrigger the breakpoint. */
1906 if (!cancel_breakpoint (lp))
d6b0e80f 1907 {
710151dd
PA
1908 /* There was no gdb breakpoint set at pc. Put
1909 the event back in the queue. */
1910 if (debug_linux_nat)
1911 fprintf_unfiltered (gdb_stdlog,
1912 "SWC: kill %s, %s\n",
1913 target_pid_to_str (lp->ptid),
1914 status_to_str ((int) status));
1915 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1916 }
1917 }
1918 else
1919 {
1920 /* Hold the SIGTRAP for handling by
1921 linux_nat_wait. */
1922 /* If there's another event, throw it back into the
1923 queue. */
1924 if (lp->status)
1925 {
1926 if (debug_linux_nat)
1927 fprintf_unfiltered (gdb_stdlog,
1928 "SWC: kill %s, %s\n",
1929 target_pid_to_str (lp->ptid),
1930 status_to_str ((int) status));
1931 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 1932 }
710151dd
PA
1933 /* Save the sigtrap event. */
1934 lp->status = status;
d6b0e80f 1935 }
d6b0e80f
AC
1936 return 0;
1937 }
1938 else
1939 {
1940 /* The thread was stopped with a signal other than
1941 SIGSTOP, and didn't accidentally trip a breakpoint. */
1942
1943 if (debug_linux_nat)
1944 {
1945 fprintf_unfiltered (gdb_stdlog,
1946 "SWC: Pending event %s in %s\n",
1947 status_to_str ((int) status),
1948 target_pid_to_str (lp->ptid));
1949 }
1950 /* Now resume this LWP and get the SIGSTOP event. */
1951 errno = 0;
1952 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1953 if (debug_linux_nat)
1954 fprintf_unfiltered (gdb_stdlog,
1955 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1956 target_pid_to_str (lp->ptid),
1957 errno ? safe_strerror (errno) : "OK");
1958
1959 /* Hold this event/waitstatus while we check to see if
1960 there are any more (we still want to get that SIGSTOP). */
1961 stop_wait_callback (lp, data);
710151dd
PA
1962
1963 /* If the lp->status field is still empty, use it to
1964 hold this event. If not, then this event must be
1965 returned to the event queue of the LWP. */
1966 if (lp->status || target_can_async_p ())
d6b0e80f
AC
1967 {
1968 if (debug_linux_nat)
1969 {
1970 fprintf_unfiltered (gdb_stdlog,
1971 "SWC: kill %s, %s\n",
1972 target_pid_to_str (lp->ptid),
1973 status_to_str ((int) status));
1974 }
1975 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1976 }
710151dd
PA
1977 else
1978 lp->status = status;
d6b0e80f
AC
1979 return 0;
1980 }
1981 }
1982 else
1983 {
1984 /* We caught the SIGSTOP that we intended to catch, so
1985 there's no SIGSTOP pending. */
1986 lp->stopped = 1;
1987 lp->signalled = 0;
1988 }
1989 }
1990
1991 return 0;
1992}
1993
1994/* Check whether PID has any pending signals in FLUSH_MASK. If so set
1995 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1996
1997static int
1998linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1999{
2000 sigset_t blocked, ignored;
2001 int i;
2002
2003 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
2004
2005 if (!flush_mask)
2006 return 0;
2007
2008 for (i = 1; i < NSIG; i++)
2009 if (sigismember (pending, i))
2010 if (!sigismember (flush_mask, i)
2011 || sigismember (&blocked, i)
2012 || sigismember (&ignored, i))
2013 sigdelset (pending, i);
2014
2015 if (sigisemptyset (pending))
2016 return 0;
2017
2018 return 1;
2019}
2020
2021/* DATA is interpreted as a mask of signals to flush. If LP has
2022 signals pending, and they are all in the flush mask, then arrange
2023 to flush them. LP should be stopped, as should all other threads
2024 it might share a signal queue with. */
2025
2026static int
2027flush_callback (struct lwp_info *lp, void *data)
2028{
2029 sigset_t *flush_mask = data;
2030 sigset_t pending, intersection, blocked, ignored;
2031 int pid, status;
2032
2033 /* Normally, when an LWP exits, it is removed from the LWP list. The
2034 last LWP isn't removed till later, however. So if there is only
2035 one LWP on the list, make sure it's alive. */
2036 if (lwp_list == lp && lp->next == NULL)
2037 if (!linux_nat_thread_alive (lp->ptid))
2038 return 0;
2039
2040 /* Just because the LWP is stopped doesn't mean that new signals
2041 can't arrive from outside, so this function must be careful of
2042 race conditions. However, because all threads are stopped, we
2043 can assume that the pending mask will not shrink unless we resume
2044 the LWP, and that it will then get another signal. We can't
2045 control which one, however. */
2046
2047 if (lp->status)
2048 {
2049 if (debug_linux_nat)
a3f17187 2050 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
d6b0e80f
AC
2051 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
2052 lp->status = 0;
2053 }
2054
3d799a95
DJ
2055 /* While there is a pending signal we would like to flush, continue
2056 the inferior and collect another signal. But if there's already
2057 a saved status that we don't want to flush, we can't resume the
2058 inferior - if it stopped for some other reason we wouldn't have
2059 anywhere to save the new status. In that case, we must leave the
2060 signal unflushed (and possibly generate an extra SIGINT stop).
2061 That's much less bad than losing a signal. */
2062 while (lp->status == 0
2063 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
d6b0e80f
AC
2064 {
2065 int ret;
2066
2067 errno = 0;
2068 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2069 if (debug_linux_nat)
2070 fprintf_unfiltered (gdb_stderr,
2071 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
2072
2073 lp->stopped = 0;
2074 stop_wait_callback (lp, flush_mask);
2075 if (debug_linux_nat)
2076 fprintf_unfiltered (gdb_stderr,
2077 "FC: Wait finished; saved status is %d\n",
2078 lp->status);
2079 }
2080
2081 return 0;
2082}
2083
2084/* Return non-zero if LP has a wait status pending. */
2085
2086static int
2087status_callback (struct lwp_info *lp, void *data)
2088{
2089 /* Only report a pending wait status if we pretend that this has
2090 indeed been resumed. */
2091 return (lp->status != 0 && lp->resumed);
2092}
2093
2094/* Return non-zero if LP isn't stopped. */
2095
2096static int
2097running_callback (struct lwp_info *lp, void *data)
2098{
2099 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2100}
2101
2102/* Count the LWP's that have had events. */
2103
2104static int
2105count_events_callback (struct lwp_info *lp, void *data)
2106{
2107 int *count = data;
2108
2109 gdb_assert (count != NULL);
2110
2111 /* Count only LWPs that have a SIGTRAP event pending. */
2112 if (lp->status != 0
2113 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2114 (*count)++;
2115
2116 return 0;
2117}
2118
2119/* Select the LWP (if any) that is currently being single-stepped. */
2120
2121static int
2122select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2123{
2124 if (lp->step && lp->status != 0)
2125 return 1;
2126 else
2127 return 0;
2128}
2129
2130/* Select the Nth LWP that has had a SIGTRAP event. */
2131
2132static int
2133select_event_lwp_callback (struct lwp_info *lp, void *data)
2134{
2135 int *selector = data;
2136
2137 gdb_assert (selector != NULL);
2138
2139 /* Select only LWPs that have a SIGTRAP event pending. */
2140 if (lp->status != 0
2141 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2142 if ((*selector)-- == 0)
2143 return 1;
2144
2145 return 0;
2146}
2147
710151dd
PA
2148static int
2149cancel_breakpoint (struct lwp_info *lp)
2150{
2151 /* Arrange for a breakpoint to be hit again later. We don't keep
2152 the SIGTRAP status and don't forward the SIGTRAP signal to the
2153 LWP. We will handle the current event, eventually we will resume
2154 this LWP, and this breakpoint will trap again.
2155
2156 If we do not do this, then we run the risk that the user will
2157 delete or disable the breakpoint, but the LWP will have already
2158 tripped on it. */
2159
2160 if (breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
2161 gdbarch_decr_pc_after_break
2162 (current_gdbarch)))
2163 {
2164 if (debug_linux_nat)
2165 fprintf_unfiltered (gdb_stdlog,
2166 "CB: Push back breakpoint for %s\n",
2167 target_pid_to_str (lp->ptid));
2168
2169 /* Back up the PC if necessary. */
2170 if (gdbarch_decr_pc_after_break (current_gdbarch))
2171 write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break
2172 (current_gdbarch),
2173 lp->ptid);
2174 return 1;
2175 }
2176 return 0;
2177}
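
   (A worked example, assuming an i386-style target: the breakpoint
   instruction int3 is one byte, so after the trap the reported PC is one
   byte past the breakpoint and gdbarch_decr_pc_after_break returns 1.
   The test above therefore probes PC - 1 for an inserted breakpoint and,
   if it finds one, rewinds the PC to that address.)

   /* Illustrative helper: where the breakpoint itself lives, given the
      PC reported after the trap and the architecture's adjustment.  */
   static unsigned long
   breakpoint_address_after_trap (unsigned long reported_pc,
                                  unsigned long decr_pc_after_break)
   {
     /* e.g. 0x08048065 - 1 == 0x08048064 on i386.  */
     return reported_pc - decr_pc_after_break;
   }
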
2178
d6b0e80f
AC
2179static int
2180cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2181{
2182 struct lwp_info *event_lp = data;
2183
2184 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2185 if (lp == event_lp)
2186 return 0;
2187
2188 /* If a LWP other than the LWP that we're reporting an event for has
2189 hit a GDB breakpoint (as opposed to some random trap signal),
2190 then just arrange for it to hit it again later. We don't keep
2191 the SIGTRAP status and don't forward the SIGTRAP signal to the
2192 LWP. We will handle the current event, eventually we will resume
2193 all LWPs, and this one will get its breakpoint trap again.
2194
2195 If we do not do this, then we run the risk that the user will
2196 delete or disable the breakpoint, but the LWP will have already
2197 tripped on it. */
2198
2199 if (lp->status != 0
2200 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2201 && cancel_breakpoint (lp))
2202 /* Throw away the SIGTRAP. */
2203 lp->status = 0;
d6b0e80f
AC
2204
2205 return 0;
2206}
2207
2208/* Select one LWP out of those that have events pending. */
2209
2210static void
2211select_event_lwp (struct lwp_info **orig_lp, int *status)
2212{
2213 int num_events = 0;
2214 int random_selector;
2215 struct lwp_info *event_lp;
2216
ac264b3b 2217 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2218 (*orig_lp)->status = *status;
2219
2220 /* Give preference to any LWP that is being single-stepped. */
2221 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2222 if (event_lp != NULL)
2223 {
2224 if (debug_linux_nat)
2225 fprintf_unfiltered (gdb_stdlog,
2226 "SEL: Select single-step %s\n",
2227 target_pid_to_str (event_lp->ptid));
2228 }
2229 else
2230 {
2231 /* No single-stepping LWP. Select one at random, out of those
2232 which have had SIGTRAP events. */
2233
2234 /* First see how many SIGTRAP events we have. */
2235 iterate_over_lwps (count_events_callback, &num_events);
2236
2237 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2238 random_selector = (int)
2239 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2240
2241 if (debug_linux_nat && num_events > 1)
2242 fprintf_unfiltered (gdb_stdlog,
2243 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2244 num_events, random_selector);
2245
2246 event_lp = iterate_over_lwps (select_event_lwp_callback,
2247 &random_selector);
2248 }
2249
2250 if (event_lp != NULL)
2251 {
2252 /* Switch the event LWP. */
2253 *orig_lp = event_lp;
2254 *status = event_lp->status;
2255 }
2256
2257 /* Flush the wait status for the event LWP. */
2258 (*orig_lp)->status = 0;
2259}
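
   (A short illustration of the selection formula used above: with
   num_events == 3 the expression maps rand () uniformly onto {0, 1, 2},
   and select_event_lwp_callback then returns the LWP with that index.)

   #include <stdlib.h>

   /* Pick an index in [0, n) with approximately equal probability,
      mirroring the random_selector computation above.  */
   static int
   pick_random_index (int n)
   {
     return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
   }
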
2260
2261/* Return non-zero if LP has been resumed. */
2262
2263static int
2264resumed_callback (struct lwp_info *lp, void *data)
2265{
2266 return lp->resumed;
2267}
2268
d6b0e80f
AC
2269/* Stop an active thread, verify it still exists, then resume it. */
2270
2271static int
2272stop_and_resume_callback (struct lwp_info *lp, void *data)
2273{
2274 struct lwp_info *ptr;
2275
2276 if (!lp->stopped && !lp->signalled)
2277 {
2278 stop_callback (lp, NULL);
2279 stop_wait_callback (lp, NULL);
2280 /* Resume if the lwp still exists. */
2281 for (ptr = lwp_list; ptr; ptr = ptr->next)
2282 if (lp == ptr)
2283 {
2284 resume_callback (lp, NULL);
2285 resume_set_callback (lp, NULL);
2286 }
2287 }
2288 return 0;
2289}
2290
02f3fc28 2291/* Check if we should go on and pass this event to common code.
fa2c6a57 2292 Return the affected lwp if we should, or NULL otherwise. */
02f3fc28
PA
2293static struct lwp_info *
2294linux_nat_filter_event (int lwpid, int status, int options)
2295{
2296 struct lwp_info *lp;
2297
2298 lp = find_lwp_pid (pid_to_ptid (lwpid));
2299
2300 /* Check for stop events reported by a process we didn't already
2301 know about - anything not already in our LWP list.
2302
2303 If we're expecting to receive stopped processes after
2304 fork, vfork, and clone events, then we'll just add the
2305 new one to our list and go back to waiting for the event
2306 to be reported - the stopped process might be returned
2307 from waitpid before or after the event is. */
2308 if (WIFSTOPPED (status) && !lp)
2309 {
2310 linux_record_stopped_pid (lwpid, status);
2311 return NULL;
2312 }
2313
2314 /* Make sure we don't report an event for the exit of an LWP not in
2315 our list, i.e. not part of the current process. This can happen
 2316 if we detach from a program we originally forked and then it
2317 exits. */
2318 if (!WIFSTOPPED (status) && !lp)
2319 return NULL;
2320
2321 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2322 CLONE_PTRACE processes which do not use the thread library -
2323 otherwise we wouldn't find the new LWP this way. That doesn't
2324 currently work, and the following code is currently unreachable
2325 due to the two blocks above. If it's fixed some day, this code
2326 should be broken out into a function so that we can also pick up
2327 LWPs from the new interface. */
2328 if (!lp)
2329 {
2330 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2331 if (options & __WCLONE)
2332 lp->cloned = 1;
2333
2334 gdb_assert (WIFSTOPPED (status)
2335 && WSTOPSIG (status) == SIGSTOP);
2336 lp->signalled = 1;
2337
2338 if (!in_thread_list (inferior_ptid))
2339 {
2340 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2341 GET_PID (inferior_ptid));
2342 add_thread (inferior_ptid);
2343 }
2344
2345 add_thread (lp->ptid);
2346 }
2347
2348 /* Save the trap's siginfo in case we need it later. */
2349 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2350 save_siginfo (lp);
2351
2352 /* Handle GNU/Linux's extended waitstatus for trace events. */
2353 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2354 {
2355 if (debug_linux_nat)
2356 fprintf_unfiltered (gdb_stdlog,
2357 "LLW: Handling extended status 0x%06x\n",
2358 status);
2359 if (linux_handle_extended_wait (lp, status, 0))
2360 return NULL;
2361 }
2362
2363 /* Check if the thread has exited. */
2364 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2365 {
2366 /* If this is the main thread, we must stop all threads and
2367 verify if they are still alive. This is because in the nptl
2368 thread model, there is no signal issued for exiting LWPs
2369 other than the main thread. We only get the main thread exit
2370 signal once all child threads have already exited. If we
2371 stop all the threads and use the stop_wait_callback to check
2372 if they have exited we can determine whether this signal
2373 should be ignored or whether it means the end of the debugged
2374 application, regardless of which threading model is being
2375 used. */
2376 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2377 {
2378 lp->stopped = 1;
2379 iterate_over_lwps (stop_and_resume_callback, NULL);
2380 }
2381
2382 if (debug_linux_nat)
2383 fprintf_unfiltered (gdb_stdlog,
2384 "LLW: %s exited.\n",
2385 target_pid_to_str (lp->ptid));
2386
2387 exit_lwp (lp);
2388
2389 /* If there is at least one more LWP, then the exit signal was
2390 not the end of the debugged application and should be
2391 ignored. */
2392 if (num_lwps > 0)
2393 {
2394 /* Make sure there is at least one thread running. */
2395 gdb_assert (iterate_over_lwps (running_callback, NULL));
2396
2397 /* Discard the event. */
2398 return NULL;
2399 }
2400 }
2401
2402 /* Check if the current LWP has previously exited. In the nptl
2403 thread model, LWPs other than the main thread do not issue
2404 signals when they exit so we must check whenever the thread has
2405 stopped. A similar check is made in stop_wait_callback(). */
2406 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2407 {
2408 if (debug_linux_nat)
2409 fprintf_unfiltered (gdb_stdlog,
2410 "LLW: %s exited.\n",
2411 target_pid_to_str (lp->ptid));
2412
2413 exit_lwp (lp);
2414
2415 /* Make sure there is at least one thread running. */
2416 gdb_assert (iterate_over_lwps (running_callback, NULL));
2417
2418 /* Discard the event. */
2419 return NULL;
2420 }
2421
2422 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2423 an attempt to stop an LWP. */
2424 if (lp->signalled
2425 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2426 {
2427 if (debug_linux_nat)
2428 fprintf_unfiltered (gdb_stdlog,
2429 "LLW: Delayed SIGSTOP caught for %s.\n",
2430 target_pid_to_str (lp->ptid));
2431
2432 /* This is a delayed SIGSTOP. */
2433 lp->signalled = 0;
2434
2435 registers_changed ();
2436
2437 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2438 lp->step, TARGET_SIGNAL_0);
2439 if (debug_linux_nat)
2440 fprintf_unfiltered (gdb_stdlog,
2441 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2442 lp->step ?
2443 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2444 target_pid_to_str (lp->ptid));
2445
2446 lp->stopped = 0;
2447 gdb_assert (lp->resumed);
2448
2449 /* Discard the event. */
2450 return NULL;
2451 }
2452
2453 /* An interesting event. */
2454 gdb_assert (lp);
2455 return lp;
2456}
2457
b84876c2
PA
2458/* Get the events stored in the pipe into the local queue, so they are
2459 accessible to queued_waitpid. We need to do this, since it is not
2460 always the case that the event at the head of the pipe is the event
2461 we want. */
2462
2463static void
2464pipe_to_local_event_queue (void)
2465{
2466 if (debug_linux_nat_async)
2467 fprintf_unfiltered (gdb_stdlog,
2468 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2469 linux_nat_num_queued_events);
2470 while (linux_nat_num_queued_events)
2471 {
2472 int lwpid, status, options;
b84876c2 2473 lwpid = linux_nat_event_pipe_pop (&status, &options);
b84876c2
PA
2474 gdb_assert (lwpid > 0);
2475 push_waitpid (lwpid, status, options);
2476 }
2477}
2478
2479/* Get the unprocessed events stored in the local queue back into the
2480 pipe, so the event loop realizes there's something else to
2481 process. */
2482
2483static void
2484local_event_queue_to_pipe (void)
2485{
2486 struct waitpid_result *w = waitpid_queue;
2487 while (w)
2488 {
2489 struct waitpid_result *next = w->next;
2490 linux_nat_event_pipe_push (w->pid,
2491 w->status,
2492 w->options);
2493 xfree (w);
2494 w = next;
2495 }
2496 waitpid_queue = NULL;
2497
2498 if (debug_linux_nat_async)
2499 fprintf_unfiltered (gdb_stdlog,
2500 "LEQTP: linux_nat_num_queued_events(%d)\n",
2501 linux_nat_num_queued_events);
2502}
2503
d6b0e80f
AC
2504static ptid_t
2505linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2506{
2507 struct lwp_info *lp = NULL;
2508 int options = 0;
2509 int status = 0;
2510 pid_t pid = PIDGET (ptid);
2511 sigset_t flush_mask;
2512
b84876c2
PA
2513 if (debug_linux_nat_async)
2514 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2515
f973ed9c
DJ
2516 /* The first time we get here after starting a new inferior, we may
2517 not have added it to the LWP list yet - this is the earliest
2518 moment at which we know its PID. */
2519 if (num_lwps == 0)
2520 {
2521 gdb_assert (!is_lwp (inferior_ptid));
2522
2523 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2524 GET_PID (inferior_ptid));
2525 lp = add_lwp (inferior_ptid);
2526 lp->resumed = 1;
403fe197
PA
2527 /* Add the main thread to GDB's thread list. */
2528 add_thread_silent (lp->ptid);
f973ed9c
DJ
2529 }
2530
d6b0e80f
AC
2531 sigemptyset (&flush_mask);
2532
b84876c2
PA
2533 if (target_can_async_p ())
2534 /* Block events while we're here. */
2535 target_async (NULL, 0);
d6b0e80f
AC
2536
2537retry:
2538
f973ed9c
DJ
2539 /* Make sure there is at least one LWP that has been resumed. */
2540 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
d6b0e80f
AC
2541
2542 /* First check if there is a LWP with a wait status pending. */
2543 if (pid == -1)
2544 {
2545 /* Any LWP that's been resumed will do. */
2546 lp = iterate_over_lwps (status_callback, NULL);
2547 if (lp)
2548 {
710151dd
PA
2549 if (target_can_async_p ())
2550 internal_error (__FILE__, __LINE__,
2551 "Found an LWP with a pending status in async mode.");
2552
d6b0e80f
AC
2553 status = lp->status;
2554 lp->status = 0;
2555
2556 if (debug_linux_nat && status)
2557 fprintf_unfiltered (gdb_stdlog,
2558 "LLW: Using pending wait status %s for %s.\n",
2559 status_to_str (status),
2560 target_pid_to_str (lp->ptid));
2561 }
2562
b84876c2 2563 /* But if we don't find one, we'll have to wait, and check both
d6b0e80f
AC
2564 cloned and uncloned processes. We start with the cloned
2565 processes. */
2566 options = __WCLONE | WNOHANG;
2567 }
2568 else if (is_lwp (ptid))
2569 {
2570 if (debug_linux_nat)
2571 fprintf_unfiltered (gdb_stdlog,
2572 "LLW: Waiting for specific LWP %s.\n",
2573 target_pid_to_str (ptid));
2574
2575 /* We have a specific LWP to check. */
2576 lp = find_lwp_pid (ptid);
2577 gdb_assert (lp);
2578 status = lp->status;
2579 lp->status = 0;
2580
2581 if (debug_linux_nat && status)
2582 fprintf_unfiltered (gdb_stdlog,
2583 "LLW: Using pending wait status %s for %s.\n",
2584 status_to_str (status),
2585 target_pid_to_str (lp->ptid));
2586
2587 /* If we have to wait, take into account whether PID is a cloned
2588 process or not. And we have to convert it to something that
2589 the layer beneath us can understand. */
2590 options = lp->cloned ? __WCLONE : 0;
2591 pid = GET_LWP (ptid);
2592 }
2593
2594 if (status && lp->signalled)
2595 {
2596 /* A pending SIGSTOP may interfere with the normal stream of
2597 events. In a typical case where interference is a problem,
2598 we have a SIGSTOP signal pending for LWP A while
2599 single-stepping it, encounter an event in LWP B, and take the
2600 pending SIGSTOP while trying to stop LWP A. After processing
2601 the event in LWP B, LWP A is continued, and we'll never see
2602 the SIGTRAP associated with the last time we were
2603 single-stepping LWP A. */
2604
2605 /* Resume the thread. It should halt immediately returning the
2606 pending SIGSTOP. */
2607 registers_changed ();
10d6c8cd
DJ
2608 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2609 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
2610 if (debug_linux_nat)
2611 fprintf_unfiltered (gdb_stdlog,
2612 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2613 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2614 target_pid_to_str (lp->ptid));
2615 lp->stopped = 0;
2616 gdb_assert (lp->resumed);
2617
2618 /* This should catch the pending SIGSTOP. */
2619 stop_wait_callback (lp, NULL);
2620 }
2621
b84876c2
PA
2622 if (!target_can_async_p ())
2623 {
2624 /* Causes SIGINT to be passed on to the attached process. */
2625 set_sigint_trap ();
2626 set_sigio_trap ();
2627 }
d6b0e80f
AC
2628
2629 while (status == 0)
2630 {
2631 pid_t lwpid;
2632
b84876c2
PA
2633 if (target_can_async_p ())
2634 /* In async mode, don't ever block. Only look at the locally
2635 queued events. */
2636 lwpid = queued_waitpid (pid, &status, options);
2637 else
2638 lwpid = my_waitpid (pid, &status, options);
2639
d6b0e80f
AC
2640 if (lwpid > 0)
2641 {
2642 gdb_assert (pid == -1 || lwpid == pid);
2643
2644 if (debug_linux_nat)
2645 {
2646 fprintf_unfiltered (gdb_stdlog,
2647 "LLW: waitpid %ld received %s\n",
2648 (long) lwpid, status_to_str (status));
2649 }
2650
02f3fc28 2651 lp = linux_nat_filter_event (lwpid, status, options);
d6b0e80f
AC
2652 if (!lp)
2653 {
02f3fc28 2654 /* A discarded event. */
d6b0e80f
AC
2655 status = 0;
2656 continue;
2657 }
2658
2659 break;
2660 }
2661
2662 if (pid == -1)
2663 {
2664 /* Alternate between checking cloned and uncloned processes. */
2665 options ^= __WCLONE;
2666
b84876c2
PA
2667 /* And every time we have checked both:
2668 In async mode, return to event loop;
2669 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 2670 if (options & __WCLONE)
b84876c2
PA
2671 {
2672 if (target_can_async_p ())
2673 {
2674 /* No interesting event. */
2675 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2676
2677 /* Get ready for the next event. */
2678 target_async (inferior_event_handler, 0);
2679
2680 if (debug_linux_nat_async)
2681 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2682
2683 return minus_one_ptid;
2684 }
2685
2686 sigsuspend (&suspend_mask);
2687 }
d6b0e80f
AC
2688 }
2689
2690 /* We shouldn't end up here unless we want to try again. */
2691 gdb_assert (status == 0);
2692 }
2693
b84876c2
PA
2694 if (!target_can_async_p ())
2695 {
2696 clear_sigio_trap ();
2697 clear_sigint_trap ();
2698 }
d6b0e80f
AC
2699
2700 gdb_assert (lp);
2701
2702 /* Don't report signals that GDB isn't interested in, such as
2703 signals that are neither printed nor stopped upon. Stopping all
2704 threads can be a bit time-consuming so if we want decent
2705 performance with heavily multi-threaded programs, especially when
2706 they're using a high frequency timer, we'd better avoid it if we
2707 can. */
2708
2709 if (WIFSTOPPED (status))
2710 {
2711 int signo = target_signal_from_host (WSTOPSIG (status));
2712
d539ed7e
UW
2713 /* If we get a signal while single-stepping, we may need special
2714 care, e.g. to skip the signal handler. Defer to common code. */
2715 if (!lp->step
2716 && signal_stop_state (signo) == 0
d6b0e80f
AC
2717 && signal_print_state (signo) == 0
2718 && signal_pass_state (signo) == 1)
2719 {
 2720 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2721 here? It is not clear we should. GDB may not expect
2722 other threads to run. On the other hand, not resuming
2723 newly attached threads may cause an unwanted delay in
2724 getting them running. */
2725 registers_changed ();
10d6c8cd
DJ
2726 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2727 lp->step, signo);
d6b0e80f
AC
2728 if (debug_linux_nat)
2729 fprintf_unfiltered (gdb_stdlog,
2730 "LLW: %s %s, %s (preempt 'handle')\n",
2731 lp->step ?
2732 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2733 target_pid_to_str (lp->ptid),
2734 signo ? strsignal (signo) : "0");
2735 lp->stopped = 0;
2736 status = 0;
2737 goto retry;
2738 }
2739
2740 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2741 {
2742 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2743 forwarded to the entire process group, that is, all LWP's
2744 will receive it. Since we only want to report it once,
2745 we try to flush it from all LWPs except this one. */
2746 sigaddset (&flush_mask, SIGINT);
2747 }
2748 }
2749
2750 /* This LWP is stopped now. */
2751 lp->stopped = 1;
2752
2753 if (debug_linux_nat)
2754 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2755 status_to_str (status), target_pid_to_str (lp->ptid));
2756
2757 /* Now stop all other LWP's ... */
2758 iterate_over_lwps (stop_callback, NULL);
2759
2760 /* ... and wait until all of them have reported back that they're no
2761 longer running. */
2762 iterate_over_lwps (stop_wait_callback, &flush_mask);
2763 iterate_over_lwps (flush_callback, &flush_mask);
2764
2765 /* If we're not waiting for a specific LWP, choose an event LWP from
2766 among those that have had events. Giving equal priority to all
2767 LWPs that have had events helps prevent starvation. */
2768 if (pid == -1)
2769 select_event_lwp (&lp, &status);
2770
2771 /* Now that we've selected our final event LWP, cancel any
2772 breakpoints in other LWPs that have hit a GDB breakpoint. See
2773 the comment in cancel_breakpoints_callback to find out why. */
2774 iterate_over_lwps (cancel_breakpoints_callback, lp);
2775
d6b0e80f
AC
2776 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2777 {
f973ed9c 2778 trap_ptid = lp->ptid;
d6b0e80f
AC
2779 if (debug_linux_nat)
2780 fprintf_unfiltered (gdb_stdlog,
2781 "LLW: trap_ptid is %s.\n",
2782 target_pid_to_str (trap_ptid));
2783 }
2784 else
2785 trap_ptid = null_ptid;
2786
2787 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2788 {
2789 *ourstatus = lp->waitstatus;
2790 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2791 }
2792 else
2793 store_waitstatus (ourstatus, status);
2794
b84876c2
PA
2795 /* Get ready for the next event. */
2796 if (target_can_async_p ())
2797 target_async (inferior_event_handler, 0);
2798
2799 if (debug_linux_nat_async)
2800 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
2801
f973ed9c 2802 return lp->ptid;
d6b0e80f
AC
2803}
2804
2805static int
2806kill_callback (struct lwp_info *lp, void *data)
2807{
2808 errno = 0;
2809 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2810 if (debug_linux_nat)
2811 fprintf_unfiltered (gdb_stdlog,
2812 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2813 target_pid_to_str (lp->ptid),
2814 errno ? safe_strerror (errno) : "OK");
2815
2816 return 0;
2817}
2818
2819static int
2820kill_wait_callback (struct lwp_info *lp, void *data)
2821{
2822 pid_t pid;
2823
2824 /* We must make sure that there are no pending events (delayed
 2825 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
2826 program doesn't interfere with any following debugging session. */
2827
2828 /* For cloned processes we must check both with __WCLONE and
2829 without, since the exit status of a cloned process isn't reported
2830 with __WCLONE. */
2831 if (lp->cloned)
2832 {
2833 do
2834 {
58aecb61 2835 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 2836 if (pid != (pid_t) -1)
d6b0e80f 2837 {
e85a822c
DJ
2838 if (debug_linux_nat)
2839 fprintf_unfiltered (gdb_stdlog,
2840 "KWC: wait %s received unknown.\n",
2841 target_pid_to_str (lp->ptid));
2842 /* The Linux kernel sometimes fails to kill a thread
2843 completely after PTRACE_KILL; that goes from the stop
2844 point in do_fork out to the one in
 2845 get_signal_to_deliver and waits again. So kill it
2846 again. */
2847 kill_callback (lp, NULL);
d6b0e80f
AC
2848 }
2849 }
2850 while (pid == GET_LWP (lp->ptid));
2851
2852 gdb_assert (pid == -1 && errno == ECHILD);
2853 }
2854
2855 do
2856 {
58aecb61 2857 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 2858 if (pid != (pid_t) -1)
d6b0e80f 2859 {
e85a822c
DJ
2860 if (debug_linux_nat)
2861 fprintf_unfiltered (gdb_stdlog,
2862 "KWC: wait %s received unk.\n",
2863 target_pid_to_str (lp->ptid));
2864 /* See the call to kill_callback above. */
2865 kill_callback (lp, NULL);
d6b0e80f
AC
2866 }
2867 }
2868 while (pid == GET_LWP (lp->ptid));
2869
2870 gdb_assert (pid == -1 && errno == ECHILD);
2871 return 0;
2872}
2873
2874static void
2875linux_nat_kill (void)
2876{
f973ed9c
DJ
2877 struct target_waitstatus last;
2878 ptid_t last_ptid;
2879 int status;
d6b0e80f 2880
b84876c2
PA
2881 if (target_can_async_p ())
2882 target_async (NULL, 0);
2883
f973ed9c
DJ
2884 /* If we're stopped while forking and we haven't followed yet,
2885 kill the other task. We need to do this first because the
2886 parent will be sleeping if this is a vfork. */
d6b0e80f 2887
f973ed9c 2888 get_last_target_status (&last_ptid, &last);
d6b0e80f 2889
f973ed9c
DJ
2890 if (last.kind == TARGET_WAITKIND_FORKED
2891 || last.kind == TARGET_WAITKIND_VFORKED)
2892 {
2893 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2894 wait (&status);
2895 }
2896
2897 if (forks_exist_p ())
b84876c2
PA
2898 {
2899 linux_fork_killall ();
2900 drain_queued_events (-1);
2901 }
f973ed9c
DJ
2902 else
2903 {
2904 /* Kill all LWP's ... */
2905 iterate_over_lwps (kill_callback, NULL);
2906
2907 /* ... and wait until we've flushed all events. */
2908 iterate_over_lwps (kill_wait_callback, NULL);
2909 }
2910
2911 target_mourn_inferior ();
d6b0e80f
AC
2912}
2913
2914static void
2915linux_nat_mourn_inferior (void)
2916{
2917 trap_ptid = null_ptid;
2918
2919 /* Destroy LWP info; it's no longer valid. */
2920 init_lwp_list ();
2921
f973ed9c 2922 if (! forks_exist_p ())
b84876c2
PA
2923 {
2924 /* Normal case, no other forks available. */
2925 if (target_can_async_p ())
2926 linux_nat_async (NULL, 0);
2927 linux_ops->to_mourn_inferior ();
2928 }
f973ed9c
DJ
2929 else
2930 /* Multi-fork case. The current inferior_ptid has exited, but
2931 there are other viable forks to debug. Delete the exiting
2932 one and context-switch to the first available. */
2933 linux_fork_mourn_inferior ();
d6b0e80f
AC
2934}
2935
10d6c8cd
DJ
2936static LONGEST
2937linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2938 const char *annex, gdb_byte *readbuf,
2939 const gdb_byte *writebuf,
2940 ULONGEST offset, LONGEST len)
d6b0e80f
AC
2941{
2942 struct cleanup *old_chain = save_inferior_ptid ();
10d6c8cd 2943 LONGEST xfer;
d6b0e80f
AC
2944
2945 if (is_lwp (inferior_ptid))
2946 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2947
10d6c8cd
DJ
2948 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2949 offset, len);
d6b0e80f
AC
2950
2951 do_cleanups (old_chain);
2952 return xfer;
2953}
2954
2955static int
2956linux_nat_thread_alive (ptid_t ptid)
2957{
2958 gdb_assert (is_lwp (ptid));
2959
2960 errno = 0;
2961 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2962 if (debug_linux_nat)
2963 fprintf_unfiltered (gdb_stdlog,
2964 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2965 target_pid_to_str (ptid),
2966 errno ? safe_strerror (errno) : "OK");
9c0dd46b 2967
155bd5d1
AC
2968 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2969 handle that case gracefully since ptrace will first do a lookup
2970 for the process based upon the passed-in pid. If that fails we
2971 will get either -ESRCH or -EPERM, otherwise the child exists and
2972 is alive. */
a529be7c 2973 if (errno == ESRCH || errno == EPERM)
d6b0e80f
AC
2974 return 0;
2975
2976 return 1;
2977}
2978
2979static char *
2980linux_nat_pid_to_str (ptid_t ptid)
2981{
2982 static char buf[64];
2983
a0ef4274
DJ
2984 if (is_lwp (ptid)
2985 && ((lwp_list && lwp_list->next)
2986 || GET_PID (ptid) != GET_LWP (ptid)))
d6b0e80f
AC
2987 {
2988 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2989 return buf;
2990 }
2991
2992 return normal_pid_to_str (ptid);
2993}
2994
d6b0e80f
AC
2995static void
2996sigchld_handler (int signo)
2997{
b84876c2
PA
2998 if (linux_nat_async_enabled
2999 && linux_nat_async_events_enabled
3000 && signo == SIGCHLD)
3001 /* It is *always* a bug to hit this. */
3002 internal_error (__FILE__, __LINE__,
3003 "sigchld_handler called when async events are enabled");
3004
d6b0e80f
AC
3005 /* Do nothing. The only reason for this handler is that it allows
3006 us to use sigsuspend in linux_nat_wait above to wait for the
3007 arrival of a SIGCHLD. */
3008}
3009
dba24537
AC
3010/* Accepts an integer PID; Returns a string representing a file that
3011 can be opened to get the symbols for the child process. */
3012
6d8fd2b7
UW
3013static char *
3014linux_child_pid_to_exec_file (int pid)
dba24537
AC
3015{
3016 char *name1, *name2;
3017
3018 name1 = xmalloc (MAXPATHLEN);
3019 name2 = xmalloc (MAXPATHLEN);
3020 make_cleanup (xfree, name1);
3021 make_cleanup (xfree, name2);
3022 memset (name2, 0, MAXPATHLEN);
3023
3024 sprintf (name1, "/proc/%d/exe", pid);
3025 if (readlink (name1, name2, MAXPATHLEN) > 0)
3026 return name2;
3027 else
3028 return name1;
3029}
3030
3031/* Service function for corefiles and info proc. */
3032
3033static int
3034read_mapping (FILE *mapfile,
3035 long long *addr,
3036 long long *endaddr,
3037 char *permissions,
3038 long long *offset,
3039 char *device, long long *inode, char *filename)
3040{
3041 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3042 addr, endaddr, permissions, offset, device, inode);
3043
2e14c2ea
MS
3044 filename[0] = '\0';
3045 if (ret > 0 && ret != EOF)
dba24537
AC
3046 {
3047 /* Eat everything up to EOL for the filename. This will prevent
3048 weird filenames (such as one with embedded whitespace) from
3049 confusing this code. It also makes this code more robust in
3050 respect to annotations the kernel may add after the filename.
3051
3052 Note the filename is used for informational purposes
3053 only. */
3054 ret += fscanf (mapfile, "%[^\n]\n", filename);
3055 }
2e14c2ea 3056
dba24537
AC
3057 return (ret != 0 && ret != EOF);
3058}
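
   (An illustrative /proc/<pid>/maps line and how the fscanf pattern
   above splits it; the values are made up.)

     08048000-0804c000 r-xp 00000000 03:01 65538      /bin/cat

     addr = 0x08048000, endaddr = 0x0804c000, permissions = "r-xp",
     offset = 0, device = "03:01", inode = 65538, and the trailing
     "%[^\n]" scan then reads filename = "/bin/cat".
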
3059
3060/* Fills the "to_find_memory_regions" target vector. Lists the memory
3061 regions in the inferior for a corefile. */
3062
3063static int
3064linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3065 unsigned long,
3066 int, int, int, void *), void *obfd)
3067{
3068 long long pid = PIDGET (inferior_ptid);
3069 char mapsfilename[MAXPATHLEN];
3070 FILE *mapsfile;
3071 long long addr, endaddr, size, offset, inode;
3072 char permissions[8], device[8], filename[MAXPATHLEN];
3073 int read, write, exec;
3074 int ret;
3075
3076 /* Compose the filename for the /proc memory map, and open it. */
3077 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3078 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 3079 error (_("Could not open %s."), mapsfilename);
dba24537
AC
3080
3081 if (info_verbose)
3082 fprintf_filtered (gdb_stdout,
3083 "Reading memory regions from %s\n", mapsfilename);
3084
3085 /* Now iterate until end-of-file. */
3086 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3087 &offset, &device[0], &inode, &filename[0]))
3088 {
3089 size = endaddr - addr;
3090
3091 /* Get the segment's permissions. */
3092 read = (strchr (permissions, 'r') != 0);
3093 write = (strchr (permissions, 'w') != 0);
3094 exec = (strchr (permissions, 'x') != 0);
3095
3096 if (info_verbose)
3097 {
3098 fprintf_filtered (gdb_stdout,
3099 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3100 size, paddr_nz (addr),
3101 read ? 'r' : ' ',
3102 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 3103 if (filename[0])
dba24537
AC
3104 fprintf_filtered (gdb_stdout, " for %s", filename);
3105 fprintf_filtered (gdb_stdout, "\n");
3106 }
3107
3108 /* Invoke the callback function to create the corefile
3109 segment. */
3110 func (addr, size, read, write, exec, obfd);
3111 }
3112 fclose (mapsfile);
3113 return 0;
3114}
3115
3116/* Records the thread's register state for the corefile note
3117 section. */
3118
3119static char *
3120linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
3121 char *note_data, int *note_size)
3122{
3123 gdb_gregset_t gregs;
3124 gdb_fpregset_t fpregs;
3125#ifdef FILL_FPXREGSET
3126 gdb_fpxregset_t fpxregs;
3127#endif
3128 unsigned long lwp = ptid_get_lwp (ptid);
594f7785
UW
3129 struct regcache *regcache = get_thread_regcache (ptid);
3130 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 3131 const struct regset *regset;
55e969c1 3132 int core_regset_p;
594f7785
UW
3133 struct cleanup *old_chain;
3134
3135 old_chain = save_inferior_ptid ();
3136 inferior_ptid = ptid;
3137 target_fetch_registers (regcache, -1);
3138 do_cleanups (old_chain);
4f844a66
DM
3139
3140 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
55e969c1
DM
3141 if (core_regset_p
3142 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3143 sizeof (gregs))) != NULL
3144 && regset->collect_regset != NULL)
594f7785 3145 regset->collect_regset (regset, regcache, -1,
55e969c1 3146 &gregs, sizeof (gregs));
4f844a66 3147 else
594f7785 3148 fill_gregset (regcache, &gregs, -1);
4f844a66 3149
55e969c1
DM
3150 note_data = (char *) elfcore_write_prstatus (obfd,
3151 note_data,
3152 note_size,
3153 lwp,
3154 stop_signal, &gregs);
3155
3156 if (core_regset_p
3157 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3158 sizeof (fpregs))) != NULL
3159 && regset->collect_regset != NULL)
594f7785 3160 regset->collect_regset (regset, regcache, -1,
55e969c1 3161 &fpregs, sizeof (fpregs));
4f844a66 3162 else
594f7785 3163 fill_fpregset (regcache, &fpregs, -1);
4f844a66 3164
55e969c1
DM
3165 note_data = (char *) elfcore_write_prfpreg (obfd,
3166 note_data,
3167 note_size,
3168 &fpregs, sizeof (fpregs));
dba24537 3169
dba24537 3170#ifdef FILL_FPXREGSET
55e969c1
DM
3171 if (core_regset_p
3172 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
3173 sizeof (fpxregs))) != NULL
3174 && regset->collect_regset != NULL)
594f7785 3175 regset->collect_regset (regset, regcache, -1,
55e969c1 3176 &fpxregs, sizeof (fpxregs));
4f844a66 3177 else
594f7785 3178 fill_fpxregset (regcache, &fpxregs, -1);
4f844a66 3179
55e969c1
DM
3180 note_data = (char *) elfcore_write_prxfpreg (obfd,
3181 note_data,
3182 note_size,
3183 &fpxregs, sizeof (fpxregs));
dba24537
AC
3184#endif
3185 return note_data;
3186}
3187
3188struct linux_nat_corefile_thread_data
3189{
3190 bfd *obfd;
3191 char *note_data;
3192 int *note_size;
3193 int num_notes;
3194};
3195
3196/* Called by gdbthread.c once per thread. Records the thread's
3197 register state for the corefile note section. */
3198
3199static int
3200linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3201{
3202 struct linux_nat_corefile_thread_data *args = data;
dba24537 3203
dba24537
AC
3204 args->note_data = linux_nat_do_thread_registers (args->obfd,
3205 ti->ptid,
3206 args->note_data,
3207 args->note_size);
3208 args->num_notes++;
56be3814 3209
dba24537
AC
3210 return 0;
3211}
3212
3213/* Records the register state for the corefile note section. */
3214
3215static char *
3216linux_nat_do_registers (bfd *obfd, ptid_t ptid,
3217 char *note_data, int *note_size)
3218{
dba24537
AC
3219 return linux_nat_do_thread_registers (obfd,
3220 ptid_build (ptid_get_pid (inferior_ptid),
3221 ptid_get_pid (inferior_ptid),
3222 0),
3223 note_data, note_size);
dba24537
AC
3224}
3225
3226/* Fills the "to_make_corefile_note" target vector. Builds the note
3227 section for a corefile, and returns it in a malloc buffer. */
3228
3229static char *
3230linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3231{
3232 struct linux_nat_corefile_thread_data thread_args;
3233 struct cleanup *old_chain;
d99148ef 3234 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3235 char fname[16] = { '\0' };
d99148ef 3236 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3237 char psargs[80] = { '\0' };
3238 char *note_data = NULL;
3239 ptid_t current_ptid = inferior_ptid;
c6826062 3240 gdb_byte *auxv;
dba24537
AC
3241 int auxv_len;
3242
3243 if (get_exec_file (0))
3244 {
3245 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3246 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3247 if (get_inferior_args ())
3248 {
d99148ef
JK
3249 char *string_end;
3250 char *psargs_end = psargs + sizeof (psargs);
3251
3252 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3253 strings fine. */
3254 string_end = memchr (psargs, 0, sizeof (psargs));
3255 if (string_end != NULL)
3256 {
3257 *string_end++ = ' ';
3258 strncpy (string_end, get_inferior_args (),
3259 psargs_end - string_end);
3260 }
dba24537
AC
3261 }
3262 note_data = (char *) elfcore_write_prpsinfo (obfd,
3263 note_data,
3264 note_size, fname, psargs);
3265 }
3266
3267 /* Dump information for threads. */
3268 thread_args.obfd = obfd;
3269 thread_args.note_data = note_data;
3270 thread_args.note_size = note_size;
3271 thread_args.num_notes = 0;
3272 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3273 if (thread_args.num_notes == 0)
3274 {
 3275 /* iterate_over_lwps didn't come up with any threads; just
3276 use inferior_ptid. */
3277 note_data = linux_nat_do_registers (obfd, inferior_ptid,
3278 note_data, note_size);
3279 }
3280 else
3281 {
3282 note_data = thread_args.note_data;
3283 }
3284
13547ab6
DJ
3285 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3286 NULL, &auxv);
dba24537
AC
3287 if (auxv_len > 0)
3288 {
3289 note_data = elfcore_write_note (obfd, note_data, note_size,
3290 "CORE", NT_AUXV, auxv, auxv_len);
3291 xfree (auxv);
3292 }
3293
3294 make_cleanup (xfree, note_data);
3295 return note_data;
3296}
3297
3298/* Implement the "info proc" command. */
3299
3300static void
3301linux_nat_info_proc_cmd (char *args, int from_tty)
3302{
3303 long long pid = PIDGET (inferior_ptid);
3304 FILE *procfile;
3305 char **argv = NULL;
3306 char buffer[MAXPATHLEN];
3307 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3308 int cmdline_f = 1;
3309 int cwd_f = 1;
3310 int exe_f = 1;
3311 int mappings_f = 0;
3312 int environ_f = 0;
3313 int status_f = 0;
3314 int stat_f = 0;
3315 int all = 0;
3316 struct stat dummy;
3317
3318 if (args)
3319 {
3320 /* Break up 'args' into an argv array. */
3321 if ((argv = buildargv (args)) == NULL)
3322 nomem (0);
3323 else
3324 make_cleanup_freeargv (argv);
3325 }
3326 while (argv != NULL && *argv != NULL)
3327 {
3328 if (isdigit (argv[0][0]))
3329 {
3330 pid = strtoul (argv[0], NULL, 10);
3331 }
3332 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3333 {
3334 mappings_f = 1;
3335 }
3336 else if (strcmp (argv[0], "status") == 0)
3337 {
3338 status_f = 1;
3339 }
3340 else if (strcmp (argv[0], "stat") == 0)
3341 {
3342 stat_f = 1;
3343 }
3344 else if (strcmp (argv[0], "cmd") == 0)
3345 {
3346 cmdline_f = 1;
3347 }
3348 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3349 {
3350 exe_f = 1;
3351 }
3352 else if (strcmp (argv[0], "cwd") == 0)
3353 {
3354 cwd_f = 1;
3355 }
3356 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3357 {
3358 all = 1;
3359 }
3360 else
3361 {
3362 /* [...] (future options here) */
3363 }
3364 argv++;
3365 }
3366 if (pid == 0)
8a3fe4f8 3367 error (_("No current process: you must name one."));
dba24537
AC
3368
3369 sprintf (fname1, "/proc/%lld", pid);
3370 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3371 error (_("No /proc directory: '%s'"), fname1);
dba24537 3372
a3f17187 3373 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3374 if (cmdline_f || all)
3375 {
3376 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3377 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3378 {
3379 fgets (buffer, sizeof (buffer), procfile);
3380 printf_filtered ("cmdline = '%s'\n", buffer);
3381 fclose (procfile);
3382 }
3383 else
8a3fe4f8 3384 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3385 }
3386 if (cwd_f || all)
3387 {
3388 sprintf (fname1, "/proc/%lld/cwd", pid);
3389 memset (fname2, 0, sizeof (fname2));
3390 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3391 printf_filtered ("cwd = '%s'\n", fname2);
3392 else
8a3fe4f8 3393 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3394 }
3395 if (exe_f || all)
3396 {
3397 sprintf (fname1, "/proc/%lld/exe", pid);
3398 memset (fname2, 0, sizeof (fname2));
3399 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3400 printf_filtered ("exe = '%s'\n", fname2);
3401 else
8a3fe4f8 3402 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3403 }
3404 if (mappings_f || all)
3405 {
3406 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3407 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3408 {
3409 long long addr, endaddr, size, offset, inode;
3410 char permissions[8], device[8], filename[MAXPATHLEN];
3411
a3f17187 3412 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3413 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3414 {
3415 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3416 "Start Addr",
3417 " End Addr",
3418 " Size", " Offset", "objfile");
3419 }
3420 else
3421 {
3422 printf_filtered (" %18s %18s %10s %10s %7s\n",
3423 "Start Addr",
3424 " End Addr",
3425 " Size", " Offset", "objfile");
3426 }
3427
3428 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3429 &offset, &device[0], &inode, &filename[0]))
3430 {
3431 size = endaddr - addr;
3432
3433 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3434 calls here (and possibly above) should be abstracted
3435 out into their own functions? Andrew suggests using
3436 a generic local_address_string instead to print out
3437 the addresses; that makes sense to me, too. */
3438
17a912b6 3439 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3440 {
3441 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3442 (unsigned long) addr, /* FIXME: pr_addr */
3443 (unsigned long) endaddr,
3444 (int) size,
3445 (unsigned int) offset,
3446 filename[0] ? filename : "");
3447 }
3448 else
3449 {
3450 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3451 (unsigned long) addr, /* FIXME: pr_addr */
3452 (unsigned long) endaddr,
3453 (int) size,
3454 (unsigned int) offset,
3455 filename[0] ? filename : "");
3456 }
3457 }
3458
3459 fclose (procfile);
3460 }
3461 else
8a3fe4f8 3462 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3463 }
3464 if (status_f || all)
3465 {
3466 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3467 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3468 {
3469 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3470 puts_filtered (buffer);
3471 fclose (procfile);
3472 }
3473 else
8a3fe4f8 3474 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3475 }
3476 if (stat_f || all)
3477 {
3478 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3479 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3480 {
3481 int itmp;
3482 char ctmp;
a25694b4 3483 long ltmp;
dba24537
AC
3484
3485 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3486 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3487 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3488 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3489 if (fscanf (procfile, "%c ", &ctmp) > 0)
          printf_filtered (_("State: %c\n"), ctmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("Parent process: %d\n"), itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("Process group: %d\n"), itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("Session id: %d\n"), itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("TTY: %d\n"), itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("TTY owner process group: %d\n"), itmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Flags: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Minor faults (no memory page): %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Minor faults, children: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Major faults (memory page faults): %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Major faults, children: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("utime: %ld\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("stime: %ld\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("utime, children: %ld\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("stime, children: %ld\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
                             ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("'nice' value: %ld\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("jiffies until next timeout: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("start time (jiffies since system boot): %ld\n"),
                             ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Virtual memory size: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("End of text: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
#if 0   /* Don't know how architecture-dependent the rest is...
           Anyway the signal bitmap info is available from "status".  */
          if (fscanf (procfile, "%lu ", &ltmp) > 0)     /* FIXME arch?  */
            printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)     /* FIXME arch?  */
            printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)     /* FIXME arch?  */
            printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
#endif
          fclose (procfile);
        }
      else
        warning (_("unable to open /proc file '%s'"), fname1);
    }
}

/* Implement the to_xfer_partial interface for memory reads using the /proc
   filesystem.  Because we can use a single read() call for /proc, this
   can be much more efficient than banging away at PTRACE_PEEKTEXT,
   but it doesn't support writes.  */

static LONGEST
linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
                         const char *annex, gdb_byte *readbuf,
                         const gdb_byte *writebuf,
                         ULONGEST offset, LONGEST len)
{
  LONGEST ret;
  int fd;
  char filename[64];

  if (object != TARGET_OBJECT_MEMORY || !readbuf)
    return 0;

  /* Don't bother for one word.  */
  if (len < 3 * sizeof (long))
    return 0;

  /* We could keep this file open and cache it - possibly one per
     thread.  That requires some juggling, but is even faster.  */
  sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return 0;

  /* If pread64 is available, use it.  It's faster if the kernel
     supports it (only one syscall), and it's 64-bit safe even on
     32-bit platforms (for instance, SPARC debugging a SPARC64
     application).  */
#ifdef HAVE_PREAD64
  if (pread64 (fd, readbuf, len, offset) != len)
#else
  if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
#endif
    ret = 0;
  else
    ret = len;

  close (fd);
  return ret;
}
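
/* Illustrative sketch, not part of the original file: the same
   /proc/<pid>/mem read technique as a standalone helper.  It assumes a
   Linux system and that the caller is already permitted to read the
   target's memory (for instance, it is the process's ptracer).  pread
   avoids a separate lseek; for full 64-bit offsets on a 32-bit host the
   file would need large-file support, as the original code gets with
   O_LARGEFILE and pread64.  All names here are hypothetical.  */

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
example_read_proc_mem (pid_t pid, unsigned long long addr,
                       void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  /* A single positioned read replaces a PTRACE_PEEKTEXT loop.  */
  n = pread (fd, buf, len, (off_t) addr);

  close (fd);
  return n;
}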

/* Parse LINE as a signal set and add its set bits to SIGS.  */

static void
add_line_to_sigset (const char *line, sigset_t *sigs)
{
  int len = strlen (line) - 1;
  const char *p;
  int signum;

  if (line[len] != '\n')
    error (_("Could not parse signal set: %s"), line);

  p = line;
  signum = len * 4;
  while (len-- > 0)
    {
      int digit;

      if (*p >= '0' && *p <= '9')
        digit = *p - '0';
      else if (*p >= 'a' && *p <= 'f')
        digit = *p - 'a' + 10;
      else
        error (_("Could not parse signal set: %s"), line);

      signum -= 4;

      if (digit & 1)
        sigaddset (sigs, signum + 1);
      if (digit & 2)
        sigaddset (sigs, signum + 2);
      if (digit & 4)
        sigaddset (sigs, signum + 3);
      if (digit & 8)
        sigaddset (sigs, signum + 4);

      p++;
    }
}
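
/* Illustrative sketch, not part of the original file: a worked example
   of the nibble-to-signal mapping above.  In the /proc status masks the
   rightmost hex digit covers signals 1-4, the next one 5-8, and so on,
   so "0000000000010000" (bit 16 set) denotes signal 17, which is SIGCHLD
   on most Linux ports; the exact numbering is architecture-specific.
   The helper is hypothetical and only shows how add_line_to_sigset
   would be exercised.  */

static void
example_check_sigchld_bit (void)
{
  sigset_t set;

  sigemptyset (&set);
  add_line_to_sigset ("0000000000010000\n", &set);

  if (sigismember (&set, 17))
    printf_filtered (_("signal 17 (SIGCHLD on Linux/x86) is in the set\n"));
}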

/* Find process PID's pending, blocked and ignored signals from
   /proc/pid/status and set PENDING, BLOCKED and IGNORED to match.  */

void
linux_proc_pending_signals (int pid, sigset_t *pending,
                            sigset_t *blocked, sigset_t *ignored)
{
  FILE *procfile;
  char buffer[MAXPATHLEN], fname[MAXPATHLEN];
  int signum;

  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  sprintf (fname, "/proc/%d/status", pid);
  procfile = fopen (fname, "r");
  if (procfile == NULL)
    error (_("Could not open %s"), fname);

  while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
         file.  However, 2.6 kernels also have a "shared" pending
         queue for delivering signals to a thread group, so check for
         a ShdPnd line also.

         Unfortunately some Red Hat kernels include the shared pending
         queue but not the ShdPnd status field.  */

      if (strncmp (buffer, "SigPnd:\t", 8) == 0)
        add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
        add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
        add_line_to_sigset (buffer + 8, blocked);
      else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
        add_line_to_sigset (buffer + 8, ignored);
    }

  fclose (procfile);
}
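
/* Illustrative sketch, not part of the original file: how a caller
   might use linux_proc_pending_signals to report the state of one
   signal for a process.  The function name and the choice of SIGINT
   are arbitrary; the sketch leans on this file's context
   (printf_filtered and the function above).  */

static void
example_report_sigint_state (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  printf_filtered (_("SIGINT pending: %s, blocked: %s, ignored: %s\n"),
                   sigismember (&pending, SIGINT) ? "yes" : "no",
                   sigismember (&blocked, SIGINT) ? "yes" : "no",
                   sigismember (&ignored, SIGINT) ? "yes" : "no");
}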

static LONGEST
linux_xfer_partial (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
                    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  LONGEST xfer;

  if (object == TARGET_OBJECT_AUXV)
    return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
                             offset, len);

  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
                                  offset, len);
  if (xfer != 0)
    return xfer;

  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
                             offset, len);
}

/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_find_memory_regions = linux_nat_find_memory_regions;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;
}

struct target_ops *
linux_target (void)
{
  struct target_ops *t;

  t = inf_ptrace_target ();
  linux_target_install_ops (t);

  return t;
}

struct target_ops *
linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
{
  struct target_ops *t;

  t = inf_ptrace_trad_target (register_u_offset);
  linux_target_install_ops (t);

  return t;
}

/* Controls if async mode is permitted.  */
static int linux_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, linux_async_permitted is *not* updated.  */
static int linux_async_permitted_1 = 0;

static void
set_maintenance_linux_async_permitted (char *args, int from_tty,
                                       struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      linux_async_permitted_1 = linux_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  linux_async_permitted = linux_async_permitted_1;
  linux_nat_set_async_mode (linux_async_permitted);
}

static void
show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty,
                                        struct cmd_list_element *c,
                                        const char *value)
{
  fprintf_filtered (file, _("\
Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"),
                    value);
}

/* target_is_async_p implementation.  */

static int
linux_nat_is_async_p (void)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "maintenance set linux-async" command.
     Someday, linux will always be async.  */
  if (!linux_async_permitted)
    return 0;

  return 1;
}

/* target_can_async_p implementation.  */

static int
linux_nat_can_async_p (void)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "maintenance set linux-async" command.
     Someday, linux will always be async.  */
  if (!linux_async_permitted)
    return 0;

  /* See target.h/target_async_mask.  */
  return linux_nat_async_mask_value;
}

/* target_async_mask implementation.  */

static int
linux_nat_async_mask (int mask)
{
  int current_state;
  current_state = linux_nat_async_mask_value;

  if (current_state != mask)
    {
      if (mask == 0)
        {
          linux_nat_async (NULL, 0);
          linux_nat_async_mask_value = mask;
          /* We're in sync mode.  Make sure SIGCHLD isn't handled by
             async_sigchld_handler when we come out of sigsuspend in
             linux_nat_wait.  */
          sigaction (SIGCHLD, &sync_sigchld_action, NULL);
        }
      else
        {
          /* Restore the async handler.  */
          sigaction (SIGCHLD, &async_sigchld_action, NULL);
          linux_nat_async_mask_value = mask;
          linux_nat_async (inferior_event_handler, 0);
        }
    }

  return current_state;
}

/* Pop an event from the event pipe.  */

static int
linux_nat_event_pipe_pop (int *ptr_status, int *ptr_options)
{
  struct waitpid_result event = {0};
  int ret;

  do
    {
      ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
    }
  while (ret == -1 && errno == EINTR);

  gdb_assert (ret == sizeof (event));

  *ptr_status = event.status;
  *ptr_options = event.options;

  linux_nat_num_queued_events--;

  return event.pid;
}

/* Push an event into the event pipe.  */

static void
linux_nat_event_pipe_push (int pid, int status, int options)
{
  int ret;
  struct waitpid_result event = {0};
  event.pid = pid;
  event.status = status;
  event.options = options;

  do
    {
      ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
      gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
    }
  while (ret == -1 && errno == EINTR);

  linux_nat_num_queued_events++;
}
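
/* Illustrative sketch, not part of the original file: the pipe
   marshalling technique above in isolation.  A small fixed-size struct
   is written to a pipe and read back on the other end, retrying on
   EINTR; writes no larger than PIPE_BUF are atomic, so the record can
   never be interleaved with another writer's.  The struct and function
   names are hypothetical.  */

#include <unistd.h>
#include <errno.h>

struct example_event
{
  int pid;
  int status;
};

static int
example_pipe_send (int fd, const struct example_event *ev)
{
  ssize_t ret;

  do
    ret = write (fd, ev, sizeof (*ev));
  while (ret == -1 && errno == EINTR);

  return ret == sizeof (*ev) ? 0 : -1;
}

static int
example_pipe_receive (int fd, struct example_event *ev)
{
  ssize_t ret;

  do
    ret = read (fd, ev, sizeof (*ev));
  while (ret == -1 && errno == EINTR);

  return ret == sizeof (*ev) ? 0 : -1;
}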

static void
get_pending_events (void)
{
  int status, options, pid;

  if (!linux_nat_async_enabled || !linux_nat_async_events_enabled)
    internal_error (__FILE__, __LINE__,
                    "get_pending_events called with async masked");

  while (1)
    {
      status = 0;
      options = __WCLONE | WNOHANG;

      do
        {
          pid = waitpid (-1, &status, options);
        }
      while (pid == -1 && errno == EINTR);

      if (pid <= 0)
        {
          options = WNOHANG;
          do
            {
              pid = waitpid (-1, &status, options);
            }
          while (pid == -1 && errno == EINTR);
        }

      if (pid <= 0)
        /* No more children reporting events.  */
        break;

      if (debug_linux_nat_async)
        fprintf_unfiltered (gdb_stdlog, "\
get_pending_events: pid(%d), status(%x), options (%x)\n",
                            pid, status, options);

      linux_nat_event_pipe_push (pid, status, options);
    }

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "\
get_pending_events: linux_nat_num_queued_events(%d)\n",
                        linux_nat_num_queued_events);
}

/* SIGCHLD handler for async mode.  */

static void
async_sigchld_handler (int signo)
{
  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");

  get_pending_events ();
}

/* Enable or disable async SIGCHLD handling.  */

static int
linux_nat_async_events (int enable)
{
  int current_state = linux_nat_async_events_enabled;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog,
                        "LNAE: enable(%d): linux_nat_async_events_enabled(%d), "
                        "linux_nat_num_queued_events(%d)\n",
                        enable, linux_nat_async_events_enabled,
                        linux_nat_num_queued_events);

  if (current_state != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);
      if (enable)
        {
          /* Unblock target events.  */
          linux_nat_async_events_enabled = 1;

          local_event_queue_to_pipe ();
          /* While in masked async, we may not have collected all the
             pending events.  Get them out now.  */
          get_pending_events ();
          sigprocmask (SIG_UNBLOCK, &mask, NULL);
        }
      else
        {
          /* Block target events.  */
          sigprocmask (SIG_BLOCK, &mask, NULL);
          linux_nat_async_events_enabled = 0;
          /* Get events out of queue, and make them available to
             queued_waitpid / my_waitpid.  */
          pipe_to_local_event_queue ();
        }
    }

  return current_state;
}
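
/* Illustrative sketch, not part of the original file: the sigprocmask
   gating technique used above, in isolation.  Blocking SIGCHLD defers
   delivery (the kernel keeps the signal pending) and unblocking lets
   any pending instance through, so a handler can be switched on and
   off safely.  The function name is hypothetical.  */

#include <signal.h>

static void
example_gate_sigchld (int enable)
{
  sigset_t mask;

  sigemptyset (&mask);
  sigaddset (&mask, SIGCHLD);
  sigprocmask (enable ? SIG_UNBLOCK : SIG_BLOCK, &mask, NULL);
}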

static int async_terminal_is_ours = 1;

/* target_terminal_inferior implementation.  */

static void
linux_nat_terminal_inferior (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_inferior ();
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.).
     This check can be removed when the common code is fixed.  */
  if (!sync_execution)
    return;

  terminal_inferior ();

  if (!async_terminal_is_ours)
    return;

  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}

/* target_terminal_ours implementation.  */

void
linux_nat_terminal_ours (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_ours ();
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */
  terminal_ours ();

  if (!sync_execution)
    return;

  if (async_terminal_is_ours)
    return;

  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}

static void (*async_client_callback) (enum inferior_event_type event_type,
                                      void *context);
static void *async_client_context;

static void
linux_nat_async_file_handler (int error, gdb_client_data client_data)
{
  async_client_callback (INF_REG_EVENT, async_client_context);
}

/* target_async implementation.  */

static void
linux_nat_async (void (*callback) (enum inferior_event_type event_type,
                                   void *context), void *context)
{
  if (linux_nat_async_mask_value == 0 || !linux_nat_async_enabled)
    internal_error (__FILE__, __LINE__,
                    "Calling target_async when async is masked");

  if (callback != NULL)
    {
      async_client_callback = callback;
      async_client_context = context;
      add_file_handler (linux_nat_event_pipe[0],
                        linux_nat_async_file_handler, NULL);

      linux_nat_async_events (1);
    }
  else
    {
      async_client_callback = callback;
      async_client_context = context;

      linux_nat_async_events (0);
      delete_file_handler (linux_nat_event_pipe[0]);
    }
  return;
}

/* Enable/Disable async mode.  */

static void
linux_nat_set_async_mode (int on)
{
  if (linux_nat_async_enabled != on)
    {
      if (on)
        {
          gdb_assert (waitpid_queue == NULL);
          sigaction (SIGCHLD, &async_sigchld_action, NULL);

          if (pipe (linux_nat_event_pipe) == -1)
            internal_error (__FILE__, __LINE__,
                            "creating event pipe failed.");

          fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
          fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
        }
      else
        {
          sigaction (SIGCHLD, &sync_sigchld_action, NULL);

          drain_queued_events (-1);

          linux_nat_num_queued_events = 0;
          close (linux_nat_event_pipe[0]);
          close (linux_nat_event_pipe[1]);
          linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1;
        }
    }
  linux_nat_async_enabled = on;
}
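
/* Illustrative sketch, not part of the original file: creating a
   non-blocking event pipe like the one set up above.  Note that fcntl
   with F_SETFL, as used in the code above, replaces the whole flag
   set; the more careful variant below first reads the current flags
   with F_GETFL and ORs in O_NONBLOCK.  The function name is
   hypothetical.  */

#include <fcntl.h>
#include <unistd.h>

static int
example_make_nonblocking_pipe (int fds[2])
{
  int i;

  if (pipe (fds) == -1)
    return -1;

  for (i = 0; i < 2; i++)
    {
      int flags = fcntl (fds[i], F_GETFL, 0);

      if (flags == -1
          || fcntl (fds[i], F_SETFL, flags | O_NONBLOCK) == -1)
        {
          close (fds[0]);
          close (fds[1]);
          return -1;
        }
    }

  return 0;
}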

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_has_thread_control = tc_schedlock;

  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_async = linux_nat_async;
  t->to_async_mask = linux_nat_async_mask;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will sit at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);

  /* TODO: Eliminate this and have libthread_db use
     find_target_beneath.  */
  thread_db_init (t);
}
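
/* Illustrative sketch, not part of the original file: the
   "save-and-delegate" idiom used above, reduced to a generic table of
   function pointers.  A copy of the inherited ops is kept so that an
   overriding method can still call the original.  All names here are
   hypothetical.  */

struct example_ops
{
  int (*do_work) (int arg);
};

static struct example_ops example_saved_ops;

static int
example_do_work_override (int arg)
{
  /* Pre-processing could go here...  */
  return example_saved_ops.do_work (arg);  /* ...then delegate.  */
}

static void
example_install_override (struct example_ops *ops)
{
  example_saved_ops = *ops;                    /* Save the inherited methods.  */
  ops->do_work = example_do_work_override;     /* Override one of them.  */
}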

/* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}

/* Return the saved siginfo associated with PTID.  */
struct siginfo *
linux_nat_get_siginfo (ptid_t ptid)
{
  struct lwp_info *lp = find_lwp_pid (ptid);

  gdb_assert (lp != NULL);

  return &lp->siginfo;
}

void
_initialize_linux_nat (void)
{
  sigset_t mask;

  add_info ("proc", linux_nat_info_proc_cmd, _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\
Specify any of the following keywords for detailed info:\n\
  mappings -- list of mapped memory regions.\n\
  stat     -- list a bunch of random process info.\n\
  status   -- list a different bunch of random process info.\n\
  all      -- list all available /proc info."));

  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
                            &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
                            NULL,
                            show_debug_linux_nat,
                            &setdebuglist, &showdebuglist);

  add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
                            &debug_linux_nat_async, _("\
Set debugging of GNU/Linux async lwp module."), _("\
Show debugging of GNU/Linux async lwp module."), _("\
Enables printf debugging output."),
                            NULL,
                            show_debug_linux_nat_async,
                            &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("linux-async", class_maintenance,
                           &linux_async_permitted_1, _("\
Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."),
                           set_maintenance_linux_async_permitted,
                           show_maintenance_linux_async_permitted,
                           &maintenance_set_cmdlist,
                           &maintenance_show_cmdlist);

  /* Block SIGCHLD by default.  Doing this early prevents it from getting
     unblocked if an exception is thrown due to an error while the
     inferior is starting (sigsetjmp/siglongjmp).  */
  sigemptyset (&mask);
  sigaddset (&mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &mask, NULL);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* The synchronous SIGCHLD handler.  */
  sync_sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sync_sigchld_action.sa_mask);
  sync_sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sync_sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  /* SIGCHLD handler for async mode.  */
  async_sigchld_action.sa_handler = async_sigchld_handler;
  sigemptyset (&async_sigchld_action.sa_mask);
  async_sigchld_action.sa_flags = SA_RESTART;

  /* Install the default mode.  */
  linux_nat_set_async_mode (linux_async_permitted);
}

/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  */

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
                          sizeof (signo)) != 0)
    return 0;

  return signo;
}

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;
  sigset_t blocked_mask;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}