/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>   /* for MAXPATHLEN */
#include <sys/procfs.h>  /* for elf_gregset etc. */
#include "elf-bfd.h"     /* for elfcore_write_* */
#include "gregset.h"     /* for gregset */
#include "gdbcore.h"     /* for get_exec_file */
#include <ctype.h>       /* for isdigit */
#include "gdbthread.h"   /* for struct thread_info etc. */
#include "gdb_stat.h"    /* for struct stat */
#include <fcntl.h>       /* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"

/* Note on this file's use of signals:

   We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead
   of another signal is not entirely significant; we just need a
   signal to be delivered, so that we can intercept it.  SIGSTOP's
   advantage is that it can not be blocked.  A disadvantage is that it
   is not a real-time signal, so it can only be queued once; we do not
   keep track of other sources of SIGSTOP.

   Two other signals that can't be blocked are SIGCONT and SIGKILL.
   But we can't use them, because they have special behavior when the
   signal is generated - not when it is delivered.  SIGCONT resumes
   the entire thread group and SIGKILL kills the entire thread group.

   A delivered SIGSTOP would stop the entire thread group, not just the
   thread we tkill'd.  But we never let the SIGSTOP be delivered; we
   always intercept and cancel it (by PTRACE_CONT without passing
   SIGSTOP).

   We could use a real-time signal instead.  This would solve those
   problems; we could use PTRACE_GETSIGINFO to locate the specific
   stop signals sent by GDB.  But we would still have to have some
   support for SIGSTOP, since PTRACE_ATTACH generates it, and there
   are races with trying to find a signal that is not blocked.  */

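/* Illustrative sketch (not part of GDB): the stop-and-cancel dance the
   note above describes, reduced to bare tkill/waitpid/ptrace calls.
   The function name and the LWPID parameter are made up for the
   example; the real code paths live in stop_callback and
   stop_wait_callback below.  Kept out of the build on purpose.  */
#if 0
static void
example_stop_and_cancel (pid_t lwpid)
{
  int status;

  /* Ask just this thread (not the whole thread group) to stop.  */
  syscall (__NR_tkill, lwpid, SIGSTOP);

  /* Collect the resulting stop notification.  */
  waitpid (lwpid, &status, __WALL);

  /* Resume without passing SIGSTOP along, i.e. cancel the signal so
     the rest of the thread group never sees it.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    ptrace (PTRACE_CONT, lwpid, 0, 0);
}
#endif
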
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL		0x40000000 /* Wait for any child.  */
#endif

#ifndef PTRACE_GETSIGINFO
#define PTRACE_GETSIGINFO	0x4202
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

static int debug_linux_nat_async = 0;
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
		    value);
}

static int linux_parent_pid;

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support.  */

/* To listen to target events asynchronously, we install a SIGCHLD
   handler whose duty is to call waitpid (-1, ..., WNOHANG) to get all
   the pending events into a pipe.  Whenever we're ready to handle
   events asynchronously, this pipe is registered as the waitable file
   handle in the event loop.  When we reach the target entry points
   called from the common code (target_wait, target_resume, ...) that
   are going to call waitpid, we block SIGCHLD and move all the events
   placed in the pipe into a local queue.  All the subsequent calls to
   my_waitpid (a waitpid wrapper) check this local queue first.  */

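/* Illustrative sketch (not part of GDB): the simplest form of the
   "SIGCHLD handler feeds a pipe" scheme described above.  The real
   implementation additionally records the pid/status/options of each
   event (see struct waitpid_result below); here every reaped event
   just becomes one byte in a pipe that the event loop can select on.
   The names example_pipe and example_sigchld_handler are invented for
   this sketch.  */
#if 0
static int example_pipe[2];

static void
example_sigchld_handler (int signo)
{
  int status;

  /* Reap every event that is currently pending; each one wakes up the
     event loop via the pipe.  waitpid and write are both
     async-signal-safe.  */
  while (waitpid (-1, &status, WNOHANG | __WALL) > 0)
    write (example_pipe[1], "+", 1);
}
#endif
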
/* True if async mode is currently on.  */
static int linux_nat_async_enabled;

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Number of queued events in the pipe.  */
static volatile int linux_nat_num_queued_events;

/* If async mode is on, true if we're listening for events; false if
   target events are blocked.  */
static int linux_nat_async_events_enabled;

static int linux_nat_async_events (int enable);
static void pipe_to_local_event_queue (void);
static void local_event_queue_to_pipe (void);
static void linux_nat_event_pipe_push (int pid, int status, int options);
static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
static void linux_nat_set_async_mode (int on);
static void linux_nat_async (void (*callback)
			     (enum inferior_event_type event_type, void *context),
			     void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

/* Captures the result of a successful waitpid call, along with the
   options used in that call.  */
struct waitpid_result
{
  int pid;
  int status;
  int options;
  struct waitpid_result *next;
};

/* A singly-linked list of the results of the waitpid calls performed
   in the async SIGCHLD handler.  */
static struct waitpid_result *waitpid_queue = NULL;

static int
queued_waitpid (int pid, int *status, int flags)
{
  struct waitpid_result *msg = waitpid_queue, *prev = NULL;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog,
			"\
QWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n",
			linux_nat_async_events_enabled,
			linux_nat_num_queued_events);

  if (flags & __WALL)
    {
      for (; msg; prev = msg, msg = msg->next)
	if (pid == -1 || pid == msg->pid)
	  break;
    }
  else if (flags & __WCLONE)
    {
      for (; msg; prev = msg, msg = msg->next)
	if (msg->options & __WCLONE
	    && (pid == -1 || pid == msg->pid))
	  break;
    }
  else
    {
      for (; msg; prev = msg, msg = msg->next)
	if ((msg->options & __WCLONE) == 0
	    && (pid == -1 || pid == msg->pid))
	  break;
    }

  if (msg)
    {
      int pid;

      if (prev)
	prev->next = msg->next;
      else
	waitpid_queue = msg->next;

      msg->next = NULL;
      if (status)
	*status = msg->status;
      pid = msg->pid;

      if (debug_linux_nat_async)
	fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
			    pid, msg->status);
      xfree (msg);

      return pid;
    }

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");

  if (status)
    *status = 0;
  return -1;
}

static void
push_waitpid (int pid, int status, int options)
{
  struct waitpid_result *event, *new_event;

  new_event = xmalloc (sizeof (*new_event));
  new_event->pid = pid;
  new_event->status = status;
  new_event->options = options;
  new_event->next = NULL;

  if (waitpid_queue)
    {
      for (event = waitpid_queue;
	   event && event->next;
	   event = event->next)
	;

      event->next = new_event;
    }
  else
    waitpid_queue = new_event;
}

/* Drain all queued events of PID.  If PID is -1, the effect is of
   draining all events.  */
static void
drain_queued_events (int pid)
{
  while (queued_waitpid (pid, NULL, __WALL) != -1)
    ;
}

/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;
	*status = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}

/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  int ret;

  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR, and checks for
   locally queued events.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret;

  /* There should be no concurrent calls to waitpid.  */
  gdb_assert (!linux_nat_async_events_enabled);

  ret = queued_waitpid (pid, status, flags);
  if (ret != -1)
    return ret;

  do
    {
      ret = waitpid (pid, status, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
		   "killed child"), status);

      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);
  int options;

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
    | PTRACE_O_TRACECLONE;
  if (linux_supports_tracevforkdone (pid))
    options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  ptid_t last_ptid;
  struct target_waitstatus last_status;
  int has_vforked;
  int parent_pid, child_pid;

  if (target_can_async_p ())
    target_async (NULL, 0);

  get_last_target_status (&last_ptid, &last_status);
  has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (last_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (last_ptid);
  child_pid = last_status.value.related_pid;

  if (! follow_child)
    {
      /* We're already attached to the parent, by default.  */

      /* Before detaching from the child, remove all breakpoints from
	 it.  (This won't actually modify the breakpoint list, but will
	 physically remove the breakpoints from the child.)  */
      /* If we vforked this will remove the breakpoints from the parent
	 also, but they'll be reinserted below.  */
      detach_breakpoints (child_pid);

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  if (info_verbose || debug_linux_nat)
	    {
	      target_terminal_ours ();
	      fprintf_filtered (gdb_stdlog,
				"Detaching after fork from child process %d.\n",
				child_pid);
	    }

	  ptrace (PTRACE_DETACH, child_pid, 0, 0);
	}
      else
	{
	  struct fork_info *fp;
	  /* Retain child fork in ptrace (stopped) state.  */
	  fp = find_fork_pid (child_pid);
	  if (!fp)
	    fp = add_fork (child_pid);
	  fork_save_infrun_state (fp, 0);
	}

      if (has_vforked)
	{
	  gdb_assert (linux_supports_tracefork_flag >= 0);
	  if (linux_supports_tracevforkdone (0))
	    {
	      int status;

	      ptrace (PTRACE_CONT, parent_pid, 0, 0);
	      my_waitpid (parent_pid, &status, __WALL);
	      if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
		warning (_("Unexpected waitpid result %06x when waiting for "
			   "vfork-done"), status);
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      usleep (10000);
	    }

	  /* Since we vforked, breakpoints were removed in the parent
	     too.  Put them back.  */
	  reattach_breakpoints (parent_pid);
	}
    }
  else
    {
      char child_pid_spelling[40];

      /* Needed to keep the breakpoint lists in sync.  */
      if (! has_vforked)
	detach_breakpoints (child_pid);

      /* Before detaching from the parent, remove all breakpoints from it.  */
      remove_breakpoints ();

      if (info_verbose || debug_linux_nat)
	{
	  target_terminal_ours ();
	  fprintf_filtered (gdb_stdlog,
			    "Attaching after fork to child process %d.\n",
			    child_pid);
	}

      /* If we're vforking, we may want to hold on to the parent until
	 the child exits or execs.  At exec time we can remove the old
	 breakpoints from the parent and detach it; at exit time we
	 could do the same (or even, sneakily, resume debugging it - the
	 child's exec has failed, or something similar).

	 This doesn't clean up "properly", because we can't call
	 target_detach, but that's OK; if the current target is "child",
	 then it doesn't need any further cleanups, and lin_lwp will
	 generally not encounter vfork (vfork is defined to fork
	 in libpthread.so).

	 The holding part is very easy if we have VFORKDONE events;
	 but keeping track of both processes is beyond GDB at the
	 moment.  So we don't expose the parent to the rest of GDB.
	 Instead we quietly hold onto it until such time as we can
	 safely resume it.  */

      if (has_vforked)
	linux_parent_pid = parent_pid;
      else if (!detach_fork)
	{
	  struct fork_info *fp;
	  /* Retain parent fork in ptrace (stopped) state.  */
	  fp = find_fork_pid (parent_pid);
	  if (!fp)
	    fp = add_fork (parent_pid);
	  fork_save_infrun_state (fp, 0);
	}
      else
	target_detach (NULL, 0);

      inferior_ptid = ptid_build (child_pid, child_pid, 0);

      /* Reinstall ourselves, since we might have been removed in
	 target_detach (which does other necessary cleanup).  */

      push_target (ops);
      linux_nat_switch_fork (inferior_ptid);
      check_for_thread_db ();

      /* Reset breakpoints in the child as appropriate.  */
      follow_inferior_reset_breakpoints ();
    }

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  return 0;
}

static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}

static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}

static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.
     A sketch of what this means in practice follows this comment.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

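/* Illustrative sketch (not part of GDB): what the first peculiarity
   above means in practice.  A cloned LWP normally has to be waited
   for with __WCLONE, so code that does not know whether an LWP was
   cloned ends up retrying, roughly like this (the real logic is in
   wait_lwp and linux_nat_post_attach_wait below).  The function name
   is invented for the example.  */
#if 0
static int
example_wait_any_lwp (pid_t lwpid)
{
  int status;
  pid_t ret = waitpid (lwpid, &status, 0);

  if (ret == -1 && errno == ECHILD)
    /* Not a plain child of ours; try again as a cloned LWP.  */
    ret = waitpid (lwpid, &status, __WCLONE);

  return status;
}
#endif
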
/* List of known LWPs.  */
struct lwp_info *lwp_list;

/* Number of LWPs in the list.  */
static int num_lwps;

/* If the last reported event was a SIGTRAP, this variable is set to
   the process id of the LWP/thread that got it.  */
ptid_t trap_ptid;

/* Since we cannot wait (in linux_nat_wait) for the initial process and
   any cloned processes with a single call to waitpid, we have to use
   the WNOHANG flag and call waitpid in a loop.  To optimize
   things a bit we use `sigsuspend' to wake us up when a process has
   something to report (it will send us a SIGCHLD if it has).  To make
   this work we have to juggle with the signal mask.  We save the
   original signal mask such that we can restore it before creating a
   new process in order to avoid blocking certain signals in the
   inferior.  We then block SIGCHLD during the waitpid/sigsuspend
   loop.  */

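/* Illustrative sketch (not part of GDB): the shape of the WNOHANG /
   sigsuspend loop described above.  SIGCHLD is blocked while polling,
   and sigsuspend atomically unblocks it and sleeps when nothing is
   pending, so no event can be lost between the poll and the sleep.
   The function name is invented for the example; the real masks are
   normal_mask and suspend_mask below.  */
#if 0
static int
example_wait_loop (void)
{
  sigset_t chld_mask, suspend;
  int status;
  pid_t pid;

  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);

  /* Block SIGCHLD so it can only arrive inside sigsuspend.  */
  sigprocmask (SIG_BLOCK, &chld_mask, &suspend);
  sigdelset (&suspend, SIGCHLD);

  for (;;)
    {
      pid = waitpid (-1, &status, WNOHANG | __WALL);
      if (pid > 0)
	break;
      /* Nothing pending: atomically unblock SIGCHLD and sleep.  */
      sigsuspend (&suspend);
    }

  sigprocmask (SIG_UNBLOCK, &chld_mask, NULL);
  return status;
}
#endif
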
/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* SIGCHLD action for synchronous mode.  */
struct sigaction sync_sigchld_action;

/* SIGCHLD action for asynchronous mode.  */
static struct sigaction async_sigchld_action;

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_nat_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);
static int cancel_breakpoint (struct lwp_info *lp);

/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    snprintf (buf, sizeof (buf), "%s (stopped)",
	      strsignal (WSTOPSIG (status)));
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WSTOPSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Initialize the list of LWPs.  Note that this module, contrary to
   what GDB's generic threads layer does for its thread list,
   re-initializes the LWP lists whenever we mourn or detach (which
   doesn't involve mourning) the inferior.  */

static void
init_lwp_list (void)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      xfree (lp);
    }

  lwp_list = NULL;
  num_lwps = 0;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;

  lp->next = lwp_list;
  lwp_list = lp;
  ++num_lwps;

  if (num_lwps > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  num_lwps--;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      if ((*callback) (lp, data))
	return lp;
    }

  return NULL;
}

/* Update our internal state when changing from one fork (checkpoint,
   et cetera) to another indicated by NEW_PTID.  We can only switch
   single-threaded applications, so we only create one new LWP, and
   the previous list is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  init_lwp_list ();
  lp = add_lwp (new_ptid);
  lp->stopped = 1;
}

/* Record a PTID for later deletion.  */

struct saved_ptids
{
  ptid_t ptid;
  struct saved_ptids *next;
};
static struct saved_ptids *threads_to_delete;

static void
record_dead_thread (ptid_t ptid)
{
  struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
  p->ptid = ptid;
  p->next = threads_to_delete;
  threads_to_delete = p;
}

/* Delete any dead threads which are not the current thread.  */

static void
prune_lwps (void)
{
  struct saved_ptids **p = &threads_to_delete;

  while (*p)
    if (! ptid_equal ((*p)->ptid, inferior_ptid))
      {
	struct saved_ptids *tmp = *p;
	delete_thread (tmp->ptid);
	*p = tmp->next;
	xfree (tmp);
      }
    else
      p = &(*p)->next;
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  if (in_thread_list (lp->ptid))
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      /* Core GDB cannot deal with us deleting the current thread.  */
      if (!ptid_equal (lp->ptid, inferior_ptid))
	delete_thread (lp->ptid);
      else
	record_dead_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      while (fgets (buf, sizeof (buf), status_file))
	{
	  if (strncmp (buf, "State:", 6) == 0)
	    {
	      have_state = 1;
	      break;
	    }
	}
      if (have_state && strstr (buf, "T (stopped)") != NULL)
	retval = 1;
      fclose (status_file);
    }
  return retval;
}

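/* For reference (an illustration, not GDB documentation of the kernel
   interface): the line pid_is_stopped looks for in /proc/PID/status is
   plain text such as

     State:  T (stopped)

   The exact set of fields varies between kernel versions.  A thread
   stopped by its tracer instead shows `T (tracing stop)', which is
   deliberately treated as not stopped here.  */
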
/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid && WIFSTOPPED (status));

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful or -1
   if the new LWP could not be attached.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int async_events_were_enabled = 0;

  gdb_assert (is_lwp (ptid));

  if (target_can_async_p ())
    async_events_were_enabled = linux_nat_async_events (0);

  lp = find_lwp_pid (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
	{
	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  if (async_events_were_enabled)
    linux_nat_async_events (1);

  return 0;
}

static void
linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
			   int from_tty)
{
  int saved_async = 0;

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  if (target_can_async_p ())
    saved_async = linux_nat_async_mask (0);
  else
    {
      /* Restore the original signal mask.  */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend.  */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);

  if (saved_async)
    linux_nat_async_mask (saved_async);
}

static void
linux_nat_attach (char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;

  /* FIXME: We should probably accept a list of process id's, and
     attach all of them.  */
  linux_ops->to_attach (args, from_tty);

  if (!target_can_async_p ())
    {
      /* Restore the original signal mask.  */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend.  */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  /* Add the initial process as the first LWP to the list.  */
  inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  lp = add_lwp (inferior_ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  lp->stopped = 1;

  /* If this process is not using thread_db, then we still don't
     detect any other threads, but add at least this one.  */
  add_thread_silent (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  if (!target_can_async_p ())
    lp->status = status;
  else
    {
      /* We already waited for this LWP, so put the wait result on the
	 pipe.  The event loop will wake up and get us to handle this
	 event.  */
      linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
				 lp->cloned ? __WCLONE : 0);
      /* Register in the event loop.  */
      target_async (inferior_event_handler, 0);
    }
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  struct target_waitstatus last;
  ptid_t last_ptid;

  get_last_target_status (&last_ptid, &last);

  /* If this lwp is the ptid that GDB is processing an event from, the
     signal will be in stop_signal.  Otherwise, in all-stop + sync
     mode, we may cache pending events in lp->status while trying to
     stop all threads (see stop_wait_callback).  In async mode, the
     events are always cached in waitpid_queue.  */

  *status = 0;
  if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
    {
      if (stop_signal != TARGET_SIGNAL_0
	  && signal_pass_state (stop_signal))
	*status = W_STOPCODE (target_signal_to_host (stop_signal));
    }
  else if (target_can_async_p ())
    queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
  else
    *status = lp->status;

  return 0;
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC:  Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (lp->status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

static void
linux_nat_detach (char *args, int from_tty)
{
  int pid;
  int status;
  enum target_signal sig;

  if (target_can_async_p ())
    linux_nat_async (NULL, 0);

  iterate_over_lwps (detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps == 1);

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (lwp_list, &status) != -1
      && WIFSTOPPED (status))
    {
      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      args = alloca (8);
      sprintf (args, "%d", (int) WSTOPSIG (status));
      fprintf_unfiltered (gdb_stdlog,
			  "LND: Sending signal %s to %s\n",
			  args,
			  target_pid_to_str (lwp_list->ptid));
    }

  trap_ptid = null_ptid;

  /* Destroy LWP info; it's no longer valid.  */
  init_lwp_list ();

  pid = GET_PID (inferior_ptid);
  inferior_ptid = pid_to_ptid (pid);
  linux_ops->to_detach (args, from_tty);

  if (target_can_async_p ())
    drain_queued_events (pid);
}

/* Resume LP.  */

static int
resume_callback (struct lwp_info *lp, void *data)
{
  if (lp->stopped && lp->status == 0)
    {
      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
			    0, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC:  PTRACE_CONT %s, 0, 0 (resume sibling)\n",
			    target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      lp->step = 0;
      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
    }

  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  return 0;
}

static void
linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
{
  struct lwp_info *lp;
  int resume_all;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0",
			target_pid_to_str (inferior_ptid));

  prune_lwps ();

  if (target_can_async_p ())
    /* Block events while we're here.  */
    linux_nat_async_events (0);

  /* A specific PTID means `step only this process id'.  */
  resume_all = (PIDGET (ptid) == -1);

  if (resume_all)
    iterate_over_lwps (resume_set_callback, NULL);
  else
    iterate_over_lwps (resume_clear_callback, NULL);

  /* If PID is -1, it's the current inferior that should be
     handled specially.  */
  if (PIDGET (ptid) == -1)
    ptid = inferior_ptid;

  lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  /* Remember if we're stepping.  */
  lp->step = step;

  /* Mark this LWP as resumed.  */
  lp->resumed = 1;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  /* In async mode, we never have pending wait status.  */
  if (target_can_async_p () && lp->status)
    internal_error (__FILE__, __LINE__, "Pending status in async mode");

  if (lp->status && WIFSTOPPED (lp->status))
    {
      int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));

      if (signal_stop_state (saved_signo) == 0
	  && signal_print_state (saved_signo) == 0
	  && signal_pass_state (saved_signo) == 1)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == TARGET_SIGNAL_0);
	  signo = saved_signo;
	  lp->status = 0;
	}
    }

  if (lp->status)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == TARGET_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     resume_callback.  */
  lp->stopped = 0;

  if (resume_all)
    iterate_over_lwps (resume_callback, NULL);

  linux_ops->to_resume (ptid, step, signo);
  memset (&lp->siginfo, 0, sizeof (lp->siginfo));

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0");

  if (target_can_async_p ())
    {
      target_executing = 1;
      target_async (inferior_event_handler, 0);
    }
}

/* Issue kill to specified lwp.  */

static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

/* Use tkill, if possible, in case we are using nptl threads.  If tkill
   fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int ret = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return ret;
      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}

3d799a95
DJ
1592/* Handle a GNU/Linux extended wait response. If we see a clone
1593 event, we need to add the new LWP to our list (and not report the
1594 trap to higher layers). This function returns non-zero if the
1595 event should be ignored and we should wait again. If STOPPING is
1596 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1597
1598static int
3d799a95
DJ
1599linux_handle_extended_wait (struct lwp_info *lp, int status,
1600 int stopping)
d6b0e80f 1601{
3d799a95
DJ
1602 int pid = GET_LWP (lp->ptid);
1603 struct target_waitstatus *ourstatus = &lp->waitstatus;
1604 struct lwp_info *new_lp = NULL;
1605 int event = status >> 16;
d6b0e80f 1606
3d799a95
DJ
1607 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1608 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1609 {
3d799a95
DJ
1610 unsigned long new_pid;
1611 int ret;
1612
1613 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1614
3d799a95
DJ
1615 /* If we haven't already seen the new PID stop, wait for it now. */
1616 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1617 {
1618 /* The new child has a pending SIGSTOP. We can't affect it until it
1619 hits the SIGSTOP, but we're already attached. */
1620 ret = my_waitpid (new_pid, &status,
1621 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1622 if (ret == -1)
1623 perror_with_name (_("waiting for new child"));
1624 else if (ret != new_pid)
1625 internal_error (__FILE__, __LINE__,
1626 _("wait returned unexpected PID %d"), ret);
1627 else if (!WIFSTOPPED (status))
1628 internal_error (__FILE__, __LINE__,
1629 _("wait returned unexpected status 0x%x"), status);
1630 }
1631
1632 ourstatus->value.related_pid = new_pid;
1633
1634 if (event == PTRACE_EVENT_FORK)
1635 ourstatus->kind = TARGET_WAITKIND_FORKED;
1636 else if (event == PTRACE_EVENT_VFORK)
1637 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1638 else
3d799a95
DJ
1639 {
1640 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1641 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1642 new_lp->cloned = 1;
d6b0e80f 1643
3d799a95
DJ
1644 if (WSTOPSIG (status) != SIGSTOP)
1645 {
1646 /* This can happen if someone starts sending signals to
1647 the new thread before it gets a chance to run, which
1648 have a lower number than SIGSTOP (e.g. SIGUSR1).
1649 This is an unlikely case, and harder to handle for
1650 fork / vfork than for clone, so we do not try - but
1651 we handle it for clone events here. We'll send
1652 the other signal on to the thread below. */
1653
1654 new_lp->signalled = 1;
1655 }
1656 else
1657 status = 0;
d6b0e80f 1658
3d799a95
DJ
1659 if (stopping)
1660 new_lp->stopped = 1;
1661 else
1662 {
1663 new_lp->resumed = 1;
1664 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
1665 status ? WSTOPSIG (status) : 0);
1666 }
d6b0e80f 1667
3d799a95
DJ
1668 if (debug_linux_nat)
1669 fprintf_unfiltered (gdb_stdlog,
1670 "LHEW: Got clone event from LWP %ld, resuming\n",
1671 GET_LWP (lp->ptid));
1672 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1673
1674 return 1;
1675 }
1676
1677 return 0;
d6b0e80f
AC
1678 }
1679
3d799a95
DJ
1680 if (event == PTRACE_EVENT_EXEC)
1681 {
1682 ourstatus->kind = TARGET_WAITKIND_EXECD;
1683 ourstatus->value.execd_pathname
6d8fd2b7 1684 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95
DJ
1685
1686 if (linux_parent_pid)
1687 {
1688 detach_breakpoints (linux_parent_pid);
1689 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1690
1691 linux_parent_pid = 0;
1692 }
1693
1694 return 0;
1695 }
1696
1697 internal_error (__FILE__, __LINE__,
1698 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1699}
1700
1701/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1702 exited. */
1703
1704static int
1705wait_lwp (struct lwp_info *lp)
1706{
1707 pid_t pid;
1708 int status;
1709 int thread_dead = 0;
1710
1711 gdb_assert (!lp->stopped);
1712 gdb_assert (lp->status == 0);
1713
58aecb61 1714 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1715 if (pid == -1 && errno == ECHILD)
1716 {
58aecb61 1717 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1718 if (pid == -1 && errno == ECHILD)
1719 {
1720 /* The thread has previously exited. We need to delete it
1721 now because, for some vendor 2.4 kernels with NPTL
1722 support backported, there won't be an exit event unless
1723 it is the main thread. 2.6 kernels will report an exit
1724 event for each thread that exits, as expected. */
1725 thread_dead = 1;
1726 if (debug_linux_nat)
1727 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1728 target_pid_to_str (lp->ptid));
1729 }
1730 }
1731
1732 if (!thread_dead)
1733 {
1734 gdb_assert (pid == GET_LWP (lp->ptid));
1735
1736 if (debug_linux_nat)
1737 {
1738 fprintf_unfiltered (gdb_stdlog,
1739 "WL: waitpid %s received %s\n",
1740 target_pid_to_str (lp->ptid),
1741 status_to_str (status));
1742 }
1743 }
1744
1745 /* Check if the thread has exited. */
1746 if (WIFEXITED (status) || WIFSIGNALED (status))
1747 {
1748 thread_dead = 1;
1749 if (debug_linux_nat)
1750 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1751 target_pid_to_str (lp->ptid));
1752 }
1753
1754 if (thread_dead)
1755 {
e26af52f 1756 exit_lwp (lp);
d6b0e80f
AC
1757 return 0;
1758 }
1759
1760 gdb_assert (WIFSTOPPED (status));
1761
1762 /* Handle GNU/Linux's extended waitstatus for trace events. */
1763 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1764 {
1765 if (debug_linux_nat)
1766 fprintf_unfiltered (gdb_stdlog,
1767 "WL: Handling extended status 0x%06x\n",
1768 status);
3d799a95 1769 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
1770 return wait_lwp (lp);
1771 }
1772
1773 return status;
1774}
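/* For reference: the extended status tested in wait_lwp above (and
   again in linux_nat_filter_event below) follows the usual Linux
   convention of packing the ptrace event code into the high bits of
   the waitpid status, i.e. ((event << 16) | (SIGTRAP << 8) | 0x7f).
   A PTRACE_EVENT_CLONE stop, for example, would typically be logged
   as "Handling extended status 0x03057f"; status >> 16 then yields
   the event code that linux_handle_extended_wait dispatches on.  */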
1775
9f0bdab8
DJ
1776/* Save the most recent siginfo for LP. This is currently only called
1777 for SIGTRAP; some ports use the si_addr field for
1778 target_stopped_data_address. In the future, it may also be used to
1779 restore the siginfo of requeued signals. */
1780
1781static void
1782save_siginfo (struct lwp_info *lp)
1783{
1784 errno = 0;
1785 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
1786 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
1787
1788 if (errno != 0)
1789 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1790}
1791
d6b0e80f
AC
1792/* Send a SIGSTOP to LP. */
1793
1794static int
1795stop_callback (struct lwp_info *lp, void *data)
1796{
1797 if (!lp->stopped && !lp->signalled)
1798 {
1799 int ret;
1800
1801 if (debug_linux_nat)
1802 {
1803 fprintf_unfiltered (gdb_stdlog,
1804 "SC: kill %s **<SIGSTOP>**\n",
1805 target_pid_to_str (lp->ptid));
1806 }
1807 errno = 0;
1808 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1809 if (debug_linux_nat)
1810 {
1811 fprintf_unfiltered (gdb_stdlog,
1812 "SC: lwp kill %d %s\n",
1813 ret,
1814 errno ? safe_strerror (errno) : "ERRNO-OK");
1815 }
1816
1817 lp->signalled = 1;
1818 gdb_assert (lp->status == 0);
1819 }
1820
1821 return 0;
1822}
1823
1824/* Wait until LP is stopped. If DATA is non-null it is interpreted as
1825 a pointer to a set of signals to be flushed immediately. */
1826
1827static int
1828stop_wait_callback (struct lwp_info *lp, void *data)
1829{
1830 sigset_t *flush_mask = data;
1831
1832 if (!lp->stopped)
1833 {
1834 int status;
1835
1836 status = wait_lwp (lp);
1837 if (status == 0)
1838 return 0;
1839
1840 /* Ignore any signals in FLUSH_MASK. */
1841 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1842 {
1843 if (!lp->signalled)
1844 {
1845 lp->stopped = 1;
1846 return 0;
1847 }
1848
1849 errno = 0;
1850 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1851 if (debug_linux_nat)
1852 fprintf_unfiltered (gdb_stdlog,
1853 "PTRACE_CONT %s, 0, 0 (%s)\n",
1854 target_pid_to_str (lp->ptid),
1855 errno ? safe_strerror (errno) : "OK");
1856
1857 return stop_wait_callback (lp, flush_mask);
1858 }
1859
1860 if (WSTOPSIG (status) != SIGSTOP)
1861 {
1862 if (WSTOPSIG (status) == SIGTRAP)
1863 {
1864 /* If a LWP other than the LWP that we're reporting an
1865 event for has hit a GDB breakpoint (as opposed to
1866 some random trap signal), then just arrange for it to
1867 hit it again later. We don't keep the SIGTRAP status
1868 and don't forward the SIGTRAP signal to the LWP. We
1869 will handle the current event, eventually we will
1870 resume all LWPs, and this one will get its breakpoint
1871 trap again.
1872
1873 If we do not do this, then we run the risk that the
1874 user will delete or disable the breakpoint, but the
1875 thread will have already tripped on it. */
1876
9f0bdab8
DJ
1877 /* Save the trap's siginfo in case we need it later. */
1878 save_siginfo (lp);
1879
d6b0e80f
AC
1880 /* Now resume this LWP and get the SIGSTOP event. */
1881 errno = 0;
1882 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1883 if (debug_linux_nat)
1884 {
1885 fprintf_unfiltered (gdb_stdlog,
1886 "PTRACE_CONT %s, 0, 0 (%s)\n",
1887 target_pid_to_str (lp->ptid),
1888 errno ? safe_strerror (errno) : "OK");
1889
1890 fprintf_unfiltered (gdb_stdlog,
1891 "SWC: Candidate SIGTRAP event in %s\n",
1892 target_pid_to_str (lp->ptid));
1893 }
710151dd
PA
1894 /* Hold this event/waitstatus while we check to see if
1895 there are any more (we still want to get that SIGSTOP). */
d6b0e80f 1896 stop_wait_callback (lp, data);
710151dd
PA
1897
1898 if (target_can_async_p ())
d6b0e80f 1899 {
710151dd
PA
1900 /* Don't leave a pending wait status in async mode.
1901 Retrigger the breakpoint. */
1902 if (!cancel_breakpoint (lp))
d6b0e80f 1903 {
710151dd
PA
1904 /* There was no gdb breakpoint set at pc. Put
1905 the event back in the queue. */
1906 if (debug_linux_nat)
1907 fprintf_unfiltered (gdb_stdlog,
1908 "SWC: kill %s, %s\n",
1909 target_pid_to_str (lp->ptid),
1910 status_to_str ((int) status));
1911 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1912 }
1913 }
1914 else
1915 {
1916 /* Hold the SIGTRAP for handling by
1917 linux_nat_wait. */
1918 /* If there's another event, throw it back into the
1919 queue. */
1920 if (lp->status)
1921 {
1922 if (debug_linux_nat)
1923 fprintf_unfiltered (gdb_stdlog,
1924 "SWC: kill %s, %s\n",
1925 target_pid_to_str (lp->ptid),
1926 status_to_str ((int) status));
1927 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 1928 }
710151dd
PA
1929 /* Save the sigtrap event. */
1930 lp->status = status;
d6b0e80f 1931 }
d6b0e80f
AC
1932 return 0;
1933 }
1934 else
1935 {
1936 /* The thread was stopped with a signal other than
1937 SIGSTOP, and didn't accidentally trip a breakpoint. */
1938
1939 if (debug_linux_nat)
1940 {
1941 fprintf_unfiltered (gdb_stdlog,
1942 "SWC: Pending event %s in %s\n",
1943 status_to_str ((int) status),
1944 target_pid_to_str (lp->ptid));
1945 }
1946 /* Now resume this LWP and get the SIGSTOP event. */
1947 errno = 0;
1948 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1949 if (debug_linux_nat)
1950 fprintf_unfiltered (gdb_stdlog,
1951 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1952 target_pid_to_str (lp->ptid),
1953 errno ? safe_strerror (errno) : "OK");
1954
1955 /* Hold this event/waitstatus while we check to see if
1956 there are any more (we still want to get that SIGSTOP). */
1957 stop_wait_callback (lp, data);
710151dd
PA
1958
1959 /* If the lp->status field is still empty, use it to
1960 hold this event. If not, then this event must be
1961 returned to the event queue of the LWP. */
1962 if (lp->status || target_can_async_p ())
d6b0e80f
AC
1963 {
1964 if (debug_linux_nat)
1965 {
1966 fprintf_unfiltered (gdb_stdlog,
1967 "SWC: kill %s, %s\n",
1968 target_pid_to_str (lp->ptid),
1969 status_to_str ((int) status));
1970 }
1971 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1972 }
710151dd
PA
1973 else
1974 lp->status = status;
d6b0e80f
AC
1975 return 0;
1976 }
1977 }
1978 else
1979 {
1980 /* We caught the SIGSTOP that we intended to catch, so
1981 there's no SIGSTOP pending. */
1982 lp->stopped = 1;
1983 lp->signalled = 0;
1984 }
1985 }
1986
1987 return 0;
1988}
1989
1990/* Check whether PID has any pending signals in FLUSH_MASK. If so set
1991 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1992
1993static int
1994linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1995{
1996 sigset_t blocked, ignored;
1997 int i;
1998
1999 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
2000
2001 if (!flush_mask)
2002 return 0;
2003
2004 for (i = 1; i < NSIG; i++)
2005 if (sigismember (pending, i))
2006 if (!sigismember (flush_mask, i)
2007 || sigismember (&blocked, i)
2008 || sigismember (&ignored, i))
2009 sigdelset (pending, i);
2010
2011 if (sigisemptyset (pending))
2012 return 0;
2013
2014 return 1;
2015}
2016
2017/* DATA is interpreted as a mask of signals to flush. If LP has
2018 signals pending, and they are all in the flush mask, then arrange
2019 to flush them. LP should be stopped, as should all other threads
2020 it might share a signal queue with. */
2021
2022static int
2023flush_callback (struct lwp_info *lp, void *data)
2024{
2025 sigset_t *flush_mask = data;
2026 sigset_t pending, intersection, blocked, ignored;
2027 int pid, status;
2028
2029 /* Normally, when an LWP exits, it is removed from the LWP list. The
2030 last LWP isn't removed till later, however. So if there is only
2031 one LWP on the list, make sure it's alive. */
2032 if (lwp_list == lp && lp->next == NULL)
2033 if (!linux_nat_thread_alive (lp->ptid))
2034 return 0;
2035
2036 /* Just because the LWP is stopped doesn't mean that new signals
2037 can't arrive from outside, so this function must be careful of
2038 race conditions. However, because all threads are stopped, we
2039 can assume that the pending mask will not shrink unless we resume
2040 the LWP, and that it will then get another signal. We can't
2041 control which one, however. */
2042
2043 if (lp->status)
2044 {
2045 if (debug_linux_nat)
a3f17187 2046 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
d6b0e80f
AC
2047 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
2048 lp->status = 0;
2049 }
2050
3d799a95
DJ
2051 /* While there is a pending signal we would like to flush, continue
2052 the inferior and collect another signal. But if there's already
2053 a saved status that we don't want to flush, we can't resume the
2054 inferior - if it stopped for some other reason we wouldn't have
2055 anywhere to save the new status. In that case, we must leave the
2056 signal unflushed (and possibly generate an extra SIGINT stop).
2057 That's much less bad than losing a signal. */
2058 while (lp->status == 0
2059 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
d6b0e80f
AC
2060 {
2061 int ret;
2062
2063 errno = 0;
2064 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2065 if (debug_linux_nat)
2066 fprintf_unfiltered (gdb_stderr,
2067 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
2068
2069 lp->stopped = 0;
2070 stop_wait_callback (lp, flush_mask);
2071 if (debug_linux_nat)
2072 fprintf_unfiltered (gdb_stderr,
2073 "FC: Wait finished; saved status is %d\n",
2074 lp->status);
2075 }
2076
2077 return 0;
2078}
2079
2080/* Return non-zero if LP has a wait status pending. */
2081
2082static int
2083status_callback (struct lwp_info *lp, void *data)
2084{
2085 /* Only report a pending wait status if we pretend that this has
2086 indeed been resumed. */
2087 return (lp->status != 0 && lp->resumed);
2088}
2089
2090/* Return non-zero if LP isn't stopped. */
2091
2092static int
2093running_callback (struct lwp_info *lp, void *data)
2094{
2095 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2096}
2097
2098/* Count the LWP's that have had events. */
2099
2100static int
2101count_events_callback (struct lwp_info *lp, void *data)
2102{
2103 int *count = data;
2104
2105 gdb_assert (count != NULL);
2106
2107 /* Count only LWPs that have a SIGTRAP event pending. */
2108 if (lp->status != 0
2109 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2110 (*count)++;
2111
2112 return 0;
2113}
2114
2115/* Select the LWP (if any) that is currently being single-stepped. */
2116
2117static int
2118select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2119{
2120 if (lp->step && lp->status != 0)
2121 return 1;
2122 else
2123 return 0;
2124}
2125
2126/* Select the Nth LWP that has had a SIGTRAP event. */
2127
2128static int
2129select_event_lwp_callback (struct lwp_info *lp, void *data)
2130{
2131 int *selector = data;
2132
2133 gdb_assert (selector != NULL);
2134
2135 /* Select only LWPs that have a SIGTRAP event pending. */
2136 if (lp->status != 0
2137 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2138 if ((*selector)-- == 0)
2139 return 1;
2140
2141 return 0;
2142}
2143
710151dd
PA
2144static int
2145cancel_breakpoint (struct lwp_info *lp)
2146{
2147 /* Arrange for a breakpoint to be hit again later. We don't keep
2148 the SIGTRAP status and don't forward the SIGTRAP signal to the
2149 LWP. We will handle the current event, eventually we will resume
2150 this LWP, and this breakpoint will trap again.
2151
2152 If we do not do this, then we run the risk that the user will
2153 delete or disable the breakpoint, but the LWP will have already
2154 tripped on it. */
2155
2156 if (breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
2157 gdbarch_decr_pc_after_break
2158 (current_gdbarch)))
2159 {
2160 if (debug_linux_nat)
2161 fprintf_unfiltered (gdb_stdlog,
2162 "CB: Push back breakpoint for %s\n",
2163 target_pid_to_str (lp->ptid));
2164
2165 /* Back up the PC if necessary. */
2166 if (gdbarch_decr_pc_after_break (current_gdbarch))
2167 write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break
2168 (current_gdbarch),
2169 lp->ptid);
2170 return 1;
2171 }
2172 return 0;
2173}
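/* A concrete illustration of the PC adjustment above (the exact
   amount is architecture-dependent): on i386 a software breakpoint
   is the one-byte "int3" instruction and the reported PC points just
   past it, so gdbarch_decr_pc_after_break is 1; cancel_breakpoint
   looks for a breakpoint at PC - 1 and, if one is found, writes the
   backed-up PC so the breakpoint traps again when the LWP is
   resumed.  On architectures that leave the PC at the breakpoint
   address the adjustment is 0 and no write is needed.  */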
2174
d6b0e80f
AC
2175static int
2176cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2177{
2178 struct lwp_info *event_lp = data;
2179
2180 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2181 if (lp == event_lp)
2182 return 0;
2183
2184 /* If a LWP other than the LWP that we're reporting an event for has
2185 hit a GDB breakpoint (as opposed to some random trap signal),
2186 then just arrange for it to hit it again later. We don't keep
2187 the SIGTRAP status and don't forward the SIGTRAP signal to the
2188 LWP. We will handle the current event, eventually we will resume
2189 all LWPs, and this one will get its breakpoint trap again.
2190
2191 If we do not do this, then we run the risk that the user will
2192 delete or disable the breakpoint, but the LWP will have already
2193 tripped on it. */
2194
2195 if (lp->status != 0
2196 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2197 && cancel_breakpoint (lp))
2198 /* Throw away the SIGTRAP. */
2199 lp->status = 0;
d6b0e80f
AC
2200
2201 return 0;
2202}
2203
2204/* Select one LWP out of those that have events pending. */
2205
2206static void
2207select_event_lwp (struct lwp_info **orig_lp, int *status)
2208{
2209 int num_events = 0;
2210 int random_selector;
2211 struct lwp_info *event_lp;
2212
ac264b3b 2213 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2214 (*orig_lp)->status = *status;
2215
2216 /* Give preference to any LWP that is being single-stepped. */
2217 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2218 if (event_lp != NULL)
2219 {
2220 if (debug_linux_nat)
2221 fprintf_unfiltered (gdb_stdlog,
2222 "SEL: Select single-step %s\n",
2223 target_pid_to_str (event_lp->ptid));
2224 }
2225 else
2226 {
2227 /* No single-stepping LWP. Select one at random, out of those
2228 which have had SIGTRAP events. */
2229
2230 /* First see how many SIGTRAP events we have. */
2231 iterate_over_lwps (count_events_callback, &num_events);
2232
2233 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2234 random_selector = (int)
2235 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
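	  /* This maps rand () (uniform over 0 .. RAND_MAX) to an integer
	     in 0 .. num_events - 1, giving each LWP with a pending
	     SIGTRAP a roughly equal chance of being picked; e.g. with
	     num_events == 3 the selector is 0, 1 or 2.  */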
2236
2237 if (debug_linux_nat && num_events > 1)
2238 fprintf_unfiltered (gdb_stdlog,
2239 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2240 num_events, random_selector);
2241
2242 event_lp = iterate_over_lwps (select_event_lwp_callback,
2243 &random_selector);
2244 }
2245
2246 if (event_lp != NULL)
2247 {
2248 /* Switch the event LWP. */
2249 *orig_lp = event_lp;
2250 *status = event_lp->status;
2251 }
2252
2253 /* Flush the wait status for the event LWP. */
2254 (*orig_lp)->status = 0;
2255}
2256
2257/* Return non-zero if LP has been resumed. */
2258
2259static int
2260resumed_callback (struct lwp_info *lp, void *data)
2261{
2262 return lp->resumed;
2263}
2264
d6b0e80f
AC
2265/* Stop an active thread, verify it still exists, then resume it. */
2266
2267static int
2268stop_and_resume_callback (struct lwp_info *lp, void *data)
2269{
2270 struct lwp_info *ptr;
2271
2272 if (!lp->stopped && !lp->signalled)
2273 {
2274 stop_callback (lp, NULL);
2275 stop_wait_callback (lp, NULL);
2276 /* Resume if the lwp still exists. */
2277 for (ptr = lwp_list; ptr; ptr = ptr->next)
2278 if (lp == ptr)
2279 {
2280 resume_callback (lp, NULL);
2281 resume_set_callback (lp, NULL);
2282 }
2283 }
2284 return 0;
2285}
2286
02f3fc28 2287/* Check if we should go on and pass this event to common code.
fa2c6a57 2288 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
2289static struct lwp_info *
2290linux_nat_filter_event (int lwpid, int status, int options)
2291{
2292 struct lwp_info *lp;
2293
2294 lp = find_lwp_pid (pid_to_ptid (lwpid));
2295
2296 /* Check for stop events reported by a process we didn't already
2297 know about - anything not already in our LWP list.
2298
2299 If we're expecting to receive stopped processes after
2300 fork, vfork, and clone events, then we'll just add the
2301 new one to our list and go back to waiting for the event
2302 to be reported - the stopped process might be returned
2303 from waitpid before or after the event is. */
2304 if (WIFSTOPPED (status) && !lp)
2305 {
2306 linux_record_stopped_pid (lwpid, status);
2307 return NULL;
2308 }
2309
2310 /* Make sure we don't report an event for the exit of an LWP not in
2311 our list, i.e. not part of the current process. This can happen
 2312 if we detach from a program we originally forked and then it
2313 exits. */
2314 if (!WIFSTOPPED (status) && !lp)
2315 return NULL;
2316
2317 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2318 CLONE_PTRACE processes which do not use the thread library -
2319 otherwise we wouldn't find the new LWP this way. That doesn't
2320 currently work, and the following code is currently unreachable
2321 due to the two blocks above. If it's fixed some day, this code
2322 should be broken out into a function so that we can also pick up
2323 LWPs from the new interface. */
2324 if (!lp)
2325 {
2326 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2327 if (options & __WCLONE)
2328 lp->cloned = 1;
2329
2330 gdb_assert (WIFSTOPPED (status)
2331 && WSTOPSIG (status) == SIGSTOP);
2332 lp->signalled = 1;
2333
2334 if (!in_thread_list (inferior_ptid))
2335 {
2336 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2337 GET_PID (inferior_ptid));
2338 add_thread (inferior_ptid);
2339 }
2340
2341 add_thread (lp->ptid);
2342 }
2343
2344 /* Save the trap's siginfo in case we need it later. */
2345 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2346 save_siginfo (lp);
2347
2348 /* Handle GNU/Linux's extended waitstatus for trace events. */
2349 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2350 {
2351 if (debug_linux_nat)
2352 fprintf_unfiltered (gdb_stdlog,
2353 "LLW: Handling extended status 0x%06x\n",
2354 status);
2355 if (linux_handle_extended_wait (lp, status, 0))
2356 return NULL;
2357 }
2358
2359 /* Check if the thread has exited. */
2360 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2361 {
2362 /* If this is the main thread, we must stop all threads and
2363 verify if they are still alive. This is because in the nptl
2364 thread model, there is no signal issued for exiting LWPs
2365 other than the main thread. We only get the main thread exit
2366 signal once all child threads have already exited. If we
2367 stop all the threads and use the stop_wait_callback to check
2368 if they have exited we can determine whether this signal
2369 should be ignored or whether it means the end of the debugged
2370 application, regardless of which threading model is being
2371 used. */
2372 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2373 {
2374 lp->stopped = 1;
2375 iterate_over_lwps (stop_and_resume_callback, NULL);
2376 }
2377
2378 if (debug_linux_nat)
2379 fprintf_unfiltered (gdb_stdlog,
2380 "LLW: %s exited.\n",
2381 target_pid_to_str (lp->ptid));
2382
2383 exit_lwp (lp);
2384
2385 /* If there is at least one more LWP, then the exit signal was
2386 not the end of the debugged application and should be
2387 ignored. */
2388 if (num_lwps > 0)
2389 {
2390 /* Make sure there is at least one thread running. */
2391 gdb_assert (iterate_over_lwps (running_callback, NULL));
2392
2393 /* Discard the event. */
2394 return NULL;
2395 }
2396 }
2397
2398 /* Check if the current LWP has previously exited. In the nptl
2399 thread model, LWPs other than the main thread do not issue
2400 signals when they exit so we must check whenever the thread has
2401 stopped. A similar check is made in stop_wait_callback(). */
2402 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2403 {
2404 if (debug_linux_nat)
2405 fprintf_unfiltered (gdb_stdlog,
2406 "LLW: %s exited.\n",
2407 target_pid_to_str (lp->ptid));
2408
2409 exit_lwp (lp);
2410
2411 /* Make sure there is at least one thread running. */
2412 gdb_assert (iterate_over_lwps (running_callback, NULL));
2413
2414 /* Discard the event. */
2415 return NULL;
2416 }
2417
2418 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2419 an attempt to stop an LWP. */
2420 if (lp->signalled
2421 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2422 {
2423 if (debug_linux_nat)
2424 fprintf_unfiltered (gdb_stdlog,
2425 "LLW: Delayed SIGSTOP caught for %s.\n",
2426 target_pid_to_str (lp->ptid));
2427
2428 /* This is a delayed SIGSTOP. */
2429 lp->signalled = 0;
2430
2431 registers_changed ();
2432
2433 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2434 lp->step, TARGET_SIGNAL_0);
2435 if (debug_linux_nat)
2436 fprintf_unfiltered (gdb_stdlog,
2437 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2438 lp->step ?
2439 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2440 target_pid_to_str (lp->ptid));
2441
2442 lp->stopped = 0;
2443 gdb_assert (lp->resumed);
2444
2445 /* Discard the event. */
2446 return NULL;
2447 }
2448
2449 /* An interesting event. */
2450 gdb_assert (lp);
2451 return lp;
2452}
2453
b84876c2
PA
2454/* Get the events stored in the pipe into the local queue, so they are
2455 accessible to queued_waitpid. We need to do this, since it is not
2456 always the case that the event at the head of the pipe is the event
2457 we want. */
2458
2459static void
2460pipe_to_local_event_queue (void)
2461{
2462 if (debug_linux_nat_async)
2463 fprintf_unfiltered (gdb_stdlog,
2464 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2465 linux_nat_num_queued_events);
2466 while (linux_nat_num_queued_events)
2467 {
2468 int lwpid, status, options;
b84876c2 2469 lwpid = linux_nat_event_pipe_pop (&status, &options);
b84876c2
PA
2470 gdb_assert (lwpid > 0);
2471 push_waitpid (lwpid, status, options);
2472 }
2473}
2474
2475/* Get the unprocessed events stored in the local queue back into the
2476 pipe, so the event loop realizes there's something else to
2477 process. */
2478
2479static void
2480local_event_queue_to_pipe (void)
2481{
2482 struct waitpid_result *w = waitpid_queue;
2483 while (w)
2484 {
2485 struct waitpid_result *next = w->next;
2486 linux_nat_event_pipe_push (w->pid,
2487 w->status,
2488 w->options);
2489 xfree (w);
2490 w = next;
2491 }
2492 waitpid_queue = NULL;
2493
2494 if (debug_linux_nat_async)
2495 fprintf_unfiltered (gdb_stdlog,
2496 "LEQTP: linux_nat_num_queued_events(%d)\n",
2497 linux_nat_num_queued_events);
2498}
2499
d6b0e80f
AC
2500static ptid_t
2501linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2502{
2503 struct lwp_info *lp = NULL;
2504 int options = 0;
2505 int status = 0;
2506 pid_t pid = PIDGET (ptid);
2507 sigset_t flush_mask;
2508
b84876c2
PA
2509 if (debug_linux_nat_async)
2510 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2511
f973ed9c
DJ
2512 /* The first time we get here after starting a new inferior, we may
2513 not have added it to the LWP list yet - this is the earliest
2514 moment at which we know its PID. */
2515 if (num_lwps == 0)
2516 {
2517 gdb_assert (!is_lwp (inferior_ptid));
2518
2519 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2520 GET_PID (inferior_ptid));
2521 lp = add_lwp (inferior_ptid);
2522 lp->resumed = 1;
403fe197
PA
2523 /* Add the main thread to GDB's thread list. */
2524 add_thread_silent (lp->ptid);
f973ed9c
DJ
2525 }
2526
d6b0e80f
AC
2527 sigemptyset (&flush_mask);
2528
b84876c2
PA
2529 if (target_can_async_p ())
2530 /* Block events while we're here. */
2531 target_async (NULL, 0);
d6b0e80f
AC
2532
2533retry:
2534
f973ed9c
DJ
2535 /* Make sure there is at least one LWP that has been resumed. */
2536 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
d6b0e80f
AC
2537
2538 /* First check if there is a LWP with a wait status pending. */
2539 if (pid == -1)
2540 {
2541 /* Any LWP that's been resumed will do. */
2542 lp = iterate_over_lwps (status_callback, NULL);
2543 if (lp)
2544 {
710151dd
PA
2545 if (target_can_async_p ())
2546 internal_error (__FILE__, __LINE__,
2547 "Found an LWP with a pending status in async mode.");
2548
d6b0e80f
AC
2549 status = lp->status;
2550 lp->status = 0;
2551
2552 if (debug_linux_nat && status)
2553 fprintf_unfiltered (gdb_stdlog,
2554 "LLW: Using pending wait status %s for %s.\n",
2555 status_to_str (status),
2556 target_pid_to_str (lp->ptid));
2557 }
2558
b84876c2 2559 /* But if we don't find one, we'll have to wait, and check both
d6b0e80f
AC
2560 cloned and uncloned processes. We start with the cloned
2561 processes. */
2562 options = __WCLONE | WNOHANG;
2563 }
2564 else if (is_lwp (ptid))
2565 {
2566 if (debug_linux_nat)
2567 fprintf_unfiltered (gdb_stdlog,
2568 "LLW: Waiting for specific LWP %s.\n",
2569 target_pid_to_str (ptid));
2570
2571 /* We have a specific LWP to check. */
2572 lp = find_lwp_pid (ptid);
2573 gdb_assert (lp);
2574 status = lp->status;
2575 lp->status = 0;
2576
2577 if (debug_linux_nat && status)
2578 fprintf_unfiltered (gdb_stdlog,
2579 "LLW: Using pending wait status %s for %s.\n",
2580 status_to_str (status),
2581 target_pid_to_str (lp->ptid));
2582
2583 /* If we have to wait, take into account whether PID is a cloned
2584 process or not. And we have to convert it to something that
2585 the layer beneath us can understand. */
2586 options = lp->cloned ? __WCLONE : 0;
2587 pid = GET_LWP (ptid);
2588 }
2589
2590 if (status && lp->signalled)
2591 {
2592 /* A pending SIGSTOP may interfere with the normal stream of
2593 events. In a typical case where interference is a problem,
2594 we have a SIGSTOP signal pending for LWP A while
2595 single-stepping it, encounter an event in LWP B, and take the
2596 pending SIGSTOP while trying to stop LWP A. After processing
2597 the event in LWP B, LWP A is continued, and we'll never see
2598 the SIGTRAP associated with the last time we were
2599 single-stepping LWP A. */
2600
2601 /* Resume the thread. It should halt immediately returning the
2602 pending SIGSTOP. */
2603 registers_changed ();
10d6c8cd
DJ
2604 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2605 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
2606 if (debug_linux_nat)
2607 fprintf_unfiltered (gdb_stdlog,
2608 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2609 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2610 target_pid_to_str (lp->ptid));
2611 lp->stopped = 0;
2612 gdb_assert (lp->resumed);
2613
2614 /* This should catch the pending SIGSTOP. */
2615 stop_wait_callback (lp, NULL);
2616 }
2617
b84876c2
PA
2618 if (!target_can_async_p ())
2619 {
2620 /* Causes SIGINT to be passed on to the attached process. */
2621 set_sigint_trap ();
2622 set_sigio_trap ();
2623 }
d6b0e80f
AC
2624
2625 while (status == 0)
2626 {
2627 pid_t lwpid;
2628
b84876c2
PA
2629 if (target_can_async_p ())
2630 /* In async mode, don't ever block. Only look at the locally
2631 queued events. */
2632 lwpid = queued_waitpid (pid, &status, options);
2633 else
2634 lwpid = my_waitpid (pid, &status, options);
2635
d6b0e80f
AC
2636 if (lwpid > 0)
2637 {
2638 gdb_assert (pid == -1 || lwpid == pid);
2639
2640 if (debug_linux_nat)
2641 {
2642 fprintf_unfiltered (gdb_stdlog,
2643 "LLW: waitpid %ld received %s\n",
2644 (long) lwpid, status_to_str (status));
2645 }
2646
02f3fc28 2647 lp = linux_nat_filter_event (lwpid, status, options);
d6b0e80f
AC
2648 if (!lp)
2649 {
02f3fc28 2650 /* A discarded event. */
d6b0e80f
AC
2651 status = 0;
2652 continue;
2653 }
2654
2655 break;
2656 }
2657
2658 if (pid == -1)
2659 {
2660 /* Alternate between checking cloned and uncloned processes. */
2661 options ^= __WCLONE;
2662
b84876c2
PA
2663 /* And every time we have checked both:
2664 In async mode, return to event loop;
2665 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 2666 if (options & __WCLONE)
b84876c2
PA
2667 {
2668 if (target_can_async_p ())
2669 {
2670 /* No interesting event. */
2671 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2672
2673 /* Get ready for the next event. */
2674 target_async (inferior_event_handler, 0);
2675
2676 if (debug_linux_nat_async)
2677 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2678
2679 return minus_one_ptid;
2680 }
2681
2682 sigsuspend (&suspend_mask);
2683 }
d6b0e80f
AC
2684 }
2685
2686 /* We shouldn't end up here unless we want to try again. */
2687 gdb_assert (status == 0);
2688 }
2689
b84876c2
PA
2690 if (!target_can_async_p ())
2691 {
2692 clear_sigio_trap ();
2693 clear_sigint_trap ();
2694 }
d6b0e80f
AC
2695
2696 gdb_assert (lp);
2697
2698 /* Don't report signals that GDB isn't interested in, such as
2699 signals that are neither printed nor stopped upon. Stopping all
 2700 threads can be a bit time-consuming, so if we want decent
2701 performance with heavily multi-threaded programs, especially when
2702 they're using a high frequency timer, we'd better avoid it if we
2703 can. */
2704
2705 if (WIFSTOPPED (status))
2706 {
2707 int signo = target_signal_from_host (WSTOPSIG (status));
2708
d539ed7e
UW
2709 /* If we get a signal while single-stepping, we may need special
2710 care, e.g. to skip the signal handler. Defer to common code. */
2711 if (!lp->step
2712 && signal_stop_state (signo) == 0
d6b0e80f
AC
2713 && signal_print_state (signo) == 0
2714 && signal_pass_state (signo) == 1)
2715 {
 2716 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2717 here? It is not clear we should. GDB may not expect
2718 other threads to run. On the other hand, not resuming
2719 newly attached threads may cause an unwanted delay in
2720 getting them running. */
2721 registers_changed ();
10d6c8cd
DJ
2722 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2723 lp->step, signo);
d6b0e80f
AC
2724 if (debug_linux_nat)
2725 fprintf_unfiltered (gdb_stdlog,
2726 "LLW: %s %s, %s (preempt 'handle')\n",
2727 lp->step ?
2728 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2729 target_pid_to_str (lp->ptid),
2730 signo ? strsignal (signo) : "0");
2731 lp->stopped = 0;
2732 status = 0;
2733 goto retry;
2734 }
2735
2736 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2737 {
2738 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2739 forwarded to the entire process group, that is, all LWP's
2740 will receive it. Since we only want to report it once,
2741 we try to flush it from all LWPs except this one. */
2742 sigaddset (&flush_mask, SIGINT);
2743 }
2744 }
2745
2746 /* This LWP is stopped now. */
2747 lp->stopped = 1;
2748
2749 if (debug_linux_nat)
2750 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2751 status_to_str (status), target_pid_to_str (lp->ptid));
2752
2753 /* Now stop all other LWP's ... */
2754 iterate_over_lwps (stop_callback, NULL);
2755
2756 /* ... and wait until all of them have reported back that they're no
2757 longer running. */
2758 iterate_over_lwps (stop_wait_callback, &flush_mask);
2759 iterate_over_lwps (flush_callback, &flush_mask);
2760
2761 /* If we're not waiting for a specific LWP, choose an event LWP from
2762 among those that have had events. Giving equal priority to all
2763 LWPs that have had events helps prevent starvation. */
2764 if (pid == -1)
2765 select_event_lwp (&lp, &status);
2766
2767 /* Now that we've selected our final event LWP, cancel any
2768 breakpoints in other LWPs that have hit a GDB breakpoint. See
2769 the comment in cancel_breakpoints_callback to find out why. */
2770 iterate_over_lwps (cancel_breakpoints_callback, lp);
2771
d6b0e80f
AC
2772 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2773 {
f973ed9c 2774 trap_ptid = lp->ptid;
d6b0e80f
AC
2775 if (debug_linux_nat)
2776 fprintf_unfiltered (gdb_stdlog,
2777 "LLW: trap_ptid is %s.\n",
2778 target_pid_to_str (trap_ptid));
2779 }
2780 else
2781 trap_ptid = null_ptid;
2782
2783 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2784 {
2785 *ourstatus = lp->waitstatus;
2786 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2787 }
2788 else
2789 store_waitstatus (ourstatus, status);
2790
b84876c2
PA
2791 /* Get ready for the next event. */
2792 if (target_can_async_p ())
2793 target_async (inferior_event_handler, 0);
2794
2795 if (debug_linux_nat_async)
2796 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
2797
f973ed9c 2798 return lp->ptid;
d6b0e80f
AC
2799}
2800
2801static int
2802kill_callback (struct lwp_info *lp, void *data)
2803{
2804 errno = 0;
2805 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2806 if (debug_linux_nat)
2807 fprintf_unfiltered (gdb_stdlog,
2808 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2809 target_pid_to_str (lp->ptid),
2810 errno ? safe_strerror (errno) : "OK");
2811
2812 return 0;
2813}
2814
2815static int
2816kill_wait_callback (struct lwp_info *lp, void *data)
2817{
2818 pid_t pid;
2819
2820 /* We must make sure that there are no pending events (delayed
2821 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
2822 program doesn't interfere with any following debugging session. */
2823
2824 /* For cloned processes we must check both with __WCLONE and
2825 without, since the exit status of a cloned process isn't reported
2826 with __WCLONE. */
2827 if (lp->cloned)
2828 {
2829 do
2830 {
58aecb61 2831 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 2832 if (pid != (pid_t) -1)
d6b0e80f 2833 {
e85a822c
DJ
2834 if (debug_linux_nat)
2835 fprintf_unfiltered (gdb_stdlog,
2836 "KWC: wait %s received unknown.\n",
2837 target_pid_to_str (lp->ptid));
2838 /* The Linux kernel sometimes fails to kill a thread
2839 completely after PTRACE_KILL; that goes from the stop
2840 point in do_fork out to the one in
 2841 get_signal_to_deliver and waits again. So kill it
2842 again. */
2843 kill_callback (lp, NULL);
d6b0e80f
AC
2844 }
2845 }
2846 while (pid == GET_LWP (lp->ptid));
2847
2848 gdb_assert (pid == -1 && errno == ECHILD);
2849 }
2850
2851 do
2852 {
58aecb61 2853 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 2854 if (pid != (pid_t) -1)
d6b0e80f 2855 {
e85a822c
DJ
2856 if (debug_linux_nat)
2857 fprintf_unfiltered (gdb_stdlog,
2858 "KWC: wait %s received unk.\n",
2859 target_pid_to_str (lp->ptid));
2860 /* See the call to kill_callback above. */
2861 kill_callback (lp, NULL);
d6b0e80f
AC
2862 }
2863 }
2864 while (pid == GET_LWP (lp->ptid));
2865
2866 gdb_assert (pid == -1 && errno == ECHILD);
2867 return 0;
2868}
2869
2870static void
2871linux_nat_kill (void)
2872{
f973ed9c
DJ
2873 struct target_waitstatus last;
2874 ptid_t last_ptid;
2875 int status;
d6b0e80f 2876
b84876c2
PA
2877 if (target_can_async_p ())
2878 target_async (NULL, 0);
2879
f973ed9c
DJ
2880 /* If we're stopped while forking and we haven't followed yet,
2881 kill the other task. We need to do this first because the
2882 parent will be sleeping if this is a vfork. */
d6b0e80f 2883
f973ed9c 2884 get_last_target_status (&last_ptid, &last);
d6b0e80f 2885
f973ed9c
DJ
2886 if (last.kind == TARGET_WAITKIND_FORKED
2887 || last.kind == TARGET_WAITKIND_VFORKED)
2888 {
2889 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2890 wait (&status);
2891 }
2892
2893 if (forks_exist_p ())
b84876c2
PA
2894 {
2895 linux_fork_killall ();
2896 drain_queued_events (-1);
2897 }
f973ed9c
DJ
2898 else
2899 {
2900 /* Kill all LWP's ... */
2901 iterate_over_lwps (kill_callback, NULL);
2902
2903 /* ... and wait until we've flushed all events. */
2904 iterate_over_lwps (kill_wait_callback, NULL);
2905 }
2906
2907 target_mourn_inferior ();
d6b0e80f
AC
2908}
2909
2910static void
2911linux_nat_mourn_inferior (void)
2912{
2913 trap_ptid = null_ptid;
2914
2915 /* Destroy LWP info; it's no longer valid. */
2916 init_lwp_list ();
2917
f973ed9c 2918 if (! forks_exist_p ())
b84876c2
PA
2919 {
2920 /* Normal case, no other forks available. */
2921 if (target_can_async_p ())
2922 linux_nat_async (NULL, 0);
2923 linux_ops->to_mourn_inferior ();
2924 }
f973ed9c
DJ
2925 else
2926 /* Multi-fork case. The current inferior_ptid has exited, but
2927 there are other viable forks to debug. Delete the exiting
2928 one and context-switch to the first available. */
2929 linux_fork_mourn_inferior ();
d6b0e80f
AC
2930}
2931
10d6c8cd
DJ
2932static LONGEST
2933linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2934 const char *annex, gdb_byte *readbuf,
2935 const gdb_byte *writebuf,
2936 ULONGEST offset, LONGEST len)
d6b0e80f
AC
2937{
2938 struct cleanup *old_chain = save_inferior_ptid ();
10d6c8cd 2939 LONGEST xfer;
d6b0e80f
AC
2940
2941 if (is_lwp (inferior_ptid))
2942 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2943
10d6c8cd
DJ
2944 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2945 offset, len);
d6b0e80f
AC
2946
2947 do_cleanups (old_chain);
2948 return xfer;
2949}
2950
2951static int
2952linux_nat_thread_alive (ptid_t ptid)
2953{
2954 gdb_assert (is_lwp (ptid));
2955
2956 errno = 0;
2957 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2958 if (debug_linux_nat)
2959 fprintf_unfiltered (gdb_stdlog,
2960 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2961 target_pid_to_str (ptid),
2962 errno ? safe_strerror (errno) : "OK");
9c0dd46b 2963
155bd5d1
AC
2964 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2965 handle that case gracefully since ptrace will first do a lookup
2966 for the process based upon the passed-in pid. If that fails we
2967 will get either -ESRCH or -EPERM, otherwise the child exists and
2968 is alive. */
a529be7c 2969 if (errno == ESRCH || errno == EPERM)
d6b0e80f
AC
2970 return 0;
2971
2972 return 1;
2973}
2974
2975static char *
2976linux_nat_pid_to_str (ptid_t ptid)
2977{
2978 static char buf[64];
2979
a0ef4274
DJ
2980 if (is_lwp (ptid)
2981 && ((lwp_list && lwp_list->next)
2982 || GET_PID (ptid) != GET_LWP (ptid)))
d6b0e80f
AC
2983 {
2984 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2985 return buf;
2986 }
2987
2988 return normal_pid_to_str (ptid);
2989}
2990
d6b0e80f
AC
2991static void
2992sigchld_handler (int signo)
2993{
b84876c2
PA
2994 if (linux_nat_async_enabled
2995 && linux_nat_async_events_enabled
2996 && signo == SIGCHLD)
2997 /* It is *always* a bug to hit this. */
2998 internal_error (__FILE__, __LINE__,
2999 "sigchld_handler called when async events are enabled");
3000
d6b0e80f
AC
3001 /* Do nothing. The only reason for this handler is that it allows
3002 us to use sigsuspend in linux_nat_wait above to wait for the
3003 arrival of a SIGCHLD. */
3004}
3005
dba24537
AC
3006/* Accepts an integer PID; Returns a string representing a file that
3007 can be opened to get the symbols for the child process. */
3008
6d8fd2b7
UW
3009static char *
3010linux_child_pid_to_exec_file (int pid)
dba24537
AC
3011{
3012 char *name1, *name2;
3013
3014 name1 = xmalloc (MAXPATHLEN);
3015 name2 = xmalloc (MAXPATHLEN);
3016 make_cleanup (xfree, name1);
3017 make_cleanup (xfree, name2);
3018 memset (name2, 0, MAXPATHLEN);
3019
3020 sprintf (name1, "/proc/%d/exe", pid);
3021 if (readlink (name1, name2, MAXPATHLEN) > 0)
3022 return name2;
3023 else
3024 return name1;
3025}
3026
3027/* Service function for corefiles and info proc. */
3028
3029static int
3030read_mapping (FILE *mapfile,
3031 long long *addr,
3032 long long *endaddr,
3033 char *permissions,
3034 long long *offset,
3035 char *device, long long *inode, char *filename)
3036{
3037 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3038 addr, endaddr, permissions, offset, device, inode);
3039
2e14c2ea
MS
3040 filename[0] = '\0';
3041 if (ret > 0 && ret != EOF)
dba24537
AC
3042 {
3043 /* Eat everything up to EOL for the filename. This will prevent
3044 weird filenames (such as one with embedded whitespace) from
3045 confusing this code. It also makes this code more robust in
3046 respect to annotations the kernel may add after the filename.
3047
3048 Note the filename is used for informational purposes
3049 only. */
3050 ret += fscanf (mapfile, "%[^\n]\n", filename);
3051 }
2e14c2ea 3052
dba24537
AC
3053 return (ret != 0 && ret != EOF);
3054}
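/* For reference, a typical /proc/PID/maps line parsed by the fscanf
   calls above (the trailing filename may be absent for anonymous
   mappings):

     08048000-0804c000 r-xp 00000000 03:01 12345      /bin/cat

   which read_mapping returns as addr = 0x8048000, endaddr = 0x804c000,
   permissions = "r-xp", offset = 0, device = "03:01", inode = 12345
   and filename = "/bin/cat".  */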
3055
3056/* Fills the "to_find_memory_regions" target vector. Lists the memory
3057 regions in the inferior for a corefile. */
3058
3059static int
3060linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3061 unsigned long,
3062 int, int, int, void *), void *obfd)
3063{
3064 long long pid = PIDGET (inferior_ptid);
3065 char mapsfilename[MAXPATHLEN];
3066 FILE *mapsfile;
3067 long long addr, endaddr, size, offset, inode;
3068 char permissions[8], device[8], filename[MAXPATHLEN];
3069 int read, write, exec;
3070 int ret;
3071
3072 /* Compose the filename for the /proc memory map, and open it. */
3073 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3074 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 3075 error (_("Could not open %s."), mapsfilename);
dba24537
AC
3076
3077 if (info_verbose)
3078 fprintf_filtered (gdb_stdout,
3079 "Reading memory regions from %s\n", mapsfilename);
3080
3081 /* Now iterate until end-of-file. */
3082 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3083 &offset, &device[0], &inode, &filename[0]))
3084 {
3085 size = endaddr - addr;
3086
3087 /* Get the segment's permissions. */
3088 read = (strchr (permissions, 'r') != 0);
3089 write = (strchr (permissions, 'w') != 0);
3090 exec = (strchr (permissions, 'x') != 0);
3091
3092 if (info_verbose)
3093 {
3094 fprintf_filtered (gdb_stdout,
3095 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3096 size, paddr_nz (addr),
3097 read ? 'r' : ' ',
3098 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 3099 if (filename[0])
dba24537
AC
3100 fprintf_filtered (gdb_stdout, " for %s", filename);
3101 fprintf_filtered (gdb_stdout, "\n");
3102 }
3103
3104 /* Invoke the callback function to create the corefile
3105 segment. */
3106 func (addr, size, read, write, exec, obfd);
3107 }
3108 fclose (mapsfile);
3109 return 0;
3110}
3111
3112/* Records the thread's register state for the corefile note
3113 section. */
3114
3115static char *
3116linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
3117 char *note_data, int *note_size)
3118{
3119 gdb_gregset_t gregs;
3120 gdb_fpregset_t fpregs;
3121#ifdef FILL_FPXREGSET
3122 gdb_fpxregset_t fpxregs;
3123#endif
3124 unsigned long lwp = ptid_get_lwp (ptid);
594f7785
UW
3125 struct regcache *regcache = get_thread_regcache (ptid);
3126 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 3127 const struct regset *regset;
55e969c1 3128 int core_regset_p;
594f7785
UW
3129 struct cleanup *old_chain;
3130
3131 old_chain = save_inferior_ptid ();
3132 inferior_ptid = ptid;
3133 target_fetch_registers (regcache, -1);
3134 do_cleanups (old_chain);
4f844a66
DM
3135
3136 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
55e969c1
DM
3137 if (core_regset_p
3138 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3139 sizeof (gregs))) != NULL
3140 && regset->collect_regset != NULL)
594f7785 3141 regset->collect_regset (regset, regcache, -1,
55e969c1 3142 &gregs, sizeof (gregs));
4f844a66 3143 else
594f7785 3144 fill_gregset (regcache, &gregs, -1);
4f844a66 3145
55e969c1
DM
3146 note_data = (char *) elfcore_write_prstatus (obfd,
3147 note_data,
3148 note_size,
3149 lwp,
3150 stop_signal, &gregs);
3151
3152 if (core_regset_p
3153 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3154 sizeof (fpregs))) != NULL
3155 && regset->collect_regset != NULL)
594f7785 3156 regset->collect_regset (regset, regcache, -1,
55e969c1 3157 &fpregs, sizeof (fpregs));
4f844a66 3158 else
594f7785 3159 fill_fpregset (regcache, &fpregs, -1);
4f844a66 3160
55e969c1
DM
3161 note_data = (char *) elfcore_write_prfpreg (obfd,
3162 note_data,
3163 note_size,
3164 &fpregs, sizeof (fpregs));
dba24537 3165
dba24537 3166#ifdef FILL_FPXREGSET
55e969c1
DM
3167 if (core_regset_p
3168 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
3169 sizeof (fpxregs))) != NULL
3170 && regset->collect_regset != NULL)
594f7785 3171 regset->collect_regset (regset, regcache, -1,
55e969c1 3172 &fpxregs, sizeof (fpxregs));
4f844a66 3173 else
594f7785 3174 fill_fpxregset (regcache, &fpxregs, -1);
4f844a66 3175
55e969c1
DM
3176 note_data = (char *) elfcore_write_prxfpreg (obfd,
3177 note_data,
3178 note_size,
3179 &fpxregs, sizeof (fpxregs));
dba24537
AC
3180#endif
3181 return note_data;
3182}
3183
3184struct linux_nat_corefile_thread_data
3185{
3186 bfd *obfd;
3187 char *note_data;
3188 int *note_size;
3189 int num_notes;
3190};
3191
3192/* Called by gdbthread.c once per thread. Records the thread's
3193 register state for the corefile note section. */
3194
3195static int
3196linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3197{
3198 struct linux_nat_corefile_thread_data *args = data;
dba24537 3199
dba24537
AC
3200 args->note_data = linux_nat_do_thread_registers (args->obfd,
3201 ti->ptid,
3202 args->note_data,
3203 args->note_size);
3204 args->num_notes++;
56be3814 3205
dba24537
AC
3206 return 0;
3207}
3208
3209/* Records the register state for the corefile note section. */
3210
3211static char *
3212linux_nat_do_registers (bfd *obfd, ptid_t ptid,
3213 char *note_data, int *note_size)
3214{
dba24537
AC
3215 return linux_nat_do_thread_registers (obfd,
3216 ptid_build (ptid_get_pid (inferior_ptid),
3217 ptid_get_pid (inferior_ptid),
3218 0),
3219 note_data, note_size);
dba24537
AC
3220}
3221
3222/* Fills the "to_make_corefile_note" target vector. Builds the note
3223 section for a corefile, and returns it in a malloc buffer. */
3224
3225static char *
3226linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3227{
3228 struct linux_nat_corefile_thread_data thread_args;
3229 struct cleanup *old_chain;
d99148ef 3230 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3231 char fname[16] = { '\0' };
d99148ef 3232 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3233 char psargs[80] = { '\0' };
3234 char *note_data = NULL;
3235 ptid_t current_ptid = inferior_ptid;
c6826062 3236 gdb_byte *auxv;
dba24537
AC
3237 int auxv_len;
3238
3239 if (get_exec_file (0))
3240 {
3241 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3242 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3243 if (get_inferior_args ())
3244 {
d99148ef
JK
3245 char *string_end;
3246 char *psargs_end = psargs + sizeof (psargs);
3247
3248 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3249 strings fine. */
3250 string_end = memchr (psargs, 0, sizeof (psargs));
3251 if (string_end != NULL)
3252 {
3253 *string_end++ = ' ';
3254 strncpy (string_end, get_inferior_args (),
3255 psargs_end - string_end);
3256 }
dba24537
AC
3257 }
3258 note_data = (char *) elfcore_write_prpsinfo (obfd,
3259 note_data,
3260 note_size, fname, psargs);
3261 }
3262
3263 /* Dump information for threads. */
3264 thread_args.obfd = obfd;
3265 thread_args.note_data = note_data;
3266 thread_args.note_size = note_size;
3267 thread_args.num_notes = 0;
3268 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3269 if (thread_args.num_notes == 0)
3270 {
 3271 /* iterate_over_lwps didn't come up with any threads; just
3272 use inferior_ptid. */
3273 note_data = linux_nat_do_registers (obfd, inferior_ptid,
3274 note_data, note_size);
3275 }
3276 else
3277 {
3278 note_data = thread_args.note_data;
3279 }
3280
13547ab6
DJ
3281 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3282 NULL, &auxv);
dba24537
AC
3283 if (auxv_len > 0)
3284 {
3285 note_data = elfcore_write_note (obfd, note_data, note_size,
3286 "CORE", NT_AUXV, auxv, auxv_len);
3287 xfree (auxv);
3288 }
3289
3290 make_cleanup (xfree, note_data);
3291 return note_data;
3292}
3293
3294/* Implement the "info proc" command. */
3295
3296static void
3297linux_nat_info_proc_cmd (char *args, int from_tty)
3298{
3299 long long pid = PIDGET (inferior_ptid);
3300 FILE *procfile;
3301 char **argv = NULL;
3302 char buffer[MAXPATHLEN];
3303 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3304 int cmdline_f = 1;
3305 int cwd_f = 1;
3306 int exe_f = 1;
3307 int mappings_f = 0;
3308 int environ_f = 0;
3309 int status_f = 0;
3310 int stat_f = 0;
3311 int all = 0;
3312 struct stat dummy;
3313
3314 if (args)
3315 {
3316 /* Break up 'args' into an argv array. */
3317 if ((argv = buildargv (args)) == NULL)
3318 nomem (0);
3319 else
3320 make_cleanup_freeargv (argv);
3321 }
3322 while (argv != NULL && *argv != NULL)
3323 {
3324 if (isdigit (argv[0][0]))
3325 {
3326 pid = strtoul (argv[0], NULL, 10);
3327 }
3328 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3329 {
3330 mappings_f = 1;
3331 }
3332 else if (strcmp (argv[0], "status") == 0)
3333 {
3334 status_f = 1;
3335 }
3336 else if (strcmp (argv[0], "stat") == 0)
3337 {
3338 stat_f = 1;
3339 }
3340 else if (strcmp (argv[0], "cmd") == 0)
3341 {
3342 cmdline_f = 1;
3343 }
3344 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3345 {
3346 exe_f = 1;
3347 }
3348 else if (strcmp (argv[0], "cwd") == 0)
3349 {
3350 cwd_f = 1;
3351 }
3352 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3353 {
3354 all = 1;
3355 }
3356 else
3357 {
3358 /* [...] (future options here) */
3359 }
3360 argv++;
3361 }
3362 if (pid == 0)
8a3fe4f8 3363 error (_("No current process: you must name one."));
dba24537
AC
3364
3365 sprintf (fname1, "/proc/%lld", pid);
3366 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3367 error (_("No /proc directory: '%s'"), fname1);
dba24537 3368
a3f17187 3369 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3370 if (cmdline_f || all)
3371 {
3372 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3373 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3374 {
3375 fgets (buffer, sizeof (buffer), procfile);
3376 printf_filtered ("cmdline = '%s'\n", buffer);
3377 fclose (procfile);
3378 }
3379 else
8a3fe4f8 3380 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3381 }
3382 if (cwd_f || all)
3383 {
3384 sprintf (fname1, "/proc/%lld/cwd", pid);
3385 memset (fname2, 0, sizeof (fname2));
3386 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3387 printf_filtered ("cwd = '%s'\n", fname2);
3388 else
8a3fe4f8 3389 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3390 }
3391 if (exe_f || all)
3392 {
3393 sprintf (fname1, "/proc/%lld/exe", pid);
3394 memset (fname2, 0, sizeof (fname2));
3395 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3396 printf_filtered ("exe = '%s'\n", fname2);
3397 else
8a3fe4f8 3398 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3399 }
3400 if (mappings_f || all)
3401 {
3402 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3403 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3404 {
3405 long long addr, endaddr, size, offset, inode;
3406 char permissions[8], device[8], filename[MAXPATHLEN];
3407
a3f17187 3408 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3409 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3410 {
3411 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3412 "Start Addr",
3413 " End Addr",
3414 " Size", " Offset", "objfile");
3415 }
3416 else
3417 {
3418 printf_filtered (" %18s %18s %10s %10s %7s\n",
3419 "Start Addr",
3420 " End Addr",
3421 " Size", " Offset", "objfile");
3422 }
3423
3424 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3425 &offset, &device[0], &inode, &filename[0]))
3426 {
3427 size = endaddr - addr;
3428
3429 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3430 calls here (and possibly above) should be abstracted
3431 out into their own functions? Andrew suggests using
3432 a generic local_address_string instead to print out
3433 the addresses; that makes sense to me, too. */
3434
17a912b6 3435 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3436 {
3437 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3438 (unsigned long) addr, /* FIXME: pr_addr */
3439 (unsigned long) endaddr,
3440 (int) size,
3441 (unsigned int) offset,
3442 filename[0] ? filename : "");
3443 }
3444 else
3445 {
3446 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3447 (unsigned long) addr, /* FIXME: pr_addr */
3448 (unsigned long) endaddr,
3449 (int) size,
3450 (unsigned int) offset,
3451 filename[0] ? filename : "");
3452 }
3453 }
3454
3455 fclose (procfile);
3456 }
3457 else
8a3fe4f8 3458 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3459 }
3460 if (status_f || all)
3461 {
3462 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3463 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3464 {
3465 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3466 puts_filtered (buffer);
3467 fclose (procfile);
3468 }
3469 else
8a3fe4f8 3470 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3471 }
3472 if (stat_f || all)
3473 {
3474 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3475 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3476 {
3477 int itmp;
3478 char ctmp;
a25694b4 3479 long ltmp;
dba24537
AC
3480
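	  /* The fscanf chain below walks the space-separated fields of
	     /proc/PID/stat in order.  For reference, such a line
	     typically begins

	       1234 (bash) S 1233 1234 1234 34816 ...

	     i.e. pid, command name in parentheses, one-character state,
	     parent pid, process group, session id, tty, and so on; the
	     tail of the line varies between kernel versions.  */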
3481 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3482 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3483 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3484 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3485 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 3486 printf_filtered (_("State: %c\n"), ctmp);
dba24537 3487 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3488 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 3489 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3490 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 3491 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3492 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 3493 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3494 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 3495 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3496 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
3497 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3498 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3499 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3500 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3501 (unsigned long) ltmp);
3502 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3503 printf_filtered (_("Minor faults, children: %lu\n"),
3504 (unsigned long) ltmp);
3505 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3506 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3507 (unsigned long) ltmp);
3508 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3509 printf_filtered (_("Major faults, children: %lu\n"),
3510 (unsigned long) ltmp);
3511 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3512 printf_filtered (_("utime: %ld\n"), ltmp);
3513 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3514 printf_filtered (_("stime: %ld\n"), ltmp);
3515 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3516 printf_filtered (_("utime, children: %ld\n"), ltmp);
3517 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3518 printf_filtered (_("stime, children: %ld\n"), ltmp);
3519 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3520 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3521 ltmp);
3522 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3523 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3524 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3525 printf_filtered (_("jiffies until next timeout: %lu\n"),
3526 (unsigned long) ltmp);
3527 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3528 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3529 (unsigned long) ltmp);
3530 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3531 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3532 ltmp);
3533 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3534 printf_filtered (_("Virtual memory size: %lu\n"),
3535 (unsigned long) ltmp);
3536 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3537 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3538 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3539 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3540 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3541 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3542 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3543 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3544 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3545 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
3546#if 0 /* Don't know how architecture-dependent the rest is...
3547 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
3548 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3549 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3550 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3551 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3552 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3553 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3554 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3555 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3556 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3557 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3558 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3559 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3560 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3561 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537
AC
3562#endif
3563 fclose (procfile);
3564 }
3565 else
8a3fe4f8 3566 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3567 }
3568}
3569
10d6c8cd
DJ
3570/* Implement the to_xfer_partial interface for memory reads using the /proc
3571 filesystem. Because we can use a single read() call for /proc, this
3572 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3573 but it doesn't support writes. */
3574
3575static LONGEST
3576linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3577 const char *annex, gdb_byte *readbuf,
3578 const gdb_byte *writebuf,
3579 ULONGEST offset, LONGEST len)
dba24537 3580{
10d6c8cd
DJ
3581 LONGEST ret;
3582 int fd;
dba24537
AC
3583 char filename[64];
3584
10d6c8cd 3585 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3586 return 0;
3587
3588 /* Don't bother for short reads (less than three words); ptrace is good enough for those. */
3589 if (len < 3 * sizeof (long))
3590 return 0;
3591
3592 /* We could keep this file open and cache it - possibly one per
3593 thread. That requires some juggling, but is even faster. */
3594 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3595 fd = open (filename, O_RDONLY | O_LARGEFILE);
3596 if (fd == -1)
3597 return 0;
3598
3599 /* If pread64 is available, use it. It's faster if the kernel
3600 supports it (only one syscall), and it's 64-bit safe even on
3601 32-bit platforms (for instance, SPARC debugging a SPARC64
3602 application). */
3603#ifdef HAVE_PREAD64
10d6c8cd 3604 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3605#else
10d6c8cd 3606 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3607#endif
3608 ret = 0;
3609 else
3610 ret = len;
3611
3612 close (fd);
3613 return ret;
3614}
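#if 0
/* Purely illustrative sketch, not built into GDB: a stand-alone helper
   doing the same style of /proc/<pid>/mem read as linux_proc_xfer_partial
   above.  PID and ADDR are caller-supplied assumptions; returns the
   number of bytes read, or -1 on failure.  */
static ssize_t
example_proc_mem_read (int pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t nread;

  snprintf (path, sizeof (path), "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

#ifdef HAVE_PREAD64
  /* One syscall, and 64-bit safe even on 32-bit hosts.  */
  nread = pread64 (fd, buf, len, addr);
#else
  nread = (lseek (fd, addr, SEEK_SET) == -1) ? -1 : read (fd, buf, len);
#endif

  close (fd);
  return nread;
}
#endif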
3615
3616/* Parse LINE as a signal set and add its set bits to SIGS. */
3617
3618static void
3619add_line_to_sigset (const char *line, sigset_t *sigs)
3620{
3621 int len = strlen (line) - 1;
3622 const char *p;
3623 int signum;
3624
3625 if (line[len] != '\n')
8a3fe4f8 3626 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3627
3628 p = line;
3629 signum = len * 4;
3630 while (len-- > 0)
3631 {
3632 int digit;
3633
3634 if (*p >= '0' && *p <= '9')
3635 digit = *p - '0';
3636 else if (*p >= 'a' && *p <= 'f')
3637 digit = *p - 'a' + 10;
3638 else
8a3fe4f8 3639 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3640
3641 signum -= 4;
3642
3643 if (digit & 1)
3644 sigaddset (sigs, signum + 1);
3645 if (digit & 2)
3646 sigaddset (sigs, signum + 2);
3647 if (digit & 4)
3648 sigaddset (sigs, signum + 3);
3649 if (digit & 8)
3650 sigaddset (sigs, signum + 4);
3651
3652 p++;
3653 }
3654}
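/* Worked example (hypothetical status line): given
     "SigPnd:\t0000000000000002\n"
   the caller passes the text after the tab to this function.  The
   rightmost hex digit maps to signals 1-4; its value 2 (binary 0010)
   therefore adds signal 2 (SIGINT on GNU/Linux) to SIGS.  */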
3655
3656/* Find process PID's pending, blocked and ignored signals from
3657 /proc/pid/status and fill in PENDING, BLOCKED and IGNORED to match. */
3658
3659void
3660linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3661{
3662 FILE *procfile;
3663 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3664 int signum;
3665
3666 sigemptyset (pending);
3667 sigemptyset (blocked);
3668 sigemptyset (ignored);
3669 sprintf (fname, "/proc/%d/status", pid);
3670 procfile = fopen (fname, "r");
3671 if (procfile == NULL)
8a3fe4f8 3672 error (_("Could not open %s"), fname);
dba24537
AC
3673
3674 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3675 {
3676 /* Normal queued signals are on the SigPnd line in the status
3677 file. However, 2.6 kernels also have a "shared" pending
3678 queue for delivering signals to a thread group, so check for
3679 a ShdPnd line also.
3680
3681 Unfortunately some Red Hat kernels include the shared pending
3682 queue but not the ShdPnd status field. */
3683
3684 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3685 add_line_to_sigset (buffer + 8, pending);
3686 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3687 add_line_to_sigset (buffer + 8, pending);
3688 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3689 add_line_to_sigset (buffer + 8, blocked);
3690 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3691 add_line_to_sigset (buffer + 8, ignored);
3692 }
3693
3694 fclose (procfile);
3695}
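#if 0
/* Usage sketch (hypothetical caller, not part of GDB proper): fetch the
   three signal sets for PID and test a single signal.  */
static void
example_report_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  if (sigismember (&pending, SIGINT))
    printf_filtered (_("SIGINT is pending for process %d\n"), pid);
}
#endif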
3696
10d6c8cd
DJ
3697static LONGEST
3698linux_xfer_partial (struct target_ops *ops, enum target_object object,
3699 const char *annex, gdb_byte *readbuf,
3700 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3701{
3702 LONGEST xfer;
3703
3704 if (object == TARGET_OBJECT_AUXV)
3705 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3706 offset, len);
3707
3708 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3709 offset, len);
3710 if (xfer != 0)
3711 return xfer;
3712
3713 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3714 offset, len);
3715}
3716
e9efe249 3717/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
3718 it with local methods. */
3719
910122bf
UW
3720static void
3721linux_target_install_ops (struct target_ops *t)
10d6c8cd 3722{
6d8fd2b7
UW
3723 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3724 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3725 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3726 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 3727 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
3728 t->to_post_attach = linux_child_post_attach;
3729 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
3730 t->to_find_memory_regions = linux_nat_find_memory_regions;
3731 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3732
3733 super_xfer_partial = t->to_xfer_partial;
3734 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
3735}
3736
3737struct target_ops *
3738linux_target (void)
3739{
3740 struct target_ops *t;
3741
3742 t = inf_ptrace_target ();
3743 linux_target_install_ops (t);
3744
3745 return t;
3746}
3747
3748struct target_ops *
7714d83a 3749linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
3750{
3751 struct target_ops *t;
3752
3753 t = inf_ptrace_trad_target (register_u_offset);
3754 linux_target_install_ops (t);
10d6c8cd 3755
10d6c8cd
DJ
3756 return t;
3757}
3758
b84876c2
PA
3759/* Controls if async mode is permitted. */
3760static int linux_async_permitted = 0;
3761
3762/* The set command writes to this variable. If the inferior is
3763 executing, linux_nat_async_permitted is *not* updated. */
3764static int linux_async_permitted_1 = 0;
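/* For example, from the GDB command line (values illustrative):
     (gdb) maintenance set linux-async on
     (gdb) maintenance show linux-async
   The new value is rejected while the inferior is executing; see
   set_maintenance_linux_async_permitted below.  */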
3765
3766static void
3767set_maintenance_linux_async_permitted (char *args, int from_tty,
3768 struct cmd_list_element *c)
3769{
3770 if (target_has_execution)
3771 {
3772 linux_async_permitted_1 = linux_async_permitted;
3773 error (_("Cannot change this setting while the inferior is running."));
3774 }
3775
3776 linux_async_permitted = linux_async_permitted_1;
3777 linux_nat_set_async_mode (linux_async_permitted);
3778}
3779
3780static void
3781show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty,
3782 struct cmd_list_element *c, const char *value)
3783{
3784 fprintf_filtered (file, _("\
3785Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"),
3786 value);
3787}
3788
3789/* target_is_async_p implementation. */
3790
3791static int
3792linux_nat_is_async_p (void)
3793{
3794 /* NOTE: palves 2008-03-21: We're only async when the user requests
3795 it explicitly with the "maintenance set linux-async" command.
3796 Someday, the GNU/Linux target will always be async. */
3797 if (!linux_async_permitted)
3798 return 0;
3799
3800 return 1;
3801}
3802
3803/* target_can_async_p implementation. */
3804
3805static int
3806linux_nat_can_async_p (void)
3807{
3808 /* NOTE: palves 2008-03-21: We're only async when the user requests
3809 it explicitly with the "maintenance set linux-async" command.
3810 Someday, the GNU/Linux target will always be async. */
3811 if (!linux_async_permitted)
3812 return 0;
3813
3814 /* See target.h/target_async_mask. */
3815 return linux_nat_async_mask_value;
3816}
3817
3818/* target_async_mask implementation. */
3819
3820static int
3821linux_nat_async_mask (int mask)
3822{
3823 int current_state;
3824 current_state = linux_nat_async_mask_value;
3825
3826 if (current_state != mask)
3827 {
3828 if (mask == 0)
3829 {
3830 linux_nat_async (NULL, 0);
3831 linux_nat_async_mask_value = mask;
3832 /* We're in sync mode. Make sure SIGCHLD isn't handled by
3833 async_sigchld_handler when we come out of sigsuspend in
3834 linux_nat_wait. */
3835 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
3836 }
3837 else
3838 {
3839 /* Restore the async handler. */
3840 sigaction (SIGCHLD, &async_sigchld_action, NULL);
3841 linux_nat_async_mask_value = mask;
3842 linux_nat_async (inferior_event_handler, 0);
3843 }
3844 }
3845
3846 return current_state;
3847}
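/* Usage pattern (sketch): callers needing a synchronous window save and
   restore the mask, e.g.
     int saved = linux_nat_async_mask (0);
     ... do synchronous work ...
     linux_nat_async_mask (saved);  */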
3848
3849/* Pop an event from the event pipe. */
3850
3851static int
3852linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options)
3853{
3854 struct waitpid_result event = {0};
3855 int ret;
3856
3857 do
3858 {
3859 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
3860 }
3861 while (ret == -1 && errno == EINTR);
3862
3863 gdb_assert (ret == sizeof (event));
3864
3865 *ptr_status = event.status;
3866 *ptr_options = event.options;
3867
3868 linux_nat_num_queued_events--;
3869
3870 return event.pid;
3871}
3872
3873/* Push an event into the event pipe. */
3874
3875static void
3876linux_nat_event_pipe_push (int pid, int status, int options)
3877{
3878 int ret;
3879 struct waitpid_result event = {0};
3880 event.pid = pid;
3881 event.status = status;
3882 event.options = options;
3883
3884 do
3885 {
3886 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
3887 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
3888 } while (ret == -1 && errno == EINTR);
3889
3890 linux_nat_num_queued_events++;
3891}
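/* Round-trip sketch (hypothetical values): a call to
     linux_nat_event_pipe_push (pid, status, options);
   is later matched by
     pid = linux_nat_event_pipe_pop (&status, &options);
   which yields the same triple, since each event is a fixed-size
   struct waitpid_result written and read in a single operation.  */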
3892
3893static void
3894get_pending_events (void)
3895{
3896 int status, options, pid;
3897
3898 if (!linux_nat_async_enabled || !linux_nat_async_events_enabled)
3899 internal_error (__FILE__, __LINE__,
3900 "get_pending_events called with async masked");
3901
3902 while (1)
3903 {
3904 status = 0;
3905 options = __WCLONE | WNOHANG;
3906
3907 do
3908 {
3909 pid = waitpid (-1, &status, options);
3910 }
3911 while (pid == -1 && errno == EINTR);
3912
3913 if (pid <= 0)
3914 {
3915 options = WNOHANG;
3916 do
3917 {
3918 pid = waitpid (-1, &status, options);
3919 }
3920 while (pid == -1 && errno == EINTR);
3921 }
3922
3923 if (pid <= 0)
3924 /* No more children reporting events. */
3925 break;
3926
3927 if (debug_linux_nat_async)
3928 fprintf_unfiltered (gdb_stdlog, "\
3929get_pending_events: pid(%d), status(%x), options (%x)\n",
3930 pid, status, options);
3931
3932 linux_nat_event_pipe_push (pid, status, options);
3933 }
3934
3935 if (debug_linux_nat_async)
3936 fprintf_unfiltered (gdb_stdlog, "\
3937get_pending_events: linux_nat_num_queued_events(%d)\n",
3938 linux_nat_num_queued_events);
3939}
3940
3941/* SIGCHLD handler for async mode. */
3942
3943static void
3944async_sigchld_handler (int signo)
3945{
3946 if (debug_linux_nat_async)
3947 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
3948
3949 get_pending_events ();
3950}
3951
3952/* Enable or disable async SIGCHLD handling. */
3953
3954static int
3955linux_nat_async_events (int enable)
3956{
3957 int current_state = linux_nat_async_events_enabled;
3958
3959 if (debug_linux_nat_async)
3960 fprintf_unfiltered (gdb_stdlog,
3961 "LNAE: enable(%d): linux_nat_async_events_enabled(%d), "
3962 "linux_nat_num_queued_events(%d)\n",
3963 enable, linux_nat_async_events_enabled,
3964 linux_nat_num_queued_events);
3965
3966 if (current_state != enable)
3967 {
3968 sigset_t mask;
3969 sigemptyset (&mask);
3970 sigaddset (&mask, SIGCHLD);
3971 if (enable)
3972 {
3973 /* Unblock target events. */
3974 linux_nat_async_events_enabled = 1;
3975
3976 local_event_queue_to_pipe ();
3977 /* While in masked async, we may have not collected all the
3978 pending events. Get them out now. */
3979 get_pending_events ();
3980 sigprocmask (SIG_UNBLOCK, &mask, NULL);
3981 }
3982 else
3983 {
3984 /* Block target events. */
3985 sigprocmask (SIG_BLOCK, &mask, NULL);
3986 linux_nat_async_events_enabled = 0;
3987 /* Get events out of queue, and make them available to
3988 queued_waitpid / my_waitpid. */
3989 pipe_to_local_event_queue ();
3990 }
3991 }
3992
3993 return current_state;
3994}
3995
3996static int async_terminal_is_ours = 1;
3997
3998/* target_terminal_inferior implementation. */
3999
4000static void
4001linux_nat_terminal_inferior (void)
4002{
4003 if (!target_is_async_p ())
4004 {
4005 /* Async mode is disabled. */
4006 terminal_inferior ();
4007 return;
4008 }
4009
4010 /* GDB should never give the terminal to the inferior if the
4011 inferior is running in the background (run&, continue&, etc.).
4012 This check can be removed when the common code is fixed. */
4013 if (!sync_execution)
4014 return;
4015
4016 terminal_inferior ();
4017
4018 if (!async_terminal_is_ours)
4019 return;
4020
4021 delete_file_handler (input_fd);
4022 async_terminal_is_ours = 0;
4023 set_sigint_trap ();
4024}
4025
4026/* target_terminal_ours implementation. */
4027
4028void
4029linux_nat_terminal_ours (void)
4030{
4031 if (!target_is_async_p ())
4032 {
4033 /* Async mode is disabled. */
4034 terminal_ours ();
4035 return;
4036 }
4037
4038 /* GDB should never give the terminal to the inferior if the
4039 inferior is running in the background (run&, continue&, etc.),
4040 but claiming it back should always be allowed. */
4041 terminal_ours ();
4042
4043 if (!sync_execution)
4044 return;
4045
4046 if (async_terminal_is_ours)
4047 return;
4048
4049 clear_sigint_trap ();
4050 add_file_handler (input_fd, stdin_event_handler, 0);
4051 async_terminal_is_ours = 1;
4052}
4053
4054static void (*async_client_callback) (enum inferior_event_type event_type,
4055 void *context);
4056static void *async_client_context;
4057
4058static void
4059linux_nat_async_file_handler (int error, gdb_client_data client_data)
4060{
4061 async_client_callback (INF_REG_EVENT, async_client_context);
4062}
4063
4064/* target_async implementation. */
4065
4066static void
4067linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4068 void *context), void *context)
4069{
4070 if (linux_nat_async_mask_value == 0 || !linux_nat_async_enabled)
4071 internal_error (__FILE__, __LINE__,
4072 "Calling target_async when async is masked");
4073
4074 if (callback != NULL)
4075 {
4076 async_client_callback = callback;
4077 async_client_context = context;
4078 add_file_handler (linux_nat_event_pipe[0],
4079 linux_nat_async_file_handler, NULL);
4080
4081 linux_nat_async_events (1);
4082 }
4083 else
4084 {
4085 async_client_callback = callback;
4086 async_client_context = context;
4087
4088 linux_nat_async_events (0);
4089 delete_file_handler (linux_nat_event_pipe[0]);
4090 }
4091 return;
4092}
4093
4094/* Enable/Disable async mode. */
4095
4096static void
4097linux_nat_set_async_mode (int on)
4098{
4099 if (linux_nat_async_enabled != on)
4100 {
4101 if (on)
4102 {
4103 gdb_assert (waitpid_queue == NULL);
4104 sigaction (SIGCHLD, &async_sigchld_action, NULL);
4105
4106 if (pipe (linux_nat_event_pipe) == -1)
4107 internal_error (__FILE__, __LINE__,
4108 "creating event pipe failed.");
4109
4110 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4111 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4112 }
4113 else
4114 {
4115 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4116
4117 drain_queued_events (-1);
4118
4119 linux_nat_num_queued_events = 0;
4120 close (linux_nat_event_pipe[0]);
4121 close (linux_nat_event_pipe[1]);
4122 linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1;
4123
4124 }
4125 }
4126 linux_nat_async_enabled = on;
4127}
4128
f973ed9c
DJ
4129void
4130linux_nat_add_target (struct target_ops *t)
4131{
f973ed9c
DJ
4132 /* Save the provided single-threaded target. We save this in a separate
4133 variable because another target we've inherited from (e.g. inf-ptrace)
4134 may have saved a pointer to T; we want to use it for the final
4135 process stratum target. */
4136 linux_ops_saved = *t;
4137 linux_ops = &linux_ops_saved;
4138
4139 /* Override some methods for multithreading. */
b84876c2 4140 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4141 t->to_attach = linux_nat_attach;
4142 t->to_detach = linux_nat_detach;
4143 t->to_resume = linux_nat_resume;
4144 t->to_wait = linux_nat_wait;
4145 t->to_xfer_partial = linux_nat_xfer_partial;
4146 t->to_kill = linux_nat_kill;
4147 t->to_mourn_inferior = linux_nat_mourn_inferior;
4148 t->to_thread_alive = linux_nat_thread_alive;
4149 t->to_pid_to_str = linux_nat_pid_to_str;
4150 t->to_has_thread_control = tc_schedlock;
4151
b84876c2
PA
4152 t->to_can_async_p = linux_nat_can_async_p;
4153 t->to_is_async_p = linux_nat_is_async_p;
4154 t->to_async = linux_nat_async;
4155 t->to_async_mask = linux_nat_async_mask;
4156 t->to_terminal_inferior = linux_nat_terminal_inferior;
4157 t->to_terminal_ours = linux_nat_terminal_ours;
4158
f973ed9c
DJ
4159 /* We don't change the stratum; this target will sit at
4160 process_stratum and thread_db will sit at thread_stratum. This
4161 is a little strange, since this is a multi-threaded-capable
4162 target, but we want to be on the stack below thread_db, and we
4163 also want to be used for single-threaded processes. */
4164
4165 add_target (t);
4166
4167 /* TODO: Eliminate this and have libthread_db use
4168 find_target_beneath. */
4169 thread_db_init (t);
4170}
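#if 0
/* Hypothetical sketch of how an architecture-specific native file is
   expected to combine linux_target () with linux_nat_add_target ();
   example_fetch_registers and example_store_registers are placeholder
   names, not real GDB functions.  */
void
_initialize_example_linux_nat (void)
{
  struct target_ops *t;

  /* Fill in the generic GNU/Linux methods.  */
  t = linux_target ();

  /* Add architecture-specific register access.  */
  t->to_fetch_registers = example_fetch_registers;
  t->to_store_registers = example_store_registers;

  /* Register the target; this overrides methods for multithreading.  */
  linux_nat_add_target (t);
}
#endif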
4171
9f0bdab8
DJ
4172/* Register a method to call whenever a new thread is attached. */
4173void
4174linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4175{
4176 /* Save the pointer. We only support a single registered instance
4177 of the GNU/Linux native target, so we do not need to map this to
4178 T. */
4179 linux_nat_new_thread = new_thread;
4180}
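/* For example (hypothetical), an x86 native file could arrange for debug
   registers to be copied to each new LWP:
     linux_nat_set_new_thread (t, example_update_debug_registers);
   where example_update_debug_registers is a placeholder callback taking
   the new thread's ptid_t.  */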
4181
4182/* Return the saved siginfo associated with PTID. */
4183struct siginfo *
4184linux_nat_get_siginfo (ptid_t ptid)
4185{
4186 struct lwp_info *lp = find_lwp_pid (ptid);
4187
4188 gdb_assert (lp != NULL);
4189
4190 return &lp->siginfo;
4191}
4192
d6b0e80f
AC
4193void
4194_initialize_linux_nat (void)
4195{
b84876c2 4196 sigset_t mask;
dba24537 4197
1bedd215
AC
4198 add_info ("proc", linux_nat_info_proc_cmd, _("\
4199Show /proc process information about any running process.\n\
dba24537
AC
4200Specify any process id, or use the program being debugged by default.\n\
4201Specify any of the following keywords for detailed info:\n\
4202 mappings -- list of mapped memory regions.\n\
4203 stat -- list a bunch of random process info.\n\
4204 status -- list a different bunch of random process info.\n\
1bedd215 4205 all -- list all available /proc info."));
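/* Example usage from the GDB prompt (the PID is illustrative):
     (gdb) info proc 1234 status
     (gdb) info proc mappings
   With no PID argument, the current inferior is used.  */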
d6b0e80f 4206
b84876c2
PA
4207 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4208 &debug_linux_nat, _("\
4209Set debugging of GNU/Linux lwp module."), _("\
4210Show debugging of GNU/Linux lwp module."), _("\
4211Enables printf debugging output."),
4212 NULL,
4213 show_debug_linux_nat,
4214 &setdebuglist, &showdebuglist);
4215
4216 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4217 &debug_linux_nat_async, _("\
4218Set debugging of GNU/Linux async lwp module."), _("\
4219Show debugging of GNU/Linux async lwp module."), _("\
4220Enables printf debugging output."),
4221 NULL,
4222 show_debug_linux_nat_async,
4223 &setdebuglist, &showdebuglist);
4224
4225 add_setshow_boolean_cmd ("linux-async", class_maintenance,
4226 &linux_async_permitted_1, _("\
4227Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4228Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4229Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."),
4230 set_maintenance_linux_async_permitted,
4231 show_maintenance_linux_async_permitted,
4232 &maintenance_set_cmdlist,
4233 &maintenance_show_cmdlist);
4234
4235 /* Block SIGCHLD by default. Doing this early prevents it getting
4236 unblocked if an exception is thrown due to an error while the
4237 inferior is starting (sigsetjmp/siglongjmp). */
4238 sigemptyset (&mask);
4239 sigaddset (&mask, SIGCHLD);
4240 sigprocmask (SIG_BLOCK, &mask, NULL);
4241
4242 /* Save this mask as the default. */
d6b0e80f
AC
4243 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4244
b84876c2
PA
4245 /* The synchronous SIGCHLD handler. */
4246 sync_sigchld_action.sa_handler = sigchld_handler;
4247 sigemptyset (&sync_sigchld_action.sa_mask);
4248 sync_sigchld_action.sa_flags = SA_RESTART;
4249
4250 /* Make it the default. */
4251 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
d6b0e80f
AC
4252
4253 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4254 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4255 sigdelset (&suspend_mask, SIGCHLD);
4256
b84876c2
PA
4257 /* SIGCHLD handler for async mode. */
4258 async_sigchld_action.sa_handler = async_sigchld_handler;
4259 sigemptyset (&async_sigchld_action.sa_mask);
4260 async_sigchld_action.sa_flags = SA_RESTART;
d6b0e80f 4261
b84876c2
PA
4262 /* Install the default mode. */
4263 linux_nat_set_async_mode (linux_async_permitted);
d6b0e80f
AC
4264}
4265\f
4266
4267/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4268 the GNU/Linux Threads library and therefore doesn't really belong
4269 here. */
4270
4271/* Read variable NAME in the target and return its value if found.
4272 Otherwise return zero. It is assumed that the type of the variable
4273 is `int'. */
4274
4275static int
4276get_signo (const char *name)
4277{
4278 struct minimal_symbol *ms;
4279 int signo;
4280
4281 ms = lookup_minimal_symbol (name, NULL, NULL);
4282 if (ms == NULL)
4283 return 0;
4284
8e70166d 4285 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4286 sizeof (signo)) != 0)
4287 return 0;
4288
4289 return signo;
4290}
4291
4292/* Return the set of signals used by the threads library in *SET. */
4293
4294void
4295lin_thread_get_thread_signals (sigset_t *set)
4296{
4297 struct sigaction action;
4298 int restart, cancel;
b84876c2 4299 sigset_t blocked_mask;
d6b0e80f 4300
b84876c2 4301 sigemptyset (&blocked_mask);
d6b0e80f
AC
4302 sigemptyset (set);
4303
4304 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4305 cancel = get_signo ("__pthread_sig_cancel");
4306
4307 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4308 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4309 not provide any way for the debugger to query the signal numbers -
4310 fortunately they don't change! */
4311
d6b0e80f 4312 if (restart == 0)
17fbb0bd 4313 restart = __SIGRTMIN;
d6b0e80f 4314
d6b0e80f 4315 if (cancel == 0)
17fbb0bd 4316 cancel = __SIGRTMIN + 1;
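/* For example, with glibc on GNU/Linux __SIGRTMIN is 32, so the fallback
   values above are typically signal 32 (restart) and signal 33 (cancel).  */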
d6b0e80f
AC
4317
4318 sigaddset (set, restart);
4319 sigaddset (set, cancel);
4320
4321 /* The GNU/Linux Threads library makes terminating threads send a
4322 special "cancel" signal instead of SIGCHLD. Make sure we catch
4323 those (to prevent them from terminating GDB itself, which is
4324 likely to be their default action) and treat them the same way as
4325 SIGCHLD. */
4326
4327 action.sa_handler = sigchld_handler;
4328 sigemptyset (&action.sa_mask);
58aecb61 4329 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4330 sigaction (cancel, &action, NULL);
4331
4332 /* We block the "cancel" signal throughout this code ... */
4333 sigaddset (&blocked_mask, cancel);
4334 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4335
4336 /* ... except during a sigsuspend. */
4337 sigdelset (&suspend_mask, cancel);
4338}