1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49
50 #ifndef O_LARGEFILE
51 #define O_LARGEFILE 0
52 #endif
53
54 /* If the system headers did not provide the constants, hard-code the normal
55 values. */
56 #ifndef PTRACE_EVENT_FORK
57
58 #define PTRACE_SETOPTIONS 0x4200
59 #define PTRACE_GETEVENTMSG 0x4201
60
61 /* Options set using PTRACE_SETOPTIONS. */
62 #define PTRACE_O_TRACESYSGOOD 0x00000001
63 #define PTRACE_O_TRACEFORK 0x00000002
64 #define PTRACE_O_TRACEVFORK 0x00000004
65 #define PTRACE_O_TRACECLONE 0x00000008
66 #define PTRACE_O_TRACEEXEC 0x00000010
67 #define PTRACE_O_TRACEVFORKDONE 0x00000020
68 #define PTRACE_O_TRACEEXIT 0x00000040
69
70 /* Wait extended result codes for the above trace options. */
71 #define PTRACE_EVENT_FORK 1
72 #define PTRACE_EVENT_VFORK 2
73 #define PTRACE_EVENT_CLONE 3
74 #define PTRACE_EVENT_EXEC 4
75 #define PTRACE_EVENT_VFORK_DONE 5
76 #define PTRACE_EVENT_EXIT 6
77
78 #endif /* PTRACE_EVENT_FORK */
79
80 /* We can't always assume that this flag is available, but all systems
81 with the ptrace event handlers also have __WALL, so it's safe to use
82 here. */
83 #ifndef __WALL
84 #define __WALL 0x40000000 /* Wait for any child. */
85 #endif
86
87 #ifndef PTRACE_GETSIGINFO
88 #define PTRACE_GETSIGINFO 0x4202
89 #endif
90
91 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
92 the use of the multi-threaded target. */
93 static struct target_ops *linux_ops;
94 static struct target_ops linux_ops_saved;
95
96 /* The method to call, if any, when a new thread is attached. */
97 static void (*linux_nat_new_thread) (ptid_t);
98
99 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
100 Called by our to_xfer_partial. */
101 static LONGEST (*super_xfer_partial) (struct target_ops *,
102 enum target_object,
103 const char *, gdb_byte *,
104 const gdb_byte *,
105 ULONGEST, LONGEST);
106
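/* Non-zero if debugging output for this module is enabled. */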
107 static int debug_linux_nat;
108 static void
109 show_debug_linux_nat (struct ui_file *file, int from_tty,
110 struct cmd_list_element *c, const char *value)
111 {
112 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
113 value);
114 }
115
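/* When following a vfork, the PID of the parent process that we are
   temporarily holding on to; zero when not in use. See
   linux_child_follow_fork and the PTRACE_EVENT_EXEC handling in
   linux_handle_extended_wait. */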
116 static int linux_parent_pid;
117
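/* A simple list of stopped child PIDs. Children that stop before the
   corresponding ptrace event has been processed are recorded here (via
   linux_record_stopped_pid) and consumed later by
   linux_handle_extended_wait. */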
118 struct simple_pid_list
119 {
120 int pid;
121 int status;
122 struct simple_pid_list *next;
123 };
124 struct simple_pid_list *stopped_pids;
125
126 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
127 cannot be used, 1 if it can. */
128
129 static int linux_supports_tracefork_flag = -1;
130
131 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
132 PTRACE_O_TRACEVFORKDONE. */
133
134 static int linux_supports_tracevforkdone_flag = -1;
135
136 \f
137 /* Trivial list manipulation functions to keep track of a list of
138 new stopped processes. */
139 static void
140 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
141 {
142 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
143 new_pid->pid = pid;
144 new_pid->status = status;
145 new_pid->next = *listp;
146 *listp = new_pid;
147 }
148
149 static int
150 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
151 {
152 struct simple_pid_list **p;
153
154 for (p = listp; *p != NULL; p = &(*p)->next)
155 if ((*p)->pid == pid)
156 {
157 struct simple_pid_list *next = (*p)->next;
158 *status = (*p)->status;
159 xfree (*p);
160 *p = next;
161 return 1;
162 }
163 return 0;
164 }
165
166 static void
167 linux_record_stopped_pid (int pid, int status)
168 {
169 add_to_pid_list (&stopped_pids, pid, status);
170 }
171
172 \f
173 /* A helper function for linux_test_for_tracefork, called after fork (). */
174
175 static void
176 linux_tracefork_child (void)
177 {
178 int ret;
179
180 ptrace (PTRACE_TRACEME, 0, 0, 0);
181 kill (getpid (), SIGSTOP);
182 fork ();
183 _exit (0);
184 }
185
186 /* Wrapper function for waitpid which handles EINTR. */
187
188 static int
189 my_waitpid (int pid, int *status, int flags)
190 {
191 int ret;
192 do
193 {
194 ret = waitpid (pid, status, flags);
195 }
196 while (ret == -1 && errno == EINTR);
197
198 return ret;
199 }
200
201 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
202
203 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
204 we know that the feature is not available. This may change the tracing
205 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
206
207 However, if it succeeds, we don't know for sure that the feature is
208 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
209 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
210 fork tracing, and let it fork. If the process exits, we assume that we
211 can't use TRACEFORK; if we get the fork notification, and we can extract
212 the new child's PID, then we assume that we can. */
213
214 static void
215 linux_test_for_tracefork (int original_pid)
216 {
217 int child_pid, ret, status;
218 long second_pid;
219
220 linux_supports_tracefork_flag = 0;
221 linux_supports_tracevforkdone_flag = 0;
222
223 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
224 if (ret != 0)
225 return;
226
227 child_pid = fork ();
228 if (child_pid == -1)
229 perror_with_name (("fork"));
230
231 if (child_pid == 0)
232 linux_tracefork_child ();
233
234 ret = my_waitpid (child_pid, &status, 0);
235 if (ret == -1)
236 perror_with_name (("waitpid"));
237 else if (ret != child_pid)
238 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
239 if (! WIFSTOPPED (status))
240 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
241
242 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
243 if (ret != 0)
244 {
245 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
246 if (ret != 0)
247 {
248 warning (_("linux_test_for_tracefork: failed to kill child"));
249 return;
250 }
251
252 ret = my_waitpid (child_pid, &status, 0);
253 if (ret != child_pid)
254 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
255 else if (!WIFSIGNALED (status))
256 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
257 "killed child"), status);
258
259 return;
260 }
261
262 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
263 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
264 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
265 linux_supports_tracevforkdone_flag = (ret == 0);
266
267 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
268 if (ret != 0)
269 warning (_("linux_test_for_tracefork: failed to resume child"));
270
271 ret = my_waitpid (child_pid, &status, 0);
272
273 if (ret == child_pid && WIFSTOPPED (status)
274 && status >> 16 == PTRACE_EVENT_FORK)
275 {
276 second_pid = 0;
277 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
278 if (ret == 0 && second_pid != 0)
279 {
280 int second_status;
281
282 linux_supports_tracefork_flag = 1;
283 my_waitpid (second_pid, &second_status, 0);
284 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
285 if (ret != 0)
286 warning (_("linux_test_for_tracefork: failed to kill second child"));
287 my_waitpid (second_pid, &status, 0);
288 }
289 }
290 else
291 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
292 "(%d, status 0x%x)"), ret, status);
293
294 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
295 if (ret != 0)
296 warning (_("linux_test_for_tracefork: failed to kill child"));
297 my_waitpid (child_pid, &status, 0);
298 }
299
300 /* Return non-zero iff we have tracefork functionality available.
301 This function also sets linux_supports_tracefork_flag. */
302
303 static int
304 linux_supports_tracefork (int pid)
305 {
306 if (linux_supports_tracefork_flag == -1)
307 linux_test_for_tracefork (pid);
308 return linux_supports_tracefork_flag;
309 }
310
311 static int
312 linux_supports_tracevforkdone (int pid)
313 {
314 if (linux_supports_tracefork_flag == -1)
315 linux_test_for_tracefork (pid);
316 return linux_supports_tracevforkdone_flag;
317 }
318
319 \f
320 void
321 linux_enable_event_reporting (ptid_t ptid)
322 {
323 int pid = ptid_get_lwp (ptid);
324 int options;
325
326 if (pid == 0)
327 pid = ptid_get_pid (ptid);
328
329 if (! linux_supports_tracefork (pid))
330 return;
331
332 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
333 | PTRACE_O_TRACECLONE;
334 if (linux_supports_tracevforkdone (pid))
335 options |= PTRACE_O_TRACEVFORKDONE;
336
337 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
338 read-only process state. */
339
340 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
341 }
342
343 static void
344 linux_child_post_attach (int pid)
345 {
346 linux_enable_event_reporting (pid_to_ptid (pid));
347 check_for_thread_db ();
348 }
349
350 static void
351 linux_child_post_startup_inferior (ptid_t ptid)
352 {
353 linux_enable_event_reporting (ptid);
354 check_for_thread_db ();
355 }
356
357 static int
358 linux_child_follow_fork (struct target_ops *ops, int follow_child)
359 {
360 ptid_t last_ptid;
361 struct target_waitstatus last_status;
362 int has_vforked;
363 int parent_pid, child_pid;
364
365 get_last_target_status (&last_ptid, &last_status);
366 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
367 parent_pid = ptid_get_lwp (last_ptid);
368 if (parent_pid == 0)
369 parent_pid = ptid_get_pid (last_ptid);
370 child_pid = last_status.value.related_pid;
371
372 if (! follow_child)
373 {
374 /* We're already attached to the parent, by default. */
375
376 /* Before detaching from the child, remove all breakpoints from
377 it. (This won't actually modify the breakpoint list, but will
378 physically remove the breakpoints from the child.) */
379 /* If we vforked this will remove the breakpoints from the parent
380 also, but they'll be reinserted below. */
381 detach_breakpoints (child_pid);
382
383 /* Detach new forked process? */
384 if (detach_fork)
385 {
386 if (debug_linux_nat)
387 {
388 target_terminal_ours ();
389 fprintf_filtered (gdb_stdlog,
390 "Detaching after fork from child process %d.\n",
391 child_pid);
392 }
393
394 ptrace (PTRACE_DETACH, child_pid, 0, 0);
395 }
396 else
397 {
398 struct fork_info *fp;
399 /* Retain child fork in ptrace (stopped) state. */
400 fp = find_fork_pid (child_pid);
401 if (!fp)
402 fp = add_fork (child_pid);
403 fork_save_infrun_state (fp, 0);
404 }
405
406 if (has_vforked)
407 {
408 gdb_assert (linux_supports_tracefork_flag >= 0);
409 if (linux_supports_tracevforkdone (0))
410 {
411 int status;
412
413 ptrace (PTRACE_CONT, parent_pid, 0, 0);
414 my_waitpid (parent_pid, &status, __WALL);
415 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
416 warning (_("Unexpected waitpid result %06x when waiting for "
417 "vfork-done"), status);
418 }
419 else
420 {
421 /* We can't insert breakpoints until the child has
422 finished with the shared memory region. We need to
423 wait until that happens. Ideally we would just
424 call:
425 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
426 - waitpid (parent_pid, &status, __WALL);
427 However, most architectures can't handle a syscall
428 being traced on the way out if it wasn't traced on
429 the way in.
430
431 We might also think to loop, continuing the child
432 until it exits or gets a SIGTRAP. One problem is
433 that the child might call ptrace with PTRACE_TRACEME.
434
435 There's no simple and reliable way to figure out when
436 the vforked child will be done with its copy of the
437 shared memory. We could step it out of the syscall,
438 two instructions, let it go, and then single-step the
439 parent once. When we have hardware single-step, this
440 would work; with software single-step it could still
441 be made to work but we'd have to be able to insert
442 single-step breakpoints in the child, and we'd have
443 to insert -just- the single-step breakpoint in the
444 parent. Very awkward.
445
446 In the end, the best we can do is to make sure it
447 runs for a little while. Hopefully it will be out of
448 range of any breakpoints we reinsert. Usually this
449 is only the single-step breakpoint at vfork's return
450 point. */
451
452 usleep (10000);
453 }
454
455 /* Since we vforked, breakpoints were removed in the parent
456 too. Put them back. */
457 reattach_breakpoints (parent_pid);
458 }
459 }
460 else
461 {
462 char child_pid_spelling[40];
463
464 /* Needed to keep the breakpoint lists in sync. */
465 if (! has_vforked)
466 detach_breakpoints (child_pid);
467
468 /* Before detaching from the parent, remove all breakpoints from it. */
469 remove_breakpoints ();
470
471 if (debug_linux_nat)
472 {
473 target_terminal_ours ();
474 fprintf_filtered (gdb_stdlog,
475 "Attaching after fork to child process %d.\n",
476 child_pid);
477 }
478
479 /* If we're vforking, we may want to hold on to the parent until
480 the child exits or execs. At exec time we can remove the old
481 breakpoints from the parent and detach it; at exit time we
482 could do the same (or even, sneakily, resume debugging it - the
483 child's exec has failed, or something similar).
484
485 This doesn't clean up "properly", because we can't call
486 target_detach, but that's OK; if the current target is "child",
487 then it doesn't need any further cleanups, and lin_lwp will
488 generally not encounter vfork (vfork is defined to fork
489 in libpthread.so).
490
491 The holding part is very easy if we have VFORKDONE events;
492 but keeping track of both processes is beyond GDB at the
493 moment. So we don't expose the parent to the rest of GDB.
494 Instead we quietly hold onto it until such time as we can
495 safely resume it. */
496
497 if (has_vforked)
498 linux_parent_pid = parent_pid;
499 else if (!detach_fork)
500 {
501 struct fork_info *fp;
502 /* Retain parent fork in ptrace (stopped) state. */
503 fp = find_fork_pid (parent_pid);
504 if (!fp)
505 fp = add_fork (parent_pid);
506 fork_save_infrun_state (fp, 0);
507 }
508 else
509 {
510 target_detach (NULL, 0);
511 }
512
513 inferior_ptid = ptid_build (child_pid, child_pid, 0);
514
515 /* Reinstall ourselves, since we might have been removed in
516 target_detach (which does other necessary cleanup). */
517
518 push_target (ops);
519 linux_nat_switch_fork (inferior_ptid);
520
521 /* Reset breakpoints in the child as appropriate. */
522 follow_inferior_reset_breakpoints ();
523 }
524
525 return 0;
526 }
527
528 \f
529 static void
530 linux_child_insert_fork_catchpoint (int pid)
531 {
532 if (! linux_supports_tracefork (pid))
533 error (_("Your system does not support fork catchpoints."));
534 }
535
536 static void
537 linux_child_insert_vfork_catchpoint (int pid)
538 {
539 if (!linux_supports_tracefork (pid))
540 error (_("Your system does not support vfork catchpoints."));
541 }
542
543 static void
544 linux_child_insert_exec_catchpoint (int pid)
545 {
546 if (!linux_supports_tracefork (pid))
547 error (_("Your system does not support exec catchpoints."));
548 }
549
550 /* On GNU/Linux there are no real LWPs. The closest things to LWPs
551 are processes sharing the same VM space. A multi-threaded process
552 is basically a group of such processes. However, such a grouping
553 is almost entirely a user-space issue; the kernel doesn't enforce
554 such a grouping at all (this might change in the future). In
555 general, we'll rely on the threads library (i.e. the GNU/Linux
556 Threads library) to provide such a grouping.
557
558 It is perfectly possible to write a multi-threaded application
559 without the assistance of a threads library, by using the clone
560 system call directly. This module should be able to give some
561 rudimentary support for debugging such applications if developers
562 specify the CLONE_PTRACE flag in the clone system call, and are
563 using the Linux kernel 2.4 or above.
564
565 Note that there are some peculiarities in GNU/Linux that affect
566 this code:
567
568 - In general one should specify the __WCLONE flag to waitpid in
569 order to make it report events for any of the cloned processes
570 (and leave it out for the initial process). However, if a cloned
571 process has exited, the exit status is only reported if the
572 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
573 we cannot use it since GDB must work on older systems too.
574
575 - When a traced, cloned process exits and is waited for by the
576 debugger, the kernel reassigns it to the original parent and
577 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
578 library doesn't notice this, which leads to the "zombie problem":
579 When debugged, a multi-threaded process that spawns a lot of
580 threads will run out of processes, even if the threads exit,
581 because the "zombies" stay around. */
582
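/* In practice this means that, throughout this file, events from a
   cloned LWP are collected with waitpid (pid, &status, __WCLONE), and
   a plain waitpid that fails with ECHILD is retried with __WCLONE (or
   the other way around); see e.g. wait_lwp and lin_lwp_attach_lwp. */
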
583 /* List of known LWPs. */
584 struct lwp_info *lwp_list;
585
586 /* Number of LWPs in the list. */
587 static int num_lwps;
588 \f
589
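/* Convenience macros for LWP ptids. The LWP id is kept in the lwp
   field of a ptid; a ptid whose lwp field is zero denotes a plain
   process. */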
590 #define GET_LWP(ptid) ptid_get_lwp (ptid)
591 #define GET_PID(ptid) ptid_get_pid (ptid)
592 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
593 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
594
595 /* If the last reported event was a SIGTRAP, this variable is set to
596 the process id of the LWP/thread that got it. */
597 ptid_t trap_ptid;
598 \f
599
600 /* Since we cannot wait (in linux_nat_wait) for the initial process and
601 any cloned processes with a single call to waitpid, we have to use
602 the WNOHANG flag and call waitpid in a loop. To optimize
603 things a bit we use `sigsuspend' to wake us up when a process has
604 something to report (it will send us a SIGCHLD if it has). To make
605 this work we have to juggle with the signal mask. We save the
606 original signal mask such that we can restore it before creating a
607 new process in order to avoid blocking certain signals in the
608 inferior. We then block SIGCHLD during the waitpid/sigsuspend
609 loop. */
610
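/* A rough sketch of that loop, as implemented in linux_nat_wait
   further below (SIGCHLD is assumed to be blocked already):

     while (no event yet)
       {
         lwpid = my_waitpid (pid, &status, options | WNOHANG);
         if (lwpid > 0)
           ... process or queue the event ...
         else
           sigsuspend (&suspend_mask);  // sleep until SIGCHLD arrives
       }
*/
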
611 /* Original signal mask. */
612 static sigset_t normal_mask;
613
614 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
615 _initialize_linux_nat. */
616 static sigset_t suspend_mask;
617
618 /* Signals to block to make sigsuspend work. */
619 static sigset_t blocked_mask;
620 \f
621
622 /* Prototypes for local functions. */
623 static int stop_wait_callback (struct lwp_info *lp, void *data);
624 static int linux_nat_thread_alive (ptid_t ptid);
625 static char *linux_child_pid_to_exec_file (int pid);
626 \f
627 /* Convert wait status STATUS to a string. Used for printing debug
628 messages only. */
629
630 static char *
631 status_to_str (int status)
632 {
633 static char buf[64];
634
635 if (WIFSTOPPED (status))
636 snprintf (buf, sizeof (buf), "%s (stopped)",
637 strsignal (WSTOPSIG (status)));
638 else if (WIFSIGNALED (status))
639 snprintf (buf, sizeof (buf), "%s (terminated)",
640 strsignal (WTERMSIG (status)));
641 else
642 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
643
644 return buf;
645 }
646
647 /* Initialize the list of LWPs. Note that this module, contrary to
648 what GDB's generic threads layer does for its thread list,
649 re-initializes the LWP list whenever we mourn the inferior or
650 detach from it (detaching doesn't involve mourning). */
651
652 static void
653 init_lwp_list (void)
654 {
655 struct lwp_info *lp, *lpnext;
656
657 for (lp = lwp_list; lp; lp = lpnext)
658 {
659 lpnext = lp->next;
660 xfree (lp);
661 }
662
663 lwp_list = NULL;
664 num_lwps = 0;
665 }
666
667 /* Add the LWP specified by PTID to the list. Return a pointer to the
668 structure describing the new LWP. The LWP should already be stopped
669 (with an exception for the very first LWP). */
670
671 static struct lwp_info *
672 add_lwp (ptid_t ptid)
673 {
674 struct lwp_info *lp;
675
676 gdb_assert (is_lwp (ptid));
677
678 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
679
680 memset (lp, 0, sizeof (struct lwp_info));
681
682 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
683
684 lp->ptid = ptid;
685
686 lp->next = lwp_list;
687 lwp_list = lp;
688 ++num_lwps;
689
690 if (num_lwps > 1 && linux_nat_new_thread != NULL)
691 linux_nat_new_thread (ptid);
692
693 return lp;
694 }
695
696 /* Remove the LWP specified by PTID from the list. */
697
698 static void
699 delete_lwp (ptid_t ptid)
700 {
701 struct lwp_info *lp, *lpprev;
702
703 lpprev = NULL;
704
705 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
706 if (ptid_equal (lp->ptid, ptid))
707 break;
708
709 if (!lp)
710 return;
711
712 num_lwps--;
713
714 if (lpprev)
715 lpprev->next = lp->next;
716 else
717 lwp_list = lp->next;
718
719 xfree (lp);
720 }
721
722 /* Return a pointer to the structure describing the LWP corresponding
723 to PID. If no corresponding LWP could be found, return NULL. */
724
725 static struct lwp_info *
726 find_lwp_pid (ptid_t ptid)
727 {
728 struct lwp_info *lp;
729 int lwp;
730
731 if (is_lwp (ptid))
732 lwp = GET_LWP (ptid);
733 else
734 lwp = GET_PID (ptid);
735
736 for (lp = lwp_list; lp; lp = lp->next)
737 if (lwp == GET_LWP (lp->ptid))
738 return lp;
739
740 return NULL;
741 }
742
743 /* Call CALLBACK with its second argument set to DATA for every LWP in
744 the list. If CALLBACK returns 1 for a particular LWP, return a
745 pointer to the structure describing that LWP immediately.
746 Otherwise return NULL. */
747
748 struct lwp_info *
749 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
750 {
751 struct lwp_info *lp, *lpnext;
752
753 for (lp = lwp_list; lp; lp = lpnext)
754 {
755 lpnext = lp->next;
756 if ((*callback) (lp, data))
757 return lp;
758 }
759
760 return NULL;
761 }
762
763 /* Update our internal state when changing from one fork (checkpoint,
764 et cetera) to another indicated by NEW_PTID. We can only switch
765 single-threaded applications, so we only create one new LWP, and
766 the previous list is discarded. */
767
768 void
769 linux_nat_switch_fork (ptid_t new_ptid)
770 {
771 struct lwp_info *lp;
772
773 init_lwp_list ();
774 lp = add_lwp (new_ptid);
775 lp->stopped = 1;
776 }
777
778 /* Record a PTID for later deletion. */
779
780 struct saved_ptids
781 {
782 ptid_t ptid;
783 struct saved_ptids *next;
784 };
785 static struct saved_ptids *threads_to_delete;
786
787 static void
788 record_dead_thread (ptid_t ptid)
789 {
790 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
791 p->ptid = ptid;
792 p->next = threads_to_delete;
793 threads_to_delete = p;
794 }
795
796 /* Delete any dead threads which are not the current thread. */
797
798 static void
799 prune_lwps (void)
800 {
801 struct saved_ptids **p = &threads_to_delete;
802
803 while (*p)
804 if (! ptid_equal ((*p)->ptid, inferior_ptid))
805 {
806 struct saved_ptids *tmp = *p;
807 delete_thread (tmp->ptid);
808 *p = tmp->next;
809 xfree (tmp);
810 }
811 else
812 p = &(*p)->next;
813 }
814
815 /* Callback for iterate_over_threads that finds a thread corresponding
816 to the given LWP. */
817
818 static int
819 find_thread_from_lwp (struct thread_info *thr, void *dummy)
820 {
821 ptid_t *ptid_p = dummy;
822
823 if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p))
824 return 1;
825 else
826 return 0;
827 }
828
829 /* Handle the exit of a single thread LP. */
830
831 static void
832 exit_lwp (struct lwp_info *lp)
833 {
834 if (in_thread_list (lp->ptid))
835 {
836 /* Core GDB cannot deal with us deleting the current thread. */
837 if (!ptid_equal (lp->ptid, inferior_ptid))
838 delete_thread (lp->ptid);
839 else
840 record_dead_thread (lp->ptid);
841 printf_unfiltered (_("[%s exited]\n"),
842 target_pid_to_str (lp->ptid));
843 }
844 else
845 {
846 /* Even if LP->PTID is not in the global GDB thread list, the
847 LWP may be - with an additional thread ID. We don't need
848 to print anything in this case; thread_db is in use and
849 already took care of that. But it didn't delete the thread
850 in order to handle zombies correctly. */
851
852 struct thread_info *thr;
853
854 thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid);
855 if (thr)
856 {
857 if (!ptid_equal (thr->ptid, inferior_ptid))
858 delete_thread (thr->ptid);
859 else
860 record_dead_thread (thr->ptid);
861 }
862 }
863
864 delete_lwp (lp->ptid);
865 }
866
867 /* Attach to the LWP specified by PTID. If VERBOSE is non-zero, print
868 a message telling the user that a new LWP has been added to the
869 process. Return 0 if successful or -1 if the new LWP could not
870 be attached. */
871
872 int
873 lin_lwp_attach_lwp (ptid_t ptid, int verbose)
874 {
875 struct lwp_info *lp;
876
877 gdb_assert (is_lwp (ptid));
878
879 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
880 to interrupt either the ptrace() or waitpid() calls below. */
881 if (!sigismember (&blocked_mask, SIGCHLD))
882 {
883 sigaddset (&blocked_mask, SIGCHLD);
884 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
885 }
886
887 lp = find_lwp_pid (ptid);
888
889 /* We assume that we're already attached to any LWP that has an id
890 equal to the overall process id, and to any LWP that is already
891 in our list of LWPs. If we're not seeing exit events from threads
892 and we've had PID wraparound since we last tried to stop all threads,
893 this assumption might be wrong; fortunately, this is very unlikely
894 to happen. */
895 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
896 {
897 pid_t pid;
898 int status;
899 int cloned = 0;
900
901 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
902 {
903 /* If we fail to attach to the thread, issue a warning,
904 but continue. One way this can happen is if thread
905 creation is interrupted; as of Linux 2.6.19, a kernel
906 bug may place threads in the thread list and then fail
907 to create them. */
908 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
909 safe_strerror (errno));
910 return -1;
911 }
912
913 if (debug_linux_nat)
914 fprintf_unfiltered (gdb_stdlog,
915 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
916 target_pid_to_str (ptid));
917
918 pid = my_waitpid (GET_LWP (ptid), &status, 0);
919 if (pid == -1 && errno == ECHILD)
920 {
921 /* Try again with __WCLONE to check cloned processes. */
922 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
923 cloned = 1;
924 }
925
926 gdb_assert (pid == GET_LWP (ptid)
927 && WIFSTOPPED (status) && WSTOPSIG (status));
928
929 if (lp == NULL)
930 lp = add_lwp (ptid);
931 lp->cloned = cloned;
932
933 target_post_attach (pid);
934
935 lp->stopped = 1;
936
937 if (debug_linux_nat)
938 {
939 fprintf_unfiltered (gdb_stdlog,
940 "LLAL: waitpid %s received %s\n",
941 target_pid_to_str (ptid),
942 status_to_str (status));
943 }
944 }
945 else
946 {
947 /* We assume that the LWP representing the original process is
948 already stopped. Mark it as stopped in the data structure
949 that the GNU/linux ptrace layer uses to keep track of
950 threads. Note that this won't have already been done since
951 the main thread will have, we assume, been stopped by an
952 attach from a different layer. */
953 if (lp == NULL)
954 lp = add_lwp (ptid);
955 lp->stopped = 1;
956 }
957
958 if (verbose)
959 printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));
960
961 return 0;
962 }
963
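/* Attach to the process given in ARGS via the layer beneath us, wait
   for the initial stop, and record the process as the first LWP. A
   SIGSTOP is faked as the pending status so that core GDB sees the
   stop it expects after an attach. */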
964 static void
965 linux_nat_attach (char *args, int from_tty)
966 {
967 struct lwp_info *lp;
968 pid_t pid;
969 int status;
970 int cloned = 0;
971
972 /* FIXME: We should probably accept a list of process id's, and
973 attach all of them. */
974 linux_ops->to_attach (args, from_tty);
975
976 /* Make sure the initial process is stopped. The user-level threads
977 layer might want to poke around in the inferior, and that won't
978 work if things haven't stabilized yet. */
979 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
980 if (pid == -1 && errno == ECHILD)
981 {
982 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
983
984 /* Try again with __WCLONE to check cloned processes. */
985 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
986 cloned = 1;
987 }
988
989 gdb_assert (pid == GET_PID (inferior_ptid)
990 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
991
992 /* Add the initial process as the first LWP to the list. */
993 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
994 lp = add_lwp (inferior_ptid);
995 lp->cloned = cloned;
996
997 lp->stopped = 1;
998
999 /* Fake the SIGSTOP that core GDB expects. */
1000 lp->status = W_STOPCODE (SIGSTOP);
1001 lp->resumed = 1;
1002 if (debug_linux_nat)
1003 {
1004 fprintf_unfiltered (gdb_stdlog,
1005 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
1006 }
1007 }
1008
1009 static int
1010 detach_callback (struct lwp_info *lp, void *data)
1011 {
1012 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1013
1014 if (debug_linux_nat && lp->status)
1015 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1016 strsignal (WSTOPSIG (lp->status)),
1017 target_pid_to_str (lp->ptid));
1018
1019 while (lp->signalled && lp->stopped)
1020 {
1021 errno = 0;
1022 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1023 WSTOPSIG (lp->status)) < 0)
1024 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
1025 safe_strerror (errno));
1026
1027 if (debug_linux_nat)
1028 fprintf_unfiltered (gdb_stdlog,
1029 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1030 target_pid_to_str (lp->ptid),
1031 status_to_str (lp->status));
1032
1033 lp->stopped = 0;
1034 lp->signalled = 0;
1035 lp->status = 0;
1036 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1037 here. But since lp->signalled was cleared above,
1038 stop_wait_callback didn't do anything; the process was left
1039 running. Shouldn't we be waiting for it to stop?
1040 I've removed the call, since stop_wait_callback now does do
1041 something when called with lp->signalled == 0. */
1042
1043 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1044 }
1045
1046 /* We don't actually detach from the LWP that has an id equal to the
1047 overall process id just yet. */
1048 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1049 {
1050 errno = 0;
1051 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1052 WSTOPSIG (lp->status)) < 0)
1053 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1054 safe_strerror (errno));
1055
1056 if (debug_linux_nat)
1057 fprintf_unfiltered (gdb_stdlog,
1058 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1059 target_pid_to_str (lp->ptid),
1060 strsignal (WSTOPSIG (lp->status)));
1061
1062 delete_lwp (lp->ptid);
1063 }
1064
1065 return 0;
1066 }
1067
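/* Detach from the inferior. Each LWP other than the main one is
   detached individually (passing along any pending stop signal); the
   main process is then detached by the layer beneath us. */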
1068 static void
1069 linux_nat_detach (char *args, int from_tty)
1070 {
1071 iterate_over_lwps (detach_callback, NULL);
1072
1073 /* Only the initial process should be left right now. */
1074 gdb_assert (num_lwps == 1);
1075
1076 trap_ptid = null_ptid;
1077
1078 /* Destroy LWP info; it's no longer valid. */
1079 init_lwp_list ();
1080
1081 /* Restore the original signal mask. */
1082 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1083 sigemptyset (&blocked_mask);
1084
1085 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1086 linux_ops->to_detach (args, from_tty);
1087 }
1088
1089 /* Resume LP. */
1090
1091 static int
1092 resume_callback (struct lwp_info *lp, void *data)
1093 {
1094 if (lp->stopped && lp->status == 0)
1095 {
1096 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1097 0, TARGET_SIGNAL_0);
1098 if (debug_linux_nat)
1099 fprintf_unfiltered (gdb_stdlog,
1100 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1101 target_pid_to_str (lp->ptid));
1102 lp->stopped = 0;
1103 lp->step = 0;
1104 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1105 }
1106
1107 return 0;
1108 }
1109
1110 static int
1111 resume_clear_callback (struct lwp_info *lp, void *data)
1112 {
1113 lp->resumed = 0;
1114 return 0;
1115 }
1116
1117 static int
1118 resume_set_callback (struct lwp_info *lp, void *data)
1119 {
1120 lp->resumed = 1;
1121 return 0;
1122 }
1123
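/* Resume the LWP identified by PTID with signal SIGNO, single-stepping
   if STEP is set. If PTID's pid is -1, every known LWP is marked
   resumed and the current inferior thread is the one resumed with
   SIGNO. If the target LWP already has a wait status queued, the
   resume is short-circuited and the status is left for linux_nat_wait
   to report. */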
1124 static void
1125 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1126 {
1127 struct lwp_info *lp;
1128 int resume_all;
1129
1130 if (debug_linux_nat)
1131 fprintf_unfiltered (gdb_stdlog,
1132 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1133 step ? "step" : "resume",
1134 target_pid_to_str (ptid),
1135 signo ? strsignal (signo) : "0",
1136 target_pid_to_str (inferior_ptid));
1137
1138 prune_lwps ();
1139
1140 /* A specific PTID means `step only this process id'. */
1141 resume_all = (PIDGET (ptid) == -1);
1142
1143 if (resume_all)
1144 iterate_over_lwps (resume_set_callback, NULL);
1145 else
1146 iterate_over_lwps (resume_clear_callback, NULL);
1147
1148 /* If PID is -1, it's the current inferior that should be
1149 handled specially. */
1150 if (PIDGET (ptid) == -1)
1151 ptid = inferior_ptid;
1152
1153 lp = find_lwp_pid (ptid);
1154 gdb_assert (lp != NULL);
1155
1156 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1157
1158 /* Remember if we're stepping. */
1159 lp->step = step;
1160
1161 /* Mark this LWP as resumed. */
1162 lp->resumed = 1;
1163
1164 /* If we have a pending wait status for this thread, there is no
1165 point in resuming the process. But first make sure that
1166 linux_nat_wait won't preemptively handle the event - we
1167 should never take this short-circuit if we are going to
1168 leave LP running, since we have skipped resuming all the
1169 other threads. This bit of code needs to be synchronized
1170 with linux_nat_wait. */
1171
1172 if (lp->status && WIFSTOPPED (lp->status))
1173 {
1174 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1175
1176 if (signal_stop_state (saved_signo) == 0
1177 && signal_print_state (saved_signo) == 0
1178 && signal_pass_state (saved_signo) == 1)
1179 {
1180 if (debug_linux_nat)
1181 fprintf_unfiltered (gdb_stdlog,
1182 "LLR: Not short circuiting for ignored "
1183 "status 0x%x\n", lp->status);
1184
1185 /* FIXME: What should we do if we are supposed to continue
1186 this thread with a signal? */
1187 gdb_assert (signo == TARGET_SIGNAL_0);
1188 signo = saved_signo;
1189 lp->status = 0;
1190 }
1191 }
1192
1193 if (lp->status)
1194 {
1195 /* FIXME: What should we do if we are supposed to continue
1196 this thread with a signal? */
1197 gdb_assert (signo == TARGET_SIGNAL_0);
1198
1199 if (debug_linux_nat)
1200 fprintf_unfiltered (gdb_stdlog,
1201 "LLR: Short circuiting for status 0x%x\n",
1202 lp->status);
1203
1204 return;
1205 }
1206
1207 /* Mark LWP as not stopped to prevent it from being continued by
1208 resume_callback. */
1209 lp->stopped = 0;
1210
1211 if (resume_all)
1212 iterate_over_lwps (resume_callback, NULL);
1213
1214 linux_ops->to_resume (ptid, step, signo);
1215 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1216
1217 if (debug_linux_nat)
1218 fprintf_unfiltered (gdb_stdlog,
1219 "LLR: %s %s, %s (resume event thread)\n",
1220 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1221 target_pid_to_str (ptid),
1222 signo ? strsignal (signo) : "0");
1223 }
1224
1225 /* Issue kill to specified lwp. */
1226
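/* Non-zero once a tkill syscall has failed with ENOSYS, in which case
   kill_lwp falls back to plain kill. */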
1227 static int tkill_failed;
1228
1229 static int
1230 kill_lwp (int lwpid, int signo)
1231 {
1232 errno = 0;
1233
1234 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1235 fails, then we are not using nptl threads and we should be using kill. */
1236
1237 #ifdef HAVE_TKILL_SYSCALL
1238 if (!tkill_failed)
1239 {
1240 int ret = syscall (__NR_tkill, lwpid, signo);
1241 if (errno != ENOSYS)
1242 return ret;
1243 errno = 0;
1244 tkill_failed = 1;
1245 }
1246 #endif
1247
1248 return kill (lwpid, signo);
1249 }
1250
1251 /* Handle a GNU/Linux extended wait response. If we see a clone
1252 event, we need to add the new LWP to our list (and not report the
1253 trap to higher layers). This function returns non-zero if the
1254 event should be ignored and we should wait again. If STOPPING is
1255 true, the new LWP remains stopped, otherwise it is continued. */
1256
1257 static int
1258 linux_handle_extended_wait (struct lwp_info *lp, int status,
1259 int stopping)
1260 {
1261 int pid = GET_LWP (lp->ptid);
1262 struct target_waitstatus *ourstatus = &lp->waitstatus;
1263 struct lwp_info *new_lp = NULL;
1264 int event = status >> 16;
1265
1266 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1267 || event == PTRACE_EVENT_CLONE)
1268 {
1269 unsigned long new_pid;
1270 int ret;
1271
1272 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1273
1274 /* If we haven't already seen the new PID stop, wait for it now. */
1275 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1276 {
1277 /* The new child has a pending SIGSTOP. We can't affect it until it
1278 hits the SIGSTOP, but we're already attached. */
1279 ret = my_waitpid (new_pid, &status,
1280 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1281 if (ret == -1)
1282 perror_with_name (_("waiting for new child"));
1283 else if (ret != new_pid)
1284 internal_error (__FILE__, __LINE__,
1285 _("wait returned unexpected PID %d"), ret);
1286 else if (!WIFSTOPPED (status))
1287 internal_error (__FILE__, __LINE__,
1288 _("wait returned unexpected status 0x%x"), status);
1289 }
1290
1291 ourstatus->value.related_pid = new_pid;
1292
1293 if (event == PTRACE_EVENT_FORK)
1294 ourstatus->kind = TARGET_WAITKIND_FORKED;
1295 else if (event == PTRACE_EVENT_VFORK)
1296 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1297 else
1298 {
1299 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1300 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1301 new_lp->cloned = 1;
1302
1303 if (WSTOPSIG (status) != SIGSTOP)
1304 {
1305 /* This can happen if someone starts sending signals with
1306 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
1307 thread before it gets a chance to run.
1308 This is an unlikely case, and harder to handle for
1309 fork / vfork than for clone, so we do not try - but
1310 we handle it for clone events here. We'll send
1311 the other signal on to the thread below. */
1312
1313 new_lp->signalled = 1;
1314 }
1315 else
1316 status = 0;
1317
1318 if (stopping)
1319 new_lp->stopped = 1;
1320 else
1321 {
1322 new_lp->resumed = 1;
1323 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
1324 status ? WSTOPSIG (status) : 0);
1325 }
1326
1327 if (debug_linux_nat)
1328 fprintf_unfiltered (gdb_stdlog,
1329 "LHEW: Got clone event from LWP %ld, resuming\n",
1330 GET_LWP (lp->ptid));
1331 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1332
1333 return 1;
1334 }
1335
1336 return 0;
1337 }
1338
1339 if (event == PTRACE_EVENT_EXEC)
1340 {
1341 ourstatus->kind = TARGET_WAITKIND_EXECD;
1342 ourstatus->value.execd_pathname
1343 = xstrdup (linux_child_pid_to_exec_file (pid));
1344
1345 if (linux_parent_pid)
1346 {
1347 detach_breakpoints (linux_parent_pid);
1348 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1349
1350 linux_parent_pid = 0;
1351 }
1352
1353 return 0;
1354 }
1355
1356 internal_error (__FILE__, __LINE__,
1357 _("unknown ptrace event %d"), event);
1358 }
1359
1360 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1361 exited. */
1362
1363 static int
1364 wait_lwp (struct lwp_info *lp)
1365 {
1366 pid_t pid;
1367 int status;
1368 int thread_dead = 0;
1369
1370 gdb_assert (!lp->stopped);
1371 gdb_assert (lp->status == 0);
1372
1373 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1374 if (pid == -1 && errno == ECHILD)
1375 {
1376 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1377 if (pid == -1 && errno == ECHILD)
1378 {
1379 /* The thread has previously exited. We need to delete it
1380 now because, for some vendor 2.4 kernels with NPTL
1381 support backported, there won't be an exit event unless
1382 it is the main thread. 2.6 kernels will report an exit
1383 event for each thread that exits, as expected. */
1384 thread_dead = 1;
1385 if (debug_linux_nat)
1386 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1387 target_pid_to_str (lp->ptid));
1388 }
1389 }
1390
1391 if (!thread_dead)
1392 {
1393 gdb_assert (pid == GET_LWP (lp->ptid));
1394
1395 if (debug_linux_nat)
1396 {
1397 fprintf_unfiltered (gdb_stdlog,
1398 "WL: waitpid %s received %s\n",
1399 target_pid_to_str (lp->ptid),
1400 status_to_str (status));
1401 }
1402 }
1403
1404 /* Check if the thread has exited. */
1405 if (WIFEXITED (status) || WIFSIGNALED (status))
1406 {
1407 thread_dead = 1;
1408 if (debug_linux_nat)
1409 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1410 target_pid_to_str (lp->ptid));
1411 }
1412
1413 if (thread_dead)
1414 {
1415 exit_lwp (lp);
1416 return 0;
1417 }
1418
1419 gdb_assert (WIFSTOPPED (status));
1420
1421 /* Handle GNU/Linux's extended waitstatus for trace events. */
1422 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1423 {
1424 if (debug_linux_nat)
1425 fprintf_unfiltered (gdb_stdlog,
1426 "WL: Handling extended status 0x%06x\n",
1427 status);
1428 if (linux_handle_extended_wait (lp, status, 1))
1429 return wait_lwp (lp);
1430 }
1431
1432 return status;
1433 }
1434
1435 /* Save the most recent siginfo for LP. This is currently only called
1436 for SIGTRAP; some ports use the si_addr field for
1437 target_stopped_data_address. In the future, it may also be used to
1438 restore the siginfo of requeued signals. */
1439
1440 static void
1441 save_siginfo (struct lwp_info *lp)
1442 {
1443 errno = 0;
1444 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
1445 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
1446
1447 if (errno != 0)
1448 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1449 }
1450
1451 /* Send a SIGSTOP to LP. */
1452
1453 static int
1454 stop_callback (struct lwp_info *lp, void *data)
1455 {
1456 if (!lp->stopped && !lp->signalled)
1457 {
1458 int ret;
1459
1460 if (debug_linux_nat)
1461 {
1462 fprintf_unfiltered (gdb_stdlog,
1463 "SC: kill %s **<SIGSTOP>**\n",
1464 target_pid_to_str (lp->ptid));
1465 }
1466 errno = 0;
1467 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1468 if (debug_linux_nat)
1469 {
1470 fprintf_unfiltered (gdb_stdlog,
1471 "SC: lwp kill %d %s\n",
1472 ret,
1473 errno ? safe_strerror (errno) : "ERRNO-OK");
1474 }
1475
1476 lp->signalled = 1;
1477 gdb_assert (lp->status == 0);
1478 }
1479
1480 return 0;
1481 }
1482
1483 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1484 a pointer to a set of signals to be flushed immediately. */
1485
1486 static int
1487 stop_wait_callback (struct lwp_info *lp, void *data)
1488 {
1489 sigset_t *flush_mask = data;
1490
1491 if (!lp->stopped)
1492 {
1493 int status;
1494
1495 status = wait_lwp (lp);
1496 if (status == 0)
1497 return 0;
1498
1499 /* Ignore any signals in FLUSH_MASK. */
1500 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1501 {
1502 if (!lp->signalled)
1503 {
1504 lp->stopped = 1;
1505 return 0;
1506 }
1507
1508 errno = 0;
1509 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1510 if (debug_linux_nat)
1511 fprintf_unfiltered (gdb_stdlog,
1512 "PTRACE_CONT %s, 0, 0 (%s)\n",
1513 target_pid_to_str (lp->ptid),
1514 errno ? safe_strerror (errno) : "OK");
1515
1516 return stop_wait_callback (lp, flush_mask);
1517 }
1518
1519 if (WSTOPSIG (status) != SIGSTOP)
1520 {
1521 if (WSTOPSIG (status) == SIGTRAP)
1522 {
1523 /* If a LWP other than the LWP that we're reporting an
1524 event for has hit a GDB breakpoint (as opposed to
1525 some random trap signal), then just arrange for it to
1526 hit it again later. We don't keep the SIGTRAP status
1527 and don't forward the SIGTRAP signal to the LWP. We
1528 will handle the current event, eventually we will
1529 resume all LWPs, and this one will get its breakpoint
1530 trap again.
1531
1532 If we do not do this, then we run the risk that the
1533 user will delete or disable the breakpoint, but the
1534 thread will have already tripped on it. */
1535
1536 /* Save the trap's siginfo in case we need it later. */
1537 save_siginfo (lp);
1538
1539 /* Now resume this LWP and get the SIGSTOP event. */
1540 errno = 0;
1541 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1542 if (debug_linux_nat)
1543 {
1544 fprintf_unfiltered (gdb_stdlog,
1545 "PTRACE_CONT %s, 0, 0 (%s)\n",
1546 target_pid_to_str (lp->ptid),
1547 errno ? safe_strerror (errno) : "OK");
1548
1549 fprintf_unfiltered (gdb_stdlog,
1550 "SWC: Candidate SIGTRAP event in %s\n",
1551 target_pid_to_str (lp->ptid));
1552 }
1553 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1554 stop_wait_callback (lp, data);
1555 /* If there's another event, throw it back into the queue. */
1556 if (lp->status)
1557 {
1558 if (debug_linux_nat)
1559 {
1560 fprintf_unfiltered (gdb_stdlog,
1561 "SWC: kill %s, %s\n",
1562 target_pid_to_str (lp->ptid),
1563 status_to_str ((int) status));
1564 }
1565 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1566 }
1567 /* Save the sigtrap event. */
1568 lp->status = status;
1569 return 0;
1570 }
1571 else
1572 {
1573 /* The thread was stopped with a signal other than
1574 SIGSTOP, and didn't accidentally trip a breakpoint. */
1575
1576 if (debug_linux_nat)
1577 {
1578 fprintf_unfiltered (gdb_stdlog,
1579 "SWC: Pending event %s in %s\n",
1580 status_to_str ((int) status),
1581 target_pid_to_str (lp->ptid));
1582 }
1583 /* Now resume this LWP and get the SIGSTOP event. */
1584 errno = 0;
1585 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1586 if (debug_linux_nat)
1587 fprintf_unfiltered (gdb_stdlog,
1588 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1589 target_pid_to_str (lp->ptid),
1590 errno ? safe_strerror (errno) : "OK");
1591
1592 /* Hold this event/waitstatus while we check to see if
1593 there are any more (we still want to get that SIGSTOP). */
1594 stop_wait_callback (lp, data);
1595 /* If the lp->status field is still empty, use it to hold
1596 this event. If not, then this event must be returned
1597 to the event queue of the LWP. */
1598 if (lp->status == 0)
1599 lp->status = status;
1600 else
1601 {
1602 if (debug_linux_nat)
1603 {
1604 fprintf_unfiltered (gdb_stdlog,
1605 "SWC: kill %s, %s\n",
1606 target_pid_to_str (lp->ptid),
1607 status_to_str ((int) status));
1608 }
1609 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1610 }
1611 return 0;
1612 }
1613 }
1614 else
1615 {
1616 /* We caught the SIGSTOP that we intended to catch, so
1617 there's no SIGSTOP pending. */
1618 lp->stopped = 1;
1619 lp->signalled = 0;
1620 }
1621 }
1622
1623 return 0;
1624 }
1625
1626 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1627 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1628
1629 static int
1630 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1631 {
1632 sigset_t blocked, ignored;
1633 int i;
1634
1635 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1636
1637 if (!flush_mask)
1638 return 0;
1639
1640 for (i = 1; i < NSIG; i++)
1641 if (sigismember (pending, i))
1642 if (!sigismember (flush_mask, i)
1643 || sigismember (&blocked, i)
1644 || sigismember (&ignored, i))
1645 sigdelset (pending, i);
1646
1647 if (sigisemptyset (pending))
1648 return 0;
1649
1650 return 1;
1651 }
1652
1653 /* DATA is interpreted as a mask of signals to flush. If LP has
1654 signals pending, and they are all in the flush mask, then arrange
1655 to flush them. LP should be stopped, as should all other threads
1656 it might share a signal queue with. */
1657
1658 static int
1659 flush_callback (struct lwp_info *lp, void *data)
1660 {
1661 sigset_t *flush_mask = data;
1662 sigset_t pending, intersection, blocked, ignored;
1663 int pid, status;
1664
1665 /* Normally, when an LWP exits, it is removed from the LWP list. The
1666 last LWP isn't removed till later, however. So if there is only
1667 one LWP on the list, make sure it's alive. */
1668 if (lwp_list == lp && lp->next == NULL)
1669 if (!linux_nat_thread_alive (lp->ptid))
1670 return 0;
1671
1672 /* Just because the LWP is stopped doesn't mean that new signals
1673 can't arrive from outside, so this function must be careful of
1674 race conditions. However, because all threads are stopped, we
1675 can assume that the pending mask will not shrink unless we resume
1676 the LWP, and that it will then get another signal. We can't
1677 control which one, however. */
1678
1679 if (lp->status)
1680 {
1681 if (debug_linux_nat)
1682 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1683 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1684 lp->status = 0;
1685 }
1686
1687 /* While there is a pending signal we would like to flush, continue
1688 the inferior and collect another signal. But if there's already
1689 a saved status that we don't want to flush, we can't resume the
1690 inferior - if it stopped for some other reason we wouldn't have
1691 anywhere to save the new status. In that case, we must leave the
1692 signal unflushed (and possibly generate an extra SIGINT stop).
1693 That's much less bad than losing a signal. */
1694 while (lp->status == 0
1695 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1696 {
1697 int ret;
1698
1699 errno = 0;
1700 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1701 if (debug_linux_nat)
1702 fprintf_unfiltered (gdb_stderr,
1703 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1704
1705 lp->stopped = 0;
1706 stop_wait_callback (lp, flush_mask);
1707 if (debug_linux_nat)
1708 fprintf_unfiltered (gdb_stderr,
1709 "FC: Wait finished; saved status is %d\n",
1710 lp->status);
1711 }
1712
1713 return 0;
1714 }
1715
1716 /* Return non-zero if LP has a wait status pending. */
1717
1718 static int
1719 status_callback (struct lwp_info *lp, void *data)
1720 {
1721 /* Only report a pending wait status if we pretend that this has
1722 indeed been resumed. */
1723 return (lp->status != 0 && lp->resumed);
1724 }
1725
1726 /* Return non-zero if LP isn't stopped. */
1727
1728 static int
1729 running_callback (struct lwp_info *lp, void *data)
1730 {
1731 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1732 }
1733
1734 /* Count the LWPs that have had events. */
1735
1736 static int
1737 count_events_callback (struct lwp_info *lp, void *data)
1738 {
1739 int *count = data;
1740
1741 gdb_assert (count != NULL);
1742
1743 /* Count only LWPs that have a SIGTRAP event pending. */
1744 if (lp->status != 0
1745 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1746 (*count)++;
1747
1748 return 0;
1749 }
1750
1751 /* Select the LWP (if any) that is currently being single-stepped. */
1752
1753 static int
1754 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1755 {
1756 if (lp->step && lp->status != 0)
1757 return 1;
1758 else
1759 return 0;
1760 }
1761
1762 /* Select the Nth LWP that has had a SIGTRAP event. */
1763
1764 static int
1765 select_event_lwp_callback (struct lwp_info *lp, void *data)
1766 {
1767 int *selector = data;
1768
1769 gdb_assert (selector != NULL);
1770
1771 /* Select only LWPs that have a SIGTRAP event pending. */
1772 if (lp->status != 0
1773 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1774 if ((*selector)-- == 0)
1775 return 1;
1776
1777 return 0;
1778 }
1779
1780 static int
1781 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1782 {
1783 struct lwp_info *event_lp = data;
1784
1785 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1786 if (lp == event_lp)
1787 return 0;
1788
1789 /* If a LWP other than the LWP that we're reporting an event for has
1790 hit a GDB breakpoint (as opposed to some random trap signal),
1791 then just arrange for it to hit it again later. We don't keep
1792 the SIGTRAP status and don't forward the SIGTRAP signal to the
1793 LWP. We will handle the current event, eventually we will resume
1794 all LWPs, and this one will get its breakpoint trap again.
1795
1796 If we do not do this, then we run the risk that the user will
1797 delete or disable the breakpoint, but the LWP will have already
1798 tripped on it. */
1799
1800 if (lp->status != 0
1801 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1802 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1803 gdbarch_decr_pc_after_break
1804 (current_gdbarch)))
1805 {
1806 if (debug_linux_nat)
1807 fprintf_unfiltered (gdb_stdlog,
1808 "CBC: Push back breakpoint for %s\n",
1809 target_pid_to_str (lp->ptid));
1810
1811 /* Back up the PC if necessary. */
1812 if (gdbarch_decr_pc_after_break (current_gdbarch))
1813 write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break
1814 (current_gdbarch),
1815 lp->ptid);
1816
1817 /* Throw away the SIGTRAP. */
1818 lp->status = 0;
1819 }
1820
1821 return 0;
1822 }
1823
1824 /* Select one LWP out of those that have events pending. */
1825
1826 static void
1827 select_event_lwp (struct lwp_info **orig_lp, int *status)
1828 {
1829 int num_events = 0;
1830 int random_selector;
1831 struct lwp_info *event_lp;
1832
1833 /* Record the wait status for the original LWP. */
1834 (*orig_lp)->status = *status;
1835
1836 /* Give preference to any LWP that is being single-stepped. */
1837 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1838 if (event_lp != NULL)
1839 {
1840 if (debug_linux_nat)
1841 fprintf_unfiltered (gdb_stdlog,
1842 "SEL: Select single-step %s\n",
1843 target_pid_to_str (event_lp->ptid));
1844 }
1845 else
1846 {
1847 /* No single-stepping LWP. Select one at random, out of those
1848 which have had SIGTRAP events. */
1849
1850 /* First see how many SIGTRAP events we have. */
1851 iterate_over_lwps (count_events_callback, &num_events);
1852
1853 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1854 random_selector = (int)
1855 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1856
1857 if (debug_linux_nat && num_events > 1)
1858 fprintf_unfiltered (gdb_stdlog,
1859 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1860 num_events, random_selector);
1861
1862 event_lp = iterate_over_lwps (select_event_lwp_callback,
1863 &random_selector);
1864 }
1865
1866 if (event_lp != NULL)
1867 {
1868 /* Switch the event LWP. */
1869 *orig_lp = event_lp;
1870 *status = event_lp->status;
1871 }
1872
1873 /* Flush the wait status for the event LWP. */
1874 (*orig_lp)->status = 0;
1875 }
1876
1877 /* Return non-zero if LP has been resumed. */
1878
1879 static int
1880 resumed_callback (struct lwp_info *lp, void *data)
1881 {
1882 return lp->resumed;
1883 }
1884
1885 /* Stop an active thread, verify it still exists, then resume it. */
1886
1887 static int
1888 stop_and_resume_callback (struct lwp_info *lp, void *data)
1889 {
1890 struct lwp_info *ptr;
1891
1892 if (!lp->stopped && !lp->signalled)
1893 {
1894 stop_callback (lp, NULL);
1895 stop_wait_callback (lp, NULL);
1896 /* Resume if the lwp still exists. */
1897 for (ptr = lwp_list; ptr; ptr = ptr->next)
1898 if (lp == ptr)
1899 {
1900 resume_callback (lp, NULL);
1901 resume_set_callback (lp, NULL);
1902 }
1903 }
1904 return 0;
1905 }
1906
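/* Wait for an event from PTID, or from any LWP if PTID's pid is -1.
   A wait status already queued on a resumed LWP is used if available;
   otherwise we loop on waitpid (with __WCLONE and/or WNOHANG as
   appropriate) until an event arrives, juggling any pending SIGSTOPs
   along the way. */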
1907 static ptid_t
1908 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1909 {
1910 struct lwp_info *lp = NULL;
1911 int options = 0;
1912 int status = 0;
1913 pid_t pid = PIDGET (ptid);
1914 sigset_t flush_mask;
1915
1916 /* The first time we get here after starting a new inferior, we may
1917 not have added it to the LWP list yet - this is the earliest
1918 moment at which we know its PID. */
1919 if (num_lwps == 0)
1920 {
1921 gdb_assert (!is_lwp (inferior_ptid));
1922
1923 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1924 GET_PID (inferior_ptid));
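/* On GNU/Linux the initial thread's LWP id is the same as the process
   id, which is why BUILD_LWP (pid, pid) is the right ptid for it.  */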
1925 lp = add_lwp (inferior_ptid);
1926 lp->resumed = 1;
1927 }
1928
1929 sigemptyset (&flush_mask);
1930
1931 /* Make sure SIGCHLD is blocked. */
1932 if (!sigismember (&blocked_mask, SIGCHLD))
1933 {
1934 sigaddset (&blocked_mask, SIGCHLD);
1935 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1936 }
1937
1938 retry:
1939
1940 /* Make sure there is at least one LWP that has been resumed. */
1941 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
1942
1943 /* First check if there is a LWP with a wait status pending. */
1944 if (pid == -1)
1945 {
1946 /* Any LWP that's been resumed will do. */
1947 lp = iterate_over_lwps (status_callback, NULL);
1948 if (lp)
1949 {
1950 status = lp->status;
1951 lp->status = 0;
1952
1953 if (debug_linux_nat && status)
1954 fprintf_unfiltered (gdb_stdlog,
1955 "LLW: Using pending wait status %s for %s.\n",
1956 status_to_str (status),
1957 target_pid_to_str (lp->ptid));
1958 }
1959
1960 /* But if we don't find one, we'll have to wait, and check both
1961 cloned and uncloned processes. We start with the cloned
1962 processes. */
1963 options = __WCLONE | WNOHANG;
1964 }
1965 else if (is_lwp (ptid))
1966 {
1967 if (debug_linux_nat)
1968 fprintf_unfiltered (gdb_stdlog,
1969 "LLW: Waiting for specific LWP %s.\n",
1970 target_pid_to_str (ptid));
1971
1972 /* We have a specific LWP to check. */
1973 lp = find_lwp_pid (ptid);
1974 gdb_assert (lp);
1975 status = lp->status;
1976 lp->status = 0;
1977
1978 if (debug_linux_nat && status)
1979 fprintf_unfiltered (gdb_stdlog,
1980 "LLW: Using pending wait status %s for %s.\n",
1981 status_to_str (status),
1982 target_pid_to_str (lp->ptid));
1983
1984 /* If we have to wait, take into account whether PID is a cloned
1985 process or not. And we have to convert it to something that
1986 the layer beneath us can understand. */
1987 options = lp->cloned ? __WCLONE : 0;
1988 pid = GET_LWP (ptid);
1989 }
1990
1991 if (status && lp->signalled)
1992 {
1993 /* A pending SIGSTOP may interfere with the normal stream of
1994 events. In a typical case where interference is a problem,
1995 we have a SIGSTOP signal pending for LWP A while
1996 single-stepping it, encounter an event in LWP B, and take the
1997 pending SIGSTOP while trying to stop LWP A. After processing
1998 the event in LWP B, LWP A is continued, and we'll never see
1999 the SIGTRAP associated with the last time we were
2000 single-stepping LWP A. */
2001
2002 /* Resume the thread. It should halt immediately returning the
2003 pending SIGSTOP. */
2004 registers_changed ();
2005 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2006 lp->step, TARGET_SIGNAL_0);
2007 if (debug_linux_nat)
2008 fprintf_unfiltered (gdb_stdlog,
2009 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2010 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2011 target_pid_to_str (lp->ptid));
2012 lp->stopped = 0;
2013 gdb_assert (lp->resumed);
2014
2015 /* This should catch the pending SIGSTOP. */
2016 stop_wait_callback (lp, NULL);
2017 }
2018
2019 set_sigint_trap (); /* Causes SIGINT to be passed on to the
2020 attached process. */
2021 set_sigio_trap ();
2022
2023 while (status == 0)
2024 {
2025 pid_t lwpid;
2026
2027 lwpid = my_waitpid (pid, &status, options);
2028 if (lwpid > 0)
2029 {
2030 gdb_assert (pid == -1 || lwpid == pid);
2031
2032 if (debug_linux_nat)
2033 {
2034 fprintf_unfiltered (gdb_stdlog,
2035 "LLW: waitpid %ld received %s\n",
2036 (long) lwpid, status_to_str (status));
2037 }
2038
2039 lp = find_lwp_pid (pid_to_ptid (lwpid));
2040
2041 /* Check for stop events reported by a process we didn't
2042 already know about - anything not already in our LWP
2043 list.
2044
2045 If we're expecting to receive stopped processes after
2046 fork, vfork, and clone events, then we'll just add the
2047 new one to our list and go back to waiting for the event
2048 to be reported - the stopped process might be returned
2049 from waitpid before or after the event is. */
2050 if (WIFSTOPPED (status) && !lp)
2051 {
2052 linux_record_stopped_pid (lwpid, status);
2053 status = 0;
2054 continue;
2055 }
2056
2057 /* Make sure we don't report an event for the exit of an LWP not in
2058 our list, i.e. not part of the current process. This can happen
2059 if we detach from a program we originally forked and then it
2060 exits. */
2061 if (!WIFSTOPPED (status) && !lp)
2062 {
2063 status = 0;
2064 continue;
2065 }
2066
2067 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2068 CLONE_PTRACE processes which do not use the thread library -
2069 otherwise we wouldn't find the new LWP this way. That doesn't
2070 currently work, and the following code is currently unreachable
2071 due to the two blocks above. If it's fixed some day, this code
2072 should be broken out into a function so that we can also pick up
2073 LWPs from the new interface. */
2074 if (!lp)
2075 {
2076 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2077 if (options & __WCLONE)
2078 lp->cloned = 1;
2079
2080 gdb_assert (WIFSTOPPED (status)
2081 && WSTOPSIG (status) == SIGSTOP);
2082 lp->signalled = 1;
2083
2084 if (!in_thread_list (inferior_ptid))
2085 {
2086 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2087 GET_PID (inferior_ptid));
2088 add_thread (inferior_ptid);
2089 }
2090
2091 add_thread (lp->ptid);
2092 printf_unfiltered (_("[New %s]\n"),
2093 target_pid_to_str (lp->ptid));
2094 }
2095
2096 /* Save the trap's siginfo in case we need it later. */
2097 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2098 save_siginfo (lp);
2099
2100 /* Handle GNU/Linux's extended waitstatus for trace events. */
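/* For these events the kernel stores the PTRACE_EVENT_* code in bits
   16 and up of the wait status, which is what the "status >> 16" test
   below picks out.  */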
2101 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2102 {
2103 if (debug_linux_nat)
2104 fprintf_unfiltered (gdb_stdlog,
2105 "LLW: Handling extended status 0x%06x\n",
2106 status);
2107 if (linux_handle_extended_wait (lp, status, 0))
2108 {
2109 status = 0;
2110 continue;
2111 }
2112 }
2113
2114 /* Check if the thread has exited. */
2115 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2116 {
2117 /* If this is the main thread, we must stop all threads and
2118 verify whether they are still alive. This is because in the nptl
2119 thread model, there is no signal issued for exiting LWPs
2120 other than the main thread. We only get the main thread
2121 exit signal once all child threads have already exited.
2122 If we stop all the threads and use the stop_wait_callback
2123 to check whether they have exited, we can determine whether this
2124 signal should be ignored or whether it means the end of the
2125 debugged application, regardless of which threading model
2126 is being used. */
2127 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2128 {
2129 lp->stopped = 1;
2130 iterate_over_lwps (stop_and_resume_callback, NULL);
2131 }
2132
2133 if (debug_linux_nat)
2134 fprintf_unfiltered (gdb_stdlog,
2135 "LLW: %s exited.\n",
2136 target_pid_to_str (lp->ptid));
2137
2138 exit_lwp (lp);
2139
2140 /* If there is at least one more LWP, then the exit signal
2141 was not the end of the debugged application and should be
2142 ignored. */
2143 if (num_lwps > 0)
2144 {
2145 /* Make sure there is at least one thread running. */
2146 gdb_assert (iterate_over_lwps (running_callback, NULL));
2147
2148 /* Discard the event. */
2149 status = 0;
2150 continue;
2151 }
2152 }
2153
2154 /* Check if the current LWP has previously exited. In the nptl
2155 thread model, LWPs other than the main thread do not issue
2156 signals when they exit so we must check whenever the thread
2157 has stopped. A similar check is made in stop_wait_callback(). */
2158 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2159 {
2160 if (debug_linux_nat)
2161 fprintf_unfiltered (gdb_stdlog,
2162 "LLW: %s exited.\n",
2163 target_pid_to_str (lp->ptid));
2164
2165 exit_lwp (lp);
2166
2167 /* Make sure there is at least one thread running. */
2168 gdb_assert (iterate_over_lwps (running_callback, NULL));
2169
2170 /* Discard the event. */
2171 status = 0;
2172 continue;
2173 }
2174
2175 /* Make sure we don't report a SIGSTOP that we sent
2176 ourselves in an attempt to stop an LWP. */
2177 if (lp->signalled
2178 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2179 {
2180 if (debug_linux_nat)
2181 fprintf_unfiltered (gdb_stdlog,
2182 "LLW: Delayed SIGSTOP caught for %s.\n",
2183 target_pid_to_str (lp->ptid));
2184
2185 /* This is a delayed SIGSTOP. */
2186 lp->signalled = 0;
2187
2188 registers_changed ();
2189 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2190 lp->step, TARGET_SIGNAL_0);
2191 if (debug_linux_nat)
2192 fprintf_unfiltered (gdb_stdlog,
2193 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2194 lp->step ?
2195 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2196 target_pid_to_str (lp->ptid));
2197
2198 lp->stopped = 0;
2199 gdb_assert (lp->resumed);
2200
2201 /* Discard the event. */
2202 status = 0;
2203 continue;
2204 }
2205
2206 break;
2207 }
2208
2209 if (pid == -1)
2210 {
2211 /* Alternate between checking cloned and uncloned processes. */
2212 options ^= __WCLONE;
2213
2214 /* And suspend every time we have checked both. */
2215 if (options & __WCLONE)
2216 sigsuspend (&suspend_mask);
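/* Since SIGCHLD is normally blocked and suspend_mask leaves it
   unblocked, sigsuspend atomically releases it and waits, so a SIGCHLD
   that arrived between the waitpid above and this point is delivered
   immediately rather than lost.  */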
2217 }
2218
2219 /* We shouldn't end up here unless we want to try again. */
2220 gdb_assert (status == 0);
2221 }
2222
2223 clear_sigio_trap ();
2224 clear_sigint_trap ();
2225
2226 gdb_assert (lp);
2227
2228 /* Don't report signals that GDB isn't interested in, such as
2229 signals that are neither printed nor stopped upon. Stopping all
2230 threads can be a bit time-consuming so if we want decent
2231 performance with heavily multi-threaded programs, especially when
2232 they're using a high frequency timer, we'd better avoid it if we
2233 can. */
2234
2235 if (WIFSTOPPED (status))
2236 {
2237 int signo = target_signal_from_host (WSTOPSIG (status));
2238
2239 /* If we get a signal while single-stepping, we may need special
2240 care, e.g. to skip the signal handler. Defer to common code. */
2241 if (!lp->step
2242 && signal_stop_state (signo) == 0
2243 && signal_print_state (signo) == 0
2244 && signal_pass_state (signo) == 1)
2245 {
2246 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2247 here? It is not clear we should. GDB may not expect
2248 other threads to run. On the other hand, not resuming
2249 newly attached threads may cause an unwanted delay in
2250 getting them running. */
2251 registers_changed ();
2252 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2253 lp->step, signo);
2254 if (debug_linux_nat)
2255 fprintf_unfiltered (gdb_stdlog,
2256 "LLW: %s %s, %s (preempt 'handle')\n",
2257 lp->step ?
2258 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2259 target_pid_to_str (lp->ptid),
2260 signo ? strsignal (signo) : "0");
2261 lp->stopped = 0;
2262 status = 0;
2263 goto retry;
2264 }
2265
2266 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2267 {
2268 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2269 forwarded to the entire process group, that is, all LWP's
2270 will receive it. Since we only want to report it once,
2271 we try to flush it from all LWPs except this one. */
2272 sigaddset (&flush_mask, SIGINT);
2273 }
2274 }
2275
2276 /* This LWP is stopped now. */
2277 lp->stopped = 1;
2278
2279 if (debug_linux_nat)
2280 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2281 status_to_str (status), target_pid_to_str (lp->ptid));
2282
2283 /* Now stop all other LWP's ... */
2284 iterate_over_lwps (stop_callback, NULL);
2285
2286 /* ... and wait until all of them have reported back that they're no
2287 longer running. */
2288 iterate_over_lwps (stop_wait_callback, &flush_mask);
2289 iterate_over_lwps (flush_callback, &flush_mask);
2290
2291 /* If we're not waiting for a specific LWP, choose an event LWP from
2292 among those that have had events. Giving equal priority to all
2293 LWPs that have had events helps prevent starvation. */
2294 if (pid == -1)
2295 select_event_lwp (&lp, &status);
2296
2297 /* Now that we've selected our final event LWP, cancel any
2298 breakpoints in other LWPs that have hit a GDB breakpoint. See
2299 the comment in cancel_breakpoints_callback to find out why. */
2300 iterate_over_lwps (cancel_breakpoints_callback, lp);
2301
2302 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2303 {
2304 trap_ptid = lp->ptid;
2305 if (debug_linux_nat)
2306 fprintf_unfiltered (gdb_stdlog,
2307 "LLW: trap_ptid is %s.\n",
2308 target_pid_to_str (trap_ptid));
2309 }
2310 else
2311 trap_ptid = null_ptid;
2312
2313 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2314 {
2315 *ourstatus = lp->waitstatus;
2316 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2317 }
2318 else
2319 store_waitstatus (ourstatus, status);
2320
2321 return lp->ptid;
2322 }
2323
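/* Send PTRACE_KILL to the LWP described by LP; called for every LWP
   via iterate_over_lwps from linux_nat_kill below.  */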
2324 static int
2325 kill_callback (struct lwp_info *lp, void *data)
2326 {
2327 errno = 0;
2328 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2329 if (debug_linux_nat)
2330 fprintf_unfiltered (gdb_stdlog,
2331 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2332 target_pid_to_str (lp->ptid),
2333 errno ? safe_strerror (errno) : "OK");
2334
2335 return 0;
2336 }
2337
2338 static int
2339 kill_wait_callback (struct lwp_info *lp, void *data)
2340 {
2341 pid_t pid;
2342
2343 /* We must make sure that there are no pending events (delayed
2344 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
2345 program doesn't interfere with any following debugging session. */
2346
2347 /* For cloned processes we must check both with __WCLONE and
2348 without, since the exit status of a cloned process isn't reported
2349 with __WCLONE. */
2350 if (lp->cloned)
2351 {
2352 do
2353 {
2354 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2355 if (pid != (pid_t) -1 && debug_linux_nat)
2356 {
2357 fprintf_unfiltered (gdb_stdlog,
2358 "KWC: wait %s received unknown.\n",
2359 target_pid_to_str (lp->ptid));
2360 }
2361 }
2362 while (pid == GET_LWP (lp->ptid));
2363
2364 gdb_assert (pid == -1 && errno == ECHILD);
2365 }
2366
2367 do
2368 {
2369 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2370 if (pid != (pid_t) -1 && debug_linux_nat)
2371 {
2372 fprintf_unfiltered (gdb_stdlog,
2373 "KWC: wait %s received unk.\n",
2374 target_pid_to_str (lp->ptid));
2375 }
2376 }
2377 while (pid == GET_LWP (lp->ptid));
2378
2379 gdb_assert (pid == -1 && errno == ECHILD);
2380 return 0;
2381 }
2382
2383 static void
2384 linux_nat_kill (void)
2385 {
2386 struct target_waitstatus last;
2387 ptid_t last_ptid;
2388 int status;
2389
2390 /* If we're stopped while forking and we haven't followed yet,
2391 kill the other task. We need to do this first because the
2392 parent will be sleeping if this is a vfork. */
2393
2394 get_last_target_status (&last_ptid, &last);
2395
2396 if (last.kind == TARGET_WAITKIND_FORKED
2397 || last.kind == TARGET_WAITKIND_VFORKED)
2398 {
2399 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2400 wait (&status);
2401 }
2402
2403 if (forks_exist_p ())
2404 linux_fork_killall ();
2405 else
2406 {
2407 /* Kill all LWP's ... */
2408 iterate_over_lwps (kill_callback, NULL);
2409
2410 /* ... and wait until we've flushed all events. */
2411 iterate_over_lwps (kill_wait_callback, NULL);
2412 }
2413
2414 target_mourn_inferior ();
2415 }
2416
2417 static void
2418 linux_nat_mourn_inferior (void)
2419 {
2420 trap_ptid = null_ptid;
2421
2422 /* Destroy LWP info; it's no longer valid. */
2423 init_lwp_list ();
2424
2425 /* Restore the original signal mask. */
2426 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2427 sigemptyset (&blocked_mask);
2428
2429 if (! forks_exist_p ())
2430 /* Normal case, no other forks available. */
2431 linux_ops->to_mourn_inferior ();
2432 else
2433 /* Multi-fork case. The current inferior_ptid has exited, but
2434 there are other viable forks to debug. Delete the exiting
2435 one and context-switch to the first available. */
2436 linux_fork_mourn_inferior ();
2437 }
2438
2439 static LONGEST
2440 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2441 const char *annex, gdb_byte *readbuf,
2442 const gdb_byte *writebuf,
2443 ULONGEST offset, LONGEST len)
2444 {
2445 struct cleanup *old_chain = save_inferior_ptid ();
2446 LONGEST xfer;
2447
2448 if (is_lwp (inferior_ptid))
2449 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2450
2451 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2452 offset, len);
2453
2454 do_cleanups (old_chain);
2455 return xfer;
2456 }
2457
2458 static int
2459 linux_nat_thread_alive (ptid_t ptid)
2460 {
2461 gdb_assert (is_lwp (ptid));
2462
2463 errno = 0;
2464 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2465 if (debug_linux_nat)
2466 fprintf_unfiltered (gdb_stdlog,
2467 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2468 target_pid_to_str (ptid),
2469 errno ? safe_strerror (errno) : "OK");
2470
2471 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2472 handle that case gracefully since ptrace will first do a lookup
2473 for the process based upon the passed-in pid. If that fails we
2474 will get either -ESRCH or -EPERM, otherwise the child exists and
2475 is alive. */
2476 if (errno == ESRCH || errno == EPERM)
2477 return 0;
2478
2479 return 1;
2480 }
2481
2482 static char *
2483 linux_nat_pid_to_str (ptid_t ptid)
2484 {
2485 static char buf[64];
2486
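/* Only use the "LWP N" form when more than one LWP is on the list;
   for a plain single-threaded inferior fall through to the normal
   printing below.  */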
2487 if (lwp_list && lwp_list->next && is_lwp (ptid))
2488 {
2489 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2490 return buf;
2491 }
2492
2493 return normal_pid_to_str (ptid);
2494 }
2495
2496 static void
2497 sigchld_handler (int signo)
2498 {
2499 /* Do nothing. The only reason for this handler is that it allows
2500 us to use sigsuspend in linux_nat_wait above to wait for the
2501 arrival of a SIGCHLD. */
2502 }
2503
2504 /* Accepts an integer PID; returns a string representing a file that
2505 can be opened to get the symbols for the child process. */
2506
2507 static char *
2508 linux_child_pid_to_exec_file (int pid)
2509 {
2510 char *name1, *name2;
2511
2512 name1 = xmalloc (MAXPATHLEN);
2513 name2 = xmalloc (MAXPATHLEN);
2514 make_cleanup (xfree, name1);
2515 make_cleanup (xfree, name2);
2516 memset (name2, 0, MAXPATHLEN);
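/* readlink does not NUL-terminate its result, so the memset above is
   what guarantees NAME2 comes back as a proper string.  */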
2517
2518 sprintf (name1, "/proc/%d/exe", pid);
2519 if (readlink (name1, name2, MAXPATHLEN) > 0)
2520 return name2;
2521 else
2522 return name1;
2523 }
2524
2525 /* Service function for corefiles and info proc. */
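/* Each line of /proc/PID/maps has the form

     08048000-0804c000 r-xp 00000000 03:01 8192    /bin/cat

   i.e. address range, permissions, offset, device, inode and an
   optional file name, which is exactly the layout read_mapping scans
   for below.  */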
2526
2527 static int
2528 read_mapping (FILE *mapfile,
2529 long long *addr,
2530 long long *endaddr,
2531 char *permissions,
2532 long long *offset,
2533 char *device, long long *inode, char *filename)
2534 {
2535 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2536 addr, endaddr, permissions, offset, device, inode);
2537
2538 filename[0] = '\0';
2539 if (ret > 0 && ret != EOF)
2540 {
2541 /* Eat everything up to EOL for the filename. This will prevent
2542 weird filenames (such as one with embedded whitespace) from
2543 confusing this code. It also makes this code more robust in
2544 respect to annotations the kernel may add after the filename.
2545
2546 Note the filename is used for informational purposes
2547 only. */
2548 ret += fscanf (mapfile, "%[^\n]\n", filename);
2549 }
2550
2551 return (ret != 0 && ret != EOF);
2552 }
2553
2554 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2555 regions in the inferior for a corefile. */
2556
2557 static int
2558 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2559 unsigned long,
2560 int, int, int, void *), void *obfd)
2561 {
2562 long long pid = PIDGET (inferior_ptid);
2563 char mapsfilename[MAXPATHLEN];
2564 FILE *mapsfile;
2565 long long addr, endaddr, size, offset, inode;
2566 char permissions[8], device[8], filename[MAXPATHLEN];
2567 int read, write, exec;
2568 int ret;
2569
2570 /* Compose the filename for the /proc memory map, and open it. */
2571 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2572 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2573 error (_("Could not open %s."), mapsfilename);
2574
2575 if (info_verbose)
2576 fprintf_filtered (gdb_stdout,
2577 "Reading memory regions from %s\n", mapsfilename);
2578
2579 /* Now iterate until end-of-file. */
2580 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2581 &offset, &device[0], &inode, &filename[0]))
2582 {
2583 size = endaddr - addr;
2584
2585 /* Get the segment's permissions. */
2586 read = (strchr (permissions, 'r') != 0);
2587 write = (strchr (permissions, 'w') != 0);
2588 exec = (strchr (permissions, 'x') != 0);
2589
2590 if (info_verbose)
2591 {
2592 fprintf_filtered (gdb_stdout,
2593 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2594 size, paddr_nz (addr),
2595 read ? 'r' : ' ',
2596 write ? 'w' : ' ', exec ? 'x' : ' ');
2597 if (filename[0])
2598 fprintf_filtered (gdb_stdout, " for %s", filename);
2599 fprintf_filtered (gdb_stdout, "\n");
2600 }
2601
2602 /* Invoke the callback function to create the corefile
2603 segment. */
2604 func (addr, size, read, write, exec, obfd);
2605 }
2606 fclose (mapsfile);
2607 return 0;
2608 }
2609
2610 /* Records the thread's register state for the corefile note
2611 section. */
2612
2613 static char *
2614 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2615 char *note_data, int *note_size)
2616 {
2617 gdb_gregset_t gregs;
2618 gdb_fpregset_t fpregs;
2619 #ifdef FILL_FPXREGSET
2620 gdb_fpxregset_t fpxregs;
2621 #endif
2622 unsigned long lwp = ptid_get_lwp (ptid);
2623 struct regcache *regcache = get_thread_regcache (ptid);
2624 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2625 const struct regset *regset;
2626 int core_regset_p;
2627 struct cleanup *old_chain;
2628
2629 old_chain = save_inferior_ptid ();
2630 inferior_ptid = ptid;
2631 target_fetch_registers (regcache, -1);
2632 do_cleanups (old_chain);
2633
2634 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
2635 if (core_regset_p
2636 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
2637 sizeof (gregs))) != NULL
2638 && regset->collect_regset != NULL)
2639 regset->collect_regset (regset, regcache, -1,
2640 &gregs, sizeof (gregs));
2641 else
2642 fill_gregset (regcache, &gregs, -1);
2643
2644 note_data = (char *) elfcore_write_prstatus (obfd,
2645 note_data,
2646 note_size,
2647 lwp,
2648 stop_signal, &gregs);
2649
2650 if (core_regset_p
2651 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
2652 sizeof (fpregs))) != NULL
2653 && regset->collect_regset != NULL)
2654 regset->collect_regset (regset, regcache, -1,
2655 &fpregs, sizeof (fpregs));
2656 else
2657 fill_fpregset (regcache, &fpregs, -1);
2658
2659 note_data = (char *) elfcore_write_prfpreg (obfd,
2660 note_data,
2661 note_size,
2662 &fpregs, sizeof (fpregs));
2663
2664 #ifdef FILL_FPXREGSET
2665 if (core_regset_p
2666 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
2667 sizeof (fpxregs))) != NULL
2668 && regset->collect_regset != NULL)
2669 regset->collect_regset (regset, regcache, -1,
2670 &fpxregs, sizeof (fpxregs));
2671 else
2672 fill_fpxregset (regcache, &fpxregs, -1);
2673
2674 note_data = (char *) elfcore_write_prxfpreg (obfd,
2675 note_data,
2676 note_size,
2677 &fpxregs, sizeof (fpxregs));
2678 #endif
2679 return note_data;
2680 }
2681
2682 struct linux_nat_corefile_thread_data
2683 {
2684 bfd *obfd;
2685 char *note_data;
2686 int *note_size;
2687 int num_notes;
2688 };
2689
2690 /* Called once for each LWP by iterate_over_lwps. Records the thread's
2691 register state for the corefile note section. */
2692
2693 static int
2694 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2695 {
2696 struct linux_nat_corefile_thread_data *args = data;
2697
2698 args->note_data = linux_nat_do_thread_registers (args->obfd,
2699 ti->ptid,
2700 args->note_data,
2701 args->note_size);
2702 args->num_notes++;
2703
2704 return 0;
2705 }
2706
2707 /* Records the register state for the corefile note section. */
2708
2709 static char *
2710 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2711 char *note_data, int *note_size)
2712 {
2713 return linux_nat_do_thread_registers (obfd,
2714 ptid_build (ptid_get_pid (inferior_ptid),
2715 ptid_get_pid (inferior_ptid),
2716 0),
2717 note_data, note_size);
2718 }
2719
2720 /* Fills the "to_make_corefile_note" target vector. Builds the note
2721 section for a corefile, and returns it in a malloc buffer. */
2722
2723 static char *
2724 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2725 {
2726 struct linux_nat_corefile_thread_data thread_args;
2727 struct cleanup *old_chain;
2728 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
2729 char fname[16] = { '\0' };
2730 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
2731 char psargs[80] = { '\0' };
2732 char *note_data = NULL;
2733 ptid_t current_ptid = inferior_ptid;
2734 gdb_byte *auxv;
2735 int auxv_len;
2736
2737 if (get_exec_file (0))
2738 {
2739 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2740 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2741 if (get_inferior_args ())
2742 {
2743 char *string_end;
2744 char *psargs_end = psargs + sizeof (psargs);
2745
2746 /* elfcore_write_prpsinfo () handles strings that are not
2747 zero-terminated just fine. */
2748 string_end = memchr (psargs, 0, sizeof (psargs));
2749 if (string_end != NULL)
2750 {
2751 *string_end++ = ' ';
2752 strncpy (string_end, get_inferior_args (),
2753 psargs_end - string_end);
2754 }
2755 }
2756 note_data = (char *) elfcore_write_prpsinfo (obfd,
2757 note_data,
2758 note_size, fname, psargs);
2759 }
2760
2761 /* Dump information for threads. */
2762 thread_args.obfd = obfd;
2763 thread_args.note_data = note_data;
2764 thread_args.note_size = note_size;
2765 thread_args.num_notes = 0;
2766 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2767 if (thread_args.num_notes == 0)
2768 {
2769 /* iterate_over_lwps didn't come up with any LWPs; just
2770 use inferior_ptid. */
2771 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2772 note_data, note_size);
2773 }
2774 else
2775 {
2776 note_data = thread_args.note_data;
2777 }
2778
2779 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
2780 NULL, &auxv);
2781 if (auxv_len > 0)
2782 {
2783 note_data = elfcore_write_note (obfd, note_data, note_size,
2784 "CORE", NT_AUXV, auxv, auxv_len);
2785 xfree (auxv);
2786 }
2787
2788 make_cleanup (xfree, note_data);
2789 return note_data;
2790 }
2791
2792 /* Implement the "info proc" command. */
2793
2794 static void
2795 linux_nat_info_proc_cmd (char *args, int from_tty)
2796 {
2797 long long pid = PIDGET (inferior_ptid);
2798 FILE *procfile;
2799 char **argv = NULL;
2800 char buffer[MAXPATHLEN];
2801 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2802 int cmdline_f = 1;
2803 int cwd_f = 1;
2804 int exe_f = 1;
2805 int mappings_f = 0;
2806 int environ_f = 0;
2807 int status_f = 0;
2808 int stat_f = 0;
2809 int all = 0;
2810 struct stat dummy;
2811
2812 if (args)
2813 {
2814 /* Break up 'args' into an argv array. */
2815 if ((argv = buildargv (args)) == NULL)
2816 nomem (0);
2817 else
2818 make_cleanup_freeargv (argv);
2819 }
2820 while (argv != NULL && *argv != NULL)
2821 {
2822 if (isdigit (argv[0][0]))
2823 {
2824 pid = strtoul (argv[0], NULL, 10);
2825 }
2826 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2827 {
2828 mappings_f = 1;
2829 }
2830 else if (strcmp (argv[0], "status") == 0)
2831 {
2832 status_f = 1;
2833 }
2834 else if (strcmp (argv[0], "stat") == 0)
2835 {
2836 stat_f = 1;
2837 }
2838 else if (strcmp (argv[0], "cmd") == 0)
2839 {
2840 cmdline_f = 1;
2841 }
2842 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2843 {
2844 exe_f = 1;
2845 }
2846 else if (strcmp (argv[0], "cwd") == 0)
2847 {
2848 cwd_f = 1;
2849 }
2850 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2851 {
2852 all = 1;
2853 }
2854 else
2855 {
2856 /* [...] (future options here) */
2857 }
2858 argv++;
2859 }
2860 if (pid == 0)
2861 error (_("No current process: you must name one."));
2862
2863 sprintf (fname1, "/proc/%lld", pid);
2864 if (stat (fname1, &dummy) != 0)
2865 error (_("No /proc directory: '%s'"), fname1);
2866
2867 printf_filtered (_("process %lld\n"), pid);
2868 if (cmdline_f || all)
2869 {
2870 sprintf (fname1, "/proc/%lld/cmdline", pid);
2871 if ((procfile = fopen (fname1, "r")) != NULL)
2872 {
2873 fgets (buffer, sizeof (buffer), procfile);
2874 printf_filtered ("cmdline = '%s'\n", buffer);
2875 fclose (procfile);
2876 }
2877 else
2878 warning (_("unable to open /proc file '%s'"), fname1);
2879 }
2880 if (cwd_f || all)
2881 {
2882 sprintf (fname1, "/proc/%lld/cwd", pid);
2883 memset (fname2, 0, sizeof (fname2));
2884 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2885 printf_filtered ("cwd = '%s'\n", fname2);
2886 else
2887 warning (_("unable to read link '%s'"), fname1);
2888 }
2889 if (exe_f || all)
2890 {
2891 sprintf (fname1, "/proc/%lld/exe", pid);
2892 memset (fname2, 0, sizeof (fname2));
2893 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2894 printf_filtered ("exe = '%s'\n", fname2);
2895 else
2896 warning (_("unable to read link '%s'"), fname1);
2897 }
2898 if (mappings_f || all)
2899 {
2900 sprintf (fname1, "/proc/%lld/maps", pid);
2901 if ((procfile = fopen (fname1, "r")) != NULL)
2902 {
2903 long long addr, endaddr, size, offset, inode;
2904 char permissions[8], device[8], filename[MAXPATHLEN];
2905
2906 printf_filtered (_("Mapped address spaces:\n\n"));
2907 if (gdbarch_addr_bit (current_gdbarch) == 32)
2908 {
2909 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2910 "Start Addr",
2911 " End Addr",
2912 " Size", " Offset", "objfile");
2913 }
2914 else
2915 {
2916 printf_filtered (" %18s %18s %10s %10s %7s\n",
2917 "Start Addr",
2918 " End Addr",
2919 " Size", " Offset", "objfile");
2920 }
2921
2922 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2923 &offset, &device[0], &inode, &filename[0]))
2924 {
2925 size = endaddr - addr;
2926
2927 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2928 calls here (and possibly above) should be abstracted
2929 out into their own functions? Andrew suggests using
2930 a generic local_address_string instead to print out
2931 the addresses; that makes sense to me, too. */
2932
2933 if (gdbarch_addr_bit (current_gdbarch) == 32)
2934 {
2935 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2936 (unsigned long) addr, /* FIXME: pr_addr */
2937 (unsigned long) endaddr,
2938 (int) size,
2939 (unsigned int) offset,
2940 filename[0] ? filename : "");
2941 }
2942 else
2943 {
2944 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2945 (unsigned long) addr, /* FIXME: pr_addr */
2946 (unsigned long) endaddr,
2947 (int) size,
2948 (unsigned int) offset,
2949 filename[0] ? filename : "");
2950 }
2951 }
2952
2953 fclose (procfile);
2954 }
2955 else
2956 warning (_("unable to open /proc file '%s'"), fname1);
2957 }
2958 if (status_f || all)
2959 {
2960 sprintf (fname1, "/proc/%lld/status", pid);
2961 if ((procfile = fopen (fname1, "r")) != NULL)
2962 {
2963 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2964 puts_filtered (buffer);
2965 fclose (procfile);
2966 }
2967 else
2968 warning (_("unable to open /proc file '%s'"), fname1);
2969 }
2970 if (stat_f || all)
2971 {
2972 sprintf (fname1, "/proc/%lld/stat", pid);
2973 if ((procfile = fopen (fname1, "r")) != NULL)
2974 {
2975 int itmp;
2976 char ctmp;
2977 long ltmp;
2978
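/* /proc/PID/stat is a single line of space-separated fields, e.g.
   "4094 (bash) S 4090 4094 4094 ...", so it can simply be scanned
   sequentially as done below.  */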
2979 if (fscanf (procfile, "%d ", &itmp) > 0)
2980 printf_filtered (_("Process: %d\n"), itmp);
2981 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
2982 printf_filtered (_("Exec file: %s\n"), buffer);
2983 if (fscanf (procfile, "%c ", &ctmp) > 0)
2984 printf_filtered (_("State: %c\n"), ctmp);
2985 if (fscanf (procfile, "%d ", &itmp) > 0)
2986 printf_filtered (_("Parent process: %d\n"), itmp);
2987 if (fscanf (procfile, "%d ", &itmp) > 0)
2988 printf_filtered (_("Process group: %d\n"), itmp);
2989 if (fscanf (procfile, "%d ", &itmp) > 0)
2990 printf_filtered (_("Session id: %d\n"), itmp);
2991 if (fscanf (procfile, "%d ", &itmp) > 0)
2992 printf_filtered (_("TTY: %d\n"), itmp);
2993 if (fscanf (procfile, "%d ", &itmp) > 0)
2994 printf_filtered (_("TTY owner process group: %d\n"), itmp);
2995 if (fscanf (procfile, "%lu ", &ltmp) > 0)
2996 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
2997 if (fscanf (procfile, "%lu ", &ltmp) > 0)
2998 printf_filtered (_("Minor faults (no memory page): %lu\n"),
2999 (unsigned long) ltmp);
3000 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3001 printf_filtered (_("Minor faults, children: %lu\n"),
3002 (unsigned long) ltmp);
3003 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3004 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3005 (unsigned long) ltmp);
3006 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3007 printf_filtered (_("Major faults, children: %lu\n"),
3008 (unsigned long) ltmp);
3009 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3010 printf_filtered (_("utime: %ld\n"), ltmp);
3011 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3012 printf_filtered (_("stime: %ld\n"), ltmp);
3013 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3014 printf_filtered (_("utime, children: %ld\n"), ltmp);
3015 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3016 printf_filtered (_("stime, children: %ld\n"), ltmp);
3017 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3018 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3019 ltmp);
3020 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3021 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3022 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3023 printf_filtered (_("jiffies until next timeout: %lu\n"),
3024 (unsigned long) ltmp);
3025 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3026 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3027 (unsigned long) ltmp);
3028 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3029 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3030 ltmp);
3031 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3032 printf_filtered (_("Virtual memory size: %lu\n"),
3033 (unsigned long) ltmp);
3034 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3035 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3036 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3037 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3038 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3039 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3040 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3041 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3042 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3043 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3044 #if 0 /* Don't know how architecture-dependent the rest is...
3045 Anyway the signal bitmap info is available from "status". */
3046 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3047 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3048 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3049 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3050 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3051 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3052 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3053 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3054 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3055 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3056 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3057 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3058 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3059 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
3060 #endif
3061 fclose (procfile);
3062 }
3063 else
3064 warning (_("unable to open /proc file '%s'"), fname1);
3065 }
3066 }
3067
3068 /* Implement the to_xfer_partial interface for memory reads using the /proc
3069 filesystem. Because we can use a single read() call for /proc, this
3070 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3071 but it doesn't support writes. */
3072
3073 static LONGEST
3074 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3075 const char *annex, gdb_byte *readbuf,
3076 const gdb_byte *writebuf,
3077 ULONGEST offset, LONGEST len)
3078 {
3079 LONGEST ret;
3080 int fd;
3081 char filename[64];
3082
3083 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3084 return 0;
3085
3086 /* Don't bother for short reads of a word or two; returning zero here lets the caller fall back to ptrace. */
3087 if (len < 3 * sizeof (long))
3088 return 0;
3089
3090 /* We could keep this file open and cache it - possibly one per
3091 thread. That requires some juggling, but is even faster. */
3092 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3093 fd = open (filename, O_RDONLY | O_LARGEFILE);
3094 if (fd == -1)
3095 return 0;
3096
3097 /* If pread64 is available, use it. It's faster if the kernel
3098 supports it (only one syscall), and it's 64-bit safe even on
3099 32-bit platforms (for instance, SPARC debugging a SPARC64
3100 application). */
3101 #ifdef HAVE_PREAD64
3102 if (pread64 (fd, readbuf, len, offset) != len)
3103 #else
3104 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3105 #endif
3106 ret = 0;
3107 else
3108 ret = len;
3109
3110 close (fd);
3111 return ret;
3112 }
3113
3114 /* Parse LINE as a signal set and add its set bits to SIGS. */
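/* The kernel prints these sets as fixed-width hexadecimal masks with
   the most significant digits first; for example, on most Linux targets
   a line of "SigPnd:\t0000000000000200" has only bit 9 set, meaning
   signal 10 (SIGUSR1) is pending.  */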
3115
3116 static void
3117 add_line_to_sigset (const char *line, sigset_t *sigs)
3118 {
3119 int len = strlen (line) - 1;
3120 const char *p;
3121 int signum;
3122
3123 if (line[len] != '\n')
3124 error (_("Could not parse signal set: %s"), line);
3125
3126 p = line;
3127 signum = len * 4;
3128 while (len-- > 0)
3129 {
3130 int digit;
3131
3132 if (*p >= '0' && *p <= '9')
3133 digit = *p - '0';
3134 else if (*p >= 'a' && *p <= 'f')
3135 digit = *p - 'a' + 10;
3136 else
3137 error (_("Could not parse signal set: %s"), line);
3138
3139 signum -= 4;
3140
3141 if (digit & 1)
3142 sigaddset (sigs, signum + 1);
3143 if (digit & 2)
3144 sigaddset (sigs, signum + 2);
3145 if (digit & 4)
3146 sigaddset (sigs, signum + 3);
3147 if (digit & 8)
3148 sigaddset (sigs, signum + 4);
3149
3150 p++;
3151 }
3152 }
3153
3154 /* Find process PID's pending signals from /proc/pid/status and set
3155 SIGS to match. */
3156
3157 void
3158 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3159 {
3160 FILE *procfile;
3161 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3162 int signum;
3163
3164 sigemptyset (pending);
3165 sigemptyset (blocked);
3166 sigemptyset (ignored);
3167 sprintf (fname, "/proc/%d/status", pid);
3168 procfile = fopen (fname, "r");
3169 if (procfile == NULL)
3170 error (_("Could not open %s"), fname);
3171
3172 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3173 {
3174 /* Normal queued signals are on the SigPnd line in the status
3175 file. However, 2.6 kernels also have a "shared" pending
3176 queue for delivering signals to a thread group, so check for
3177 a ShdPnd line also.
3178
3179 Unfortunately some Red Hat kernels include the shared pending
3180 queue but not the ShdPnd status field. */
3181
3182 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3183 add_line_to_sigset (buffer + 8, pending);
3184 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3185 add_line_to_sigset (buffer + 8, pending);
3186 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3187 add_line_to_sigset (buffer + 8, blocked);
3188 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3189 add_line_to_sigset (buffer + 8, ignored);
3190 }
3191
3192 fclose (procfile);
3193 }
3194
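/* to_xfer_partial implementation for the GNU/Linux target: hand AUXV
   requests to the procfs reader, try the fast /proc/PID/mem path for
   memory, and otherwise fall back to the method inherited from
   inf-ptrace.  */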
3195 static LONGEST
3196 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3197 const char *annex, gdb_byte *readbuf,
3198 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3199 {
3200 LONGEST xfer;
3201
3202 if (object == TARGET_OBJECT_AUXV)
3203 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3204 offset, len);
3205
3206 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3207 offset, len);
3208 if (xfer != 0)
3209 return xfer;
3210
3211 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3212 offset, len);
3213 }
3214
3215 /* Create a prototype generic Linux target. The client can override
3216 it with local methods. */
3217
3218 static void
3219 linux_target_install_ops (struct target_ops *t)
3220 {
3221 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3222 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3223 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3224 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
3225 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3226 t->to_post_attach = linux_child_post_attach;
3227 t->to_follow_fork = linux_child_follow_fork;
3228 t->to_find_memory_regions = linux_nat_find_memory_regions;
3229 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3230
3231 super_xfer_partial = t->to_xfer_partial;
3232 t->to_xfer_partial = linux_xfer_partial;
3233 }
3234
3235 struct target_ops *
3236 linux_target (void)
3237 {
3238 struct target_ops *t;
3239
3240 t = inf_ptrace_target ();
3241 linux_target_install_ops (t);
3242
3243 return t;
3244 }
3245
3246 struct target_ops *
3247 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
3248 {
3249 struct target_ops *t;
3250
3251 t = inf_ptrace_trad_target (register_u_offset);
3252 linux_target_install_ops (t);
3253
3254 return t;
3255 }
3256
3257 void
3258 linux_nat_add_target (struct target_ops *t)
3259 {
3260 /* Save the provided single-threaded target. We save this in a separate
3261 variable because another target we've inherited from (e.g. inf-ptrace)
3262 may have saved a pointer to T; we want to use it for the final
3263 process stratum target. */
3264 linux_ops_saved = *t;
3265 linux_ops = &linux_ops_saved;
3266
3267 /* Override some methods for multithreading. */
3268 t->to_attach = linux_nat_attach;
3269 t->to_detach = linux_nat_detach;
3270 t->to_resume = linux_nat_resume;
3271 t->to_wait = linux_nat_wait;
3272 t->to_xfer_partial = linux_nat_xfer_partial;
3273 t->to_kill = linux_nat_kill;
3274 t->to_mourn_inferior = linux_nat_mourn_inferior;
3275 t->to_thread_alive = linux_nat_thread_alive;
3276 t->to_pid_to_str = linux_nat_pid_to_str;
3277 t->to_has_thread_control = tc_schedlock;
3278
3279 /* We don't change the stratum; this target will sit at
3280 process_stratum and thread_db will set at thread_stratum. This
3281 is a little strange, since this is a multi-threaded-capable
3282 target, but we want to be on the stack below thread_db, and we
3283 also want to be used for single-threaded processes. */
3284
3285 add_target (t);
3286
3287 /* TODO: Eliminate this and have libthread_db use
3288 find_target_beneath. */
3289 thread_db_init (t);
3290 }
3291
3292 /* Register a method to call whenever a new thread is attached. */
3293 void
3294 linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
3295 {
3296 /* Save the pointer. We only support a single registered instance
3297 of the GNU/Linux native target, so we do not need to map this to
3298 T. */
3299 linux_nat_new_thread = new_thread;
3300 }
3301
3302 /* Return the saved siginfo associated with PTID. */
3303 struct siginfo *
3304 linux_nat_get_siginfo (ptid_t ptid)
3305 {
3306 struct lwp_info *lp = find_lwp_pid (ptid);
3307
3308 gdb_assert (lp != NULL);
3309
3310 return &lp->siginfo;
3311 }
3312
3313 void
3314 _initialize_linux_nat (void)
3315 {
3316 struct sigaction action;
3317
3318 add_info ("proc", linux_nat_info_proc_cmd, _("\
3319 Show /proc process information about any running process.\n\
3320 Specify any process id, or use the program being debugged by default.\n\
3321 Specify any of the following keywords for detailed info:\n\
3322 mappings -- list of mapped memory regions.\n\
3323 stat -- list process statistics from /proc/PID/stat.\n\
3324 status -- list process status from /proc/PID/status.\n\
3325 all -- list all available /proc info."));
3326
3327 /* Save the original signal mask. */
3328 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3329
3330 action.sa_handler = sigchld_handler;
3331 sigemptyset (&action.sa_mask);
3332 action.sa_flags = SA_RESTART;
3333 sigaction (SIGCHLD, &action, NULL);
3334
3335 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3336 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3337 sigdelset (&suspend_mask, SIGCHLD);
3338
3339 sigemptyset (&blocked_mask);
3340
3341 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3342 Set debugging of GNU/Linux lwp module."), _("\
3343 Show debugging of GNU/Linux lwp module."), _("\
3344 Enables printf debugging output."),
3345 NULL,
3346 show_debug_linux_nat,
3347 &setdebuglist, &showdebuglist);
3348 }
3349 \f
3350
3351 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3352 the GNU/Linux Threads library and therefore doesn't really belong
3353 here. */
3354
3355 /* Read variable NAME in the target and return its value if found.
3356 Otherwise return zero. It is assumed that the type of the variable
3357 is `int'. */
3358
3359 static int
3360 get_signo (const char *name)
3361 {
3362 struct minimal_symbol *ms;
3363 int signo;
3364
3365 ms = lookup_minimal_symbol (name, NULL, NULL);
3366 if (ms == NULL)
3367 return 0;
3368
3369 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3370 sizeof (signo)) != 0)
3371 return 0;
3372
3373 return signo;
3374 }
3375
3376 /* Return the set of signals used by the threads library in *SET. */
3377
3378 void
3379 lin_thread_get_thread_signals (sigset_t *set)
3380 {
3381 struct sigaction action;
3382 int restart, cancel;
3383
3384 sigemptyset (set);
3385
3386 restart = get_signo ("__pthread_sig_restart");
3387 cancel = get_signo ("__pthread_sig_cancel");
3388
3389 /* LinuxThreads normally uses the first two RT signals, but in some legacy
3390 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
3391 not provide any way for the debugger to query the signal numbers -
3392 fortunately they don't change! */
3393
3394 if (restart == 0)
3395 restart = __SIGRTMIN;
3396
3397 if (cancel == 0)
3398 cancel = __SIGRTMIN + 1;
3399
3400 sigaddset (set, restart);
3401 sigaddset (set, cancel);
3402
3403 /* The GNU/Linux Threads library makes terminating threads send a
3404 special "cancel" signal instead of SIGCHLD. Make sure we catch
3405 those (to prevent them from terminating GDB itself, which is
3406 likely to be their default action) and treat them the same way as
3407 SIGCHLD. */
3408
3409 action.sa_handler = sigchld_handler;
3410 sigemptyset (&action.sa_mask);
3411 action.sa_flags = SA_RESTART;
3412 sigaction (cancel, &action, NULL);
3413
3414 /* We block the "cancel" signal throughout this code ... */
3415 sigaddset (&blocked_mask, cancel);
3416 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3417
3418 /* ... except during a sigsuspend. */
3419 sigdelset (&suspend_mask, cancel);
3420 }
3421