1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49
50 #ifndef O_LARGEFILE
51 #define O_LARGEFILE 0
52 #endif
53
54 /* If the system headers did not provide the constants, hard-code the normal
55 values. */
56 #ifndef PTRACE_EVENT_FORK
57
58 #define PTRACE_SETOPTIONS 0x4200
59 #define PTRACE_GETEVENTMSG 0x4201
60
 61 /* Options set using PTRACE_SETOPTIONS.  */
62 #define PTRACE_O_TRACESYSGOOD 0x00000001
63 #define PTRACE_O_TRACEFORK 0x00000002
64 #define PTRACE_O_TRACEVFORK 0x00000004
65 #define PTRACE_O_TRACECLONE 0x00000008
66 #define PTRACE_O_TRACEEXEC 0x00000010
67 #define PTRACE_O_TRACEVFORKDONE 0x00000020
68 #define PTRACE_O_TRACEEXIT 0x00000040
69
70 /* Wait extended result codes for the above trace options. */
71 #define PTRACE_EVENT_FORK 1
72 #define PTRACE_EVENT_VFORK 2
73 #define PTRACE_EVENT_CLONE 3
74 #define PTRACE_EVENT_EXEC 4
75 #define PTRACE_EVENT_VFORK_DONE 5
76 #define PTRACE_EVENT_EXIT 6
77
78 #endif /* PTRACE_EVENT_FORK */
79
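/* For reference, a rough sketch of how these event codes are consumed
   (the real handling lives in linux_handle_extended_wait below): an
   extended event arrives as a SIGTRAP stop with the event code in the
   high bits of the waitpid status, and, for fork, vfork and clone
   events, the new child's PID is then fetched with PTRACE_GETEVENTMSG:

     pid = my_waitpid (child, &status, 0);
     if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
         && (status >> 16) != 0)
       {
         int event = status >> 16;
         unsigned long new_pid;

         ptrace (PTRACE_GETEVENTMSG, child, 0, &new_pid);
       }
   */
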
80 /* We can't always assume that this flag is available, but all systems
81 with the ptrace event handlers also have __WALL, so it's safe to use
82 here. */
83 #ifndef __WALL
84 #define __WALL 0x40000000 /* Wait for any child. */
85 #endif
86
87 #ifndef PTRACE_GETSIGINFO
88 #define PTRACE_GETSIGINFO 0x4202
89 #endif
90
91 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
92 the use of the multi-threaded target. */
93 static struct target_ops *linux_ops;
94 static struct target_ops linux_ops_saved;
95
96 /* The method to call, if any, when a new thread is attached. */
97 static void (*linux_nat_new_thread) (ptid_t);
98
99 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
100 Called by our to_xfer_partial. */
101 static LONGEST (*super_xfer_partial) (struct target_ops *,
102 enum target_object,
103 const char *, gdb_byte *,
104 const gdb_byte *,
105 ULONGEST, LONGEST);
106
107 static int debug_linux_nat;
108 static void
109 show_debug_linux_nat (struct ui_file *file, int from_tty,
110 struct cmd_list_element *c, const char *value)
111 {
112 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
113 value);
114 }
115
116 static int linux_parent_pid;
117
118 struct simple_pid_list
119 {
120 int pid;
121 int status;
122 struct simple_pid_list *next;
123 };
124 struct simple_pid_list *stopped_pids;
125
126 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
 127 cannot be used, 1 if it can. */
128
129 static int linux_supports_tracefork_flag = -1;
130
131 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
132 PTRACE_O_TRACEVFORKDONE. */
133
134 static int linux_supports_tracevforkdone_flag = -1;
135
136 \f
137 /* Trivial list manipulation functions to keep track of a list of
138 new stopped processes. */
139 static void
140 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
141 {
142 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
143 new_pid->pid = pid;
144 new_pid->status = status;
145 new_pid->next = *listp;
146 *listp = new_pid;
147 }
148
149 static int
150 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
151 {
152 struct simple_pid_list **p;
153
154 for (p = listp; *p != NULL; p = &(*p)->next)
155 if ((*p)->pid == pid)
156 {
157 struct simple_pid_list *next = (*p)->next;
158 *status = (*p)->status;
159 xfree (*p);
160 *p = next;
161 return 1;
162 }
163 return 0;
164 }
165
166 static void
167 linux_record_stopped_pid (int pid, int status)
168 {
169 add_to_pid_list (&stopped_pids, pid, status);
170 }
171
172 \f
173 /* A helper function for linux_test_for_tracefork, called after fork (). */
174
175 static void
176 linux_tracefork_child (void)
177 {
178 int ret;
179
180 ptrace (PTRACE_TRACEME, 0, 0, 0);
181 kill (getpid (), SIGSTOP);
182 fork ();
183 _exit (0);
184 }
185
186 /* Wrapper function for waitpid which handles EINTR. */
187
188 static int
189 my_waitpid (int pid, int *status, int flags)
190 {
191 int ret;
192 do
193 {
194 ret = waitpid (pid, status, flags);
195 }
196 while (ret == -1 && errno == EINTR);
197
198 return ret;
199 }
200
201 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
202
203 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
204 we know that the feature is not available. This may change the tracing
205 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
206
207 However, if it succeeds, we don't know for sure that the feature is
208 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
209 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
210 fork tracing, and let it fork. If the process exits, we assume that we
211 can't use TRACEFORK; if we get the fork notification, and we can extract
212 the new child's PID, then we assume that we can. */
213
214 static void
215 linux_test_for_tracefork (int original_pid)
216 {
217 int child_pid, ret, status;
218 long second_pid;
219
220 linux_supports_tracefork_flag = 0;
221 linux_supports_tracevforkdone_flag = 0;
222
223 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
224 if (ret != 0)
225 return;
226
227 child_pid = fork ();
228 if (child_pid == -1)
229 perror_with_name (("fork"));
230
231 if (child_pid == 0)
232 linux_tracefork_child ();
233
234 ret = my_waitpid (child_pid, &status, 0);
235 if (ret == -1)
236 perror_with_name (("waitpid"));
237 else if (ret != child_pid)
238 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
239 if (! WIFSTOPPED (status))
240 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
241
242 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
243 if (ret != 0)
244 {
245 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
246 if (ret != 0)
247 {
248 warning (_("linux_test_for_tracefork: failed to kill child"));
249 return;
250 }
251
252 ret = my_waitpid (child_pid, &status, 0);
253 if (ret != child_pid)
254 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
255 else if (!WIFSIGNALED (status))
256 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
257 "killed child"), status);
258
259 return;
260 }
261
262 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
263 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
264 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
265 linux_supports_tracevforkdone_flag = (ret == 0);
266
267 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
268 if (ret != 0)
269 warning (_("linux_test_for_tracefork: failed to resume child"));
270
271 ret = my_waitpid (child_pid, &status, 0);
272
273 if (ret == child_pid && WIFSTOPPED (status)
274 && status >> 16 == PTRACE_EVENT_FORK)
275 {
276 second_pid = 0;
277 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
278 if (ret == 0 && second_pid != 0)
279 {
280 int second_status;
281
282 linux_supports_tracefork_flag = 1;
283 my_waitpid (second_pid, &second_status, 0);
284 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
285 if (ret != 0)
286 warning (_("linux_test_for_tracefork: failed to kill second child"));
287 my_waitpid (second_pid, &status, 0);
288 }
289 }
290 else
291 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
292 "(%d, status 0x%x)"), ret, status);
293
294 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
295 if (ret != 0)
296 warning (_("linux_test_for_tracefork: failed to kill child"));
297 my_waitpid (child_pid, &status, 0);
298 }
299
300 /* Return non-zero iff we have tracefork functionality available.
301 This function also sets linux_supports_tracefork_flag. */
302
303 static int
304 linux_supports_tracefork (int pid)
305 {
306 if (linux_supports_tracefork_flag == -1)
307 linux_test_for_tracefork (pid);
308 return linux_supports_tracefork_flag;
309 }
310
311 static int
312 linux_supports_tracevforkdone (int pid)
313 {
314 if (linux_supports_tracefork_flag == -1)
315 linux_test_for_tracefork (pid);
316 return linux_supports_tracevforkdone_flag;
317 }
318
319 \f
320 void
321 linux_enable_event_reporting (ptid_t ptid)
322 {
323 int pid = ptid_get_lwp (ptid);
324 int options;
325
326 if (pid == 0)
327 pid = ptid_get_pid (ptid);
328
329 if (! linux_supports_tracefork (pid))
330 return;
331
332 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
333 | PTRACE_O_TRACECLONE;
334 if (linux_supports_tracevforkdone (pid))
335 options |= PTRACE_O_TRACEVFORKDONE;
336
337 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
338 read-only process state. */
339
340 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
341 }
342
343 static void
344 linux_child_post_attach (int pid)
345 {
346 linux_enable_event_reporting (pid_to_ptid (pid));
347 check_for_thread_db ();
348 }
349
350 static void
351 linux_child_post_startup_inferior (ptid_t ptid)
352 {
353 linux_enable_event_reporting (ptid);
354 check_for_thread_db ();
355 }
356
357 static int
358 linux_child_follow_fork (struct target_ops *ops, int follow_child)
359 {
360 ptid_t last_ptid;
361 struct target_waitstatus last_status;
362 int has_vforked;
363 int parent_pid, child_pid;
364
365 get_last_target_status (&last_ptid, &last_status);
366 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
367 parent_pid = ptid_get_lwp (last_ptid);
368 if (parent_pid == 0)
369 parent_pid = ptid_get_pid (last_ptid);
370 child_pid = last_status.value.related_pid;
371
372 if (! follow_child)
373 {
374 /* We're already attached to the parent, by default. */
375
376 /* Before detaching from the child, remove all breakpoints from
377 it. (This won't actually modify the breakpoint list, but will
378 physically remove the breakpoints from the child.) */
379 /* If we vforked this will remove the breakpoints from the parent
380 also, but they'll be reinserted below. */
381 detach_breakpoints (child_pid);
382
383 /* Detach new forked process? */
384 if (detach_fork)
385 {
386 if (info_verbose || debug_linux_nat)
387 {
388 target_terminal_ours ();
389 fprintf_filtered (gdb_stdlog,
390 "Detaching after fork from child process %d.\n",
391 child_pid);
392 }
393
394 ptrace (PTRACE_DETACH, child_pid, 0, 0);
395 }
396 else
397 {
398 struct fork_info *fp;
399 /* Retain child fork in ptrace (stopped) state. */
400 fp = find_fork_pid (child_pid);
401 if (!fp)
402 fp = add_fork (child_pid);
403 fork_save_infrun_state (fp, 0);
404 }
405
406 if (has_vforked)
407 {
408 gdb_assert (linux_supports_tracefork_flag >= 0);
409 if (linux_supports_tracevforkdone (0))
410 {
411 int status;
412
413 ptrace (PTRACE_CONT, parent_pid, 0, 0);
414 my_waitpid (parent_pid, &status, __WALL);
415 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
416 warning (_("Unexpected waitpid result %06x when waiting for "
417 "vfork-done"), status);
418 }
419 else
420 {
421 /* We can't insert breakpoints until the child has
422 finished with the shared memory region. We need to
423 wait until that happens. Ideal would be to just
424 call:
425 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
426 - waitpid (parent_pid, &status, __WALL);
427 However, most architectures can't handle a syscall
428 being traced on the way out if it wasn't traced on
429 the way in.
430
431 We might also think to loop, continuing the child
432 until it exits or gets a SIGTRAP. One problem is
433 that the child might call ptrace with PTRACE_TRACEME.
434
435 There's no simple and reliable way to figure out when
436 the vforked child will be done with its copy of the
437 shared memory. We could step it out of the syscall,
438 two instructions, let it go, and then single-step the
439 parent once. When we have hardware single-step, this
440 would work; with software single-step it could still
441 be made to work but we'd have to be able to insert
442 single-step breakpoints in the child, and we'd have
443 to insert -just- the single-step breakpoint in the
444 parent. Very awkward.
445
446 In the end, the best we can do is to make sure it
447 runs for a little while. Hopefully it will be out of
448 range of any breakpoints we reinsert. Usually this
449 is only the single-step breakpoint at vfork's return
450 point. */
451
452 usleep (10000);
453 }
454
455 /* Since we vforked, breakpoints were removed in the parent
456 too. Put them back. */
457 reattach_breakpoints (parent_pid);
458 }
459 }
460 else
461 {
462 char child_pid_spelling[40];
463
464 /* Needed to keep the breakpoint lists in sync. */
465 if (! has_vforked)
466 detach_breakpoints (child_pid);
467
468 /* Before detaching from the parent, remove all breakpoints from it. */
469 remove_breakpoints ();
470
471 if (info_verbose || debug_linux_nat)
472 {
473 target_terminal_ours ();
474 fprintf_filtered (gdb_stdlog,
475 "Attaching after fork to child process %d.\n",
476 child_pid);
477 }
478
479 /* If we're vforking, we may want to hold on to the parent until
480 the child exits or execs. At exec time we can remove the old
481 breakpoints from the parent and detach it; at exit time we
482 could do the same (or even, sneakily, resume debugging it - the
483 child's exec has failed, or something similar).
484
485 This doesn't clean up "properly", because we can't call
486 target_detach, but that's OK; if the current target is "child",
487 then it doesn't need any further cleanups, and lin_lwp will
488 generally not encounter vfork (vfork is defined to fork
489 in libpthread.so).
490
491 The holding part is very easy if we have VFORKDONE events;
492 but keeping track of both processes is beyond GDB at the
493 moment. So we don't expose the parent to the rest of GDB.
494 Instead we quietly hold onto it until such time as we can
495 safely resume it. */
496
497 if (has_vforked)
498 linux_parent_pid = parent_pid;
499 else if (!detach_fork)
500 {
501 struct fork_info *fp;
502 /* Retain parent fork in ptrace (stopped) state. */
503 fp = find_fork_pid (parent_pid);
504 if (!fp)
505 fp = add_fork (parent_pid);
506 fork_save_infrun_state (fp, 0);
507 }
508 else
509 {
510 target_detach (NULL, 0);
511 }
512
513 inferior_ptid = ptid_build (child_pid, child_pid, 0);
514
515 /* Reinstall ourselves, since we might have been removed in
516 target_detach (which does other necessary cleanup). */
517
518 push_target (ops);
519 linux_nat_switch_fork (inferior_ptid);
520 check_for_thread_db ();
521
522 /* Reset breakpoints in the child as appropriate. */
523 follow_inferior_reset_breakpoints ();
524 }
525
526 return 0;
527 }
528
529 \f
530 static void
531 linux_child_insert_fork_catchpoint (int pid)
532 {
533 if (! linux_supports_tracefork (pid))
534 error (_("Your system does not support fork catchpoints."));
535 }
536
537 static void
538 linux_child_insert_vfork_catchpoint (int pid)
539 {
540 if (!linux_supports_tracefork (pid))
541 error (_("Your system does not support vfork catchpoints."));
542 }
543
544 static void
545 linux_child_insert_exec_catchpoint (int pid)
546 {
547 if (!linux_supports_tracefork (pid))
548 error (_("Your system does not support exec catchpoints."));
549 }
550
 551 /* On GNU/Linux there are no real LWPs. The closest thing to LWPs
552 are processes sharing the same VM space. A multi-threaded process
553 is basically a group of such processes. However, such a grouping
554 is almost entirely a user-space issue; the kernel doesn't enforce
555 such a grouping at all (this might change in the future). In
556 general, we'll rely on the threads library (i.e. the GNU/Linux
557 Threads library) to provide such a grouping.
558
 559 It is perfectly possible to write a multi-threaded application
560 without the assistance of a threads library, by using the clone
561 system call directly. This module should be able to give some
562 rudimentary support for debugging such applications if developers
563 specify the CLONE_PTRACE flag in the clone system call, and are
564 using the Linux kernel 2.4 or above.
565
566 Note that there are some peculiarities in GNU/Linux that affect
567 this code:
568
569 - In general one should specify the __WCLONE flag to waitpid in
570 order to make it report events for any of the cloned processes
571 (and leave it out for the initial process). However, if a cloned
 572 process has exited, the exit status is only reported if the
573 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
574 we cannot use it since GDB must work on older systems too.
575
576 - When a traced, cloned process exits and is waited for by the
577 debugger, the kernel reassigns it to the original parent and
578 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
579 library doesn't notice this, which leads to the "zombie problem":
 580 When debugged, a multi-threaded process that spawns a lot of
581 threads will run out of processes, even if the threads exit,
582 because the "zombies" stay around. */
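
/* As a consequence, waiting for a particular LWP in this file uses a
   retry idiom (a sketch only; see lin_lwp_attach_lwp and wait_lwp
   below for the real thing): wait without __WCLONE first, and if that
   fails with ECHILD, assume the LWP was created with clone and retry
   with __WCLONE:

     pid = my_waitpid (lwpid, &status, 0);
     if (pid == -1 && errno == ECHILD)
       pid = my_waitpid (lwpid, &status, __WCLONE);
   */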
583
584 /* List of known LWPs. */
585 struct lwp_info *lwp_list;
586
587 /* Number of LWPs in the list. */
588 static int num_lwps;
589 \f
590
591 #define GET_LWP(ptid) ptid_get_lwp (ptid)
592 #define GET_PID(ptid) ptid_get_pid (ptid)
593 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
594 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
595
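/* For example, the initial thread of process 1234 is represented here
   as BUILD_LWP (1234, 1234), i.e. ptid_build (1234, 1234, 0); see
   linux_nat_attach and linux_nat_wait below.  */
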
596 /* If the last reported event was a SIGTRAP, this variable is set to
597 the process id of the LWP/thread that got it. */
598 ptid_t trap_ptid;
599 \f
600
601 /* Since we cannot wait (in linux_nat_wait) for the initial process and
602 any cloned processes with a single call to waitpid, we have to use
603 the WNOHANG flag and call waitpid in a loop. To optimize
604 things a bit we use `sigsuspend' to wake us up when a process has
605 something to report (it will send us a SIGCHLD if it has). To make
606 this work we have to juggle with the signal mask. We save the
607 original signal mask such that we can restore it before creating a
608 new process in order to avoid blocking certain signals in the
609 inferior. We then block SIGCHLD during the waitpid/sigsuspend
610 loop. */
611
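/* Schematically, that loop amounts to the sketch below (the real loop
   in linux_nat_wait also alternates the __WCLONE flag between waitpid
   calls and does per-LWP bookkeeping, which is omitted here):

     sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
     while (status == 0)
       {
         lwpid = my_waitpid (pid, &status, options | WNOHANG);
         if (lwpid <= 0)
           sigsuspend (&suspend_mask);
       }
   */
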
612 /* Original signal mask. */
613 static sigset_t normal_mask;
614
615 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
616 _initialize_linux_nat. */
617 static sigset_t suspend_mask;
618
 619 /* Signals to block to make sigsuspend work. */
620 static sigset_t blocked_mask;
621 \f
622
623 /* Prototypes for local functions. */
624 static int stop_wait_callback (struct lwp_info *lp, void *data);
625 static int linux_nat_thread_alive (ptid_t ptid);
626 static char *linux_child_pid_to_exec_file (int pid);
627 \f
628 /* Convert wait status STATUS to a string. Used for printing debug
629 messages only. */
630
631 static char *
632 status_to_str (int status)
633 {
634 static char buf[64];
635
636 if (WIFSTOPPED (status))
637 snprintf (buf, sizeof (buf), "%s (stopped)",
638 strsignal (WSTOPSIG (status)));
639 else if (WIFSIGNALED (status))
640 snprintf (buf, sizeof (buf), "%s (terminated)",
 641 strsignal (WTERMSIG (status)));
642 else
643 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
644
645 return buf;
646 }
647
648 /* Initialize the list of LWPs. Note that this module, contrary to
649 what GDB's generic threads layer does for its thread list,
650 re-initializes the LWP lists whenever we mourn or detach (which
651 doesn't involve mourning) the inferior. */
652
653 static void
654 init_lwp_list (void)
655 {
656 struct lwp_info *lp, *lpnext;
657
658 for (lp = lwp_list; lp; lp = lpnext)
659 {
660 lpnext = lp->next;
661 xfree (lp);
662 }
663
664 lwp_list = NULL;
665 num_lwps = 0;
666 }
667
 668 /* Add the LWP specified by PTID to the list. Return a pointer to the
669 structure describing the new LWP. The LWP should already be stopped
670 (with an exception for the very first LWP). */
671
672 static struct lwp_info *
673 add_lwp (ptid_t ptid)
674 {
675 struct lwp_info *lp;
676
677 gdb_assert (is_lwp (ptid));
678
679 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
680
681 memset (lp, 0, sizeof (struct lwp_info));
682
683 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
684
685 lp->ptid = ptid;
686
687 lp->next = lwp_list;
688 lwp_list = lp;
689 ++num_lwps;
690
691 if (num_lwps > 1 && linux_nat_new_thread != NULL)
692 linux_nat_new_thread (ptid);
693
694 return lp;
695 }
696
 697 /* Remove the LWP specified by PTID from the list. */
698
699 static void
700 delete_lwp (ptid_t ptid)
701 {
702 struct lwp_info *lp, *lpprev;
703
704 lpprev = NULL;
705
706 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
707 if (ptid_equal (lp->ptid, ptid))
708 break;
709
710 if (!lp)
711 return;
712
713 num_lwps--;
714
715 if (lpprev)
716 lpprev->next = lp->next;
717 else
718 lwp_list = lp->next;
719
720 xfree (lp);
721 }
722
723 /* Return a pointer to the structure describing the LWP corresponding
 724 to PTID. If no corresponding LWP could be found, return NULL. */
725
726 static struct lwp_info *
727 find_lwp_pid (ptid_t ptid)
728 {
729 struct lwp_info *lp;
730 int lwp;
731
732 if (is_lwp (ptid))
733 lwp = GET_LWP (ptid);
734 else
735 lwp = GET_PID (ptid);
736
737 for (lp = lwp_list; lp; lp = lp->next)
738 if (lwp == GET_LWP (lp->ptid))
739 return lp;
740
741 return NULL;
742 }
743
744 /* Call CALLBACK with its second argument set to DATA for every LWP in
745 the list. If CALLBACK returns 1 for a particular LWP, return a
746 pointer to the structure describing that LWP immediately.
747 Otherwise return NULL. */
748
749 struct lwp_info *
750 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
751 {
752 struct lwp_info *lp, *lpnext;
753
754 for (lp = lwp_list; lp; lp = lpnext)
755 {
756 lpnext = lp->next;
757 if ((*callback) (lp, data))
758 return lp;
759 }
760
761 return NULL;
762 }
763
764 /* Update our internal state when changing from one fork (checkpoint,
765 et cetera) to another indicated by NEW_PTID. We can only switch
766 single-threaded applications, so we only create one new LWP, and
767 the previous list is discarded. */
768
769 void
770 linux_nat_switch_fork (ptid_t new_ptid)
771 {
772 struct lwp_info *lp;
773
774 init_lwp_list ();
775 lp = add_lwp (new_ptid);
776 lp->stopped = 1;
777 }
778
779 /* Record a PTID for later deletion. */
780
781 struct saved_ptids
782 {
783 ptid_t ptid;
784 struct saved_ptids *next;
785 };
786 static struct saved_ptids *threads_to_delete;
787
788 static void
789 record_dead_thread (ptid_t ptid)
790 {
791 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
792 p->ptid = ptid;
793 p->next = threads_to_delete;
794 threads_to_delete = p;
795 }
796
797 /* Delete any dead threads which are not the current thread. */
798
799 static void
800 prune_lwps (void)
801 {
802 struct saved_ptids **p = &threads_to_delete;
803
804 while (*p)
805 if (! ptid_equal ((*p)->ptid, inferior_ptid))
806 {
807 struct saved_ptids *tmp = *p;
808 delete_thread (tmp->ptid);
809 *p = tmp->next;
810 xfree (tmp);
811 }
812 else
813 p = &(*p)->next;
814 }
815
816 /* Callback for iterate_over_threads that finds a thread corresponding
817 to the given LWP. */
818
819 static int
820 find_thread_from_lwp (struct thread_info *thr, void *dummy)
821 {
822 ptid_t *ptid_p = dummy;
823
824 if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p))
825 return 1;
826 else
827 return 0;
828 }
829
830 /* Handle the exit of a single thread LP. */
831
832 static void
833 exit_lwp (struct lwp_info *lp)
834 {
835 if (in_thread_list (lp->ptid))
836 {
837 /* Core GDB cannot deal with us deleting the current thread. */
838 if (!ptid_equal (lp->ptid, inferior_ptid))
839 delete_thread (lp->ptid);
840 else
841 record_dead_thread (lp->ptid);
842 printf_unfiltered (_("[%s exited]\n"),
843 target_pid_to_str (lp->ptid));
844 }
845 else
846 {
847 /* Even if LP->PTID is not in the global GDB thread list, the
848 LWP may be - with an additional thread ID. We don't need
849 to print anything in this case; thread_db is in use and
850 already took care of that. But it didn't delete the thread
851 in order to handle zombies correctly. */
852
853 struct thread_info *thr;
854
855 thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid);
856 if (thr)
857 {
858 if (!ptid_equal (thr->ptid, inferior_ptid))
859 delete_thread (thr->ptid);
860 else
861 record_dead_thread (thr->ptid);
862 }
863 }
864
865 delete_lwp (lp->ptid);
866 }
867
 868 /* Attach to the LWP specified by PTID. Return 0 if successful or -1
 869 if the new LWP could not be attached. */
872
873 int
874 lin_lwp_attach_lwp (ptid_t ptid)
875 {
876 struct lwp_info *lp;
877
878 gdb_assert (is_lwp (ptid));
879
880 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
881 to interrupt either the ptrace() or waitpid() calls below. */
882 if (!sigismember (&blocked_mask, SIGCHLD))
883 {
884 sigaddset (&blocked_mask, SIGCHLD);
885 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
886 }
887
888 lp = find_lwp_pid (ptid);
889
890 /* We assume that we're already attached to any LWP that has an id
891 equal to the overall process id, and to any LWP that is already
892 in our list of LWPs. If we're not seeing exit events from threads
893 and we've had PID wraparound since we last tried to stop all threads,
894 this assumption might be wrong; fortunately, this is very unlikely
895 to happen. */
896 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
897 {
898 pid_t pid;
899 int status;
900 int cloned = 0;
901
902 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
903 {
904 /* If we fail to attach to the thread, issue a warning,
905 but continue. One way this can happen is if thread
906 creation is interrupted; as of Linux kernel 2.6.19, a
907 bug may place threads in the thread list and then fail
908 to create them. */
909 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
910 safe_strerror (errno));
911 return -1;
912 }
913
914 if (debug_linux_nat)
915 fprintf_unfiltered (gdb_stdlog,
916 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
917 target_pid_to_str (ptid));
918
919 pid = my_waitpid (GET_LWP (ptid), &status, 0);
920 if (pid == -1 && errno == ECHILD)
921 {
922 /* Try again with __WCLONE to check cloned processes. */
923 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
924 cloned = 1;
925 }
926
927 gdb_assert (pid == GET_LWP (ptid)
928 && WIFSTOPPED (status) && WSTOPSIG (status));
929
930 if (lp == NULL)
931 lp = add_lwp (ptid);
932 lp->cloned = cloned;
933
934 target_post_attach (pid);
935
936 lp->stopped = 1;
937
938 if (debug_linux_nat)
939 {
940 fprintf_unfiltered (gdb_stdlog,
941 "LLAL: waitpid %s received %s\n",
942 target_pid_to_str (ptid),
943 status_to_str (status));
944 }
945 }
946 else
947 {
948 /* We assume that the LWP representing the original process is
949 already stopped. Mark it as stopped in the data structure
 950 that the GNU/Linux ptrace layer uses to keep track of
951 threads. Note that this won't have already been done since
952 the main thread will have, we assume, been stopped by an
953 attach from a different layer. */
954 if (lp == NULL)
955 lp = add_lwp (ptid);
956 lp->stopped = 1;
957 }
958
959 return 0;
960 }
961
962 static void
963 linux_nat_attach (char *args, int from_tty)
964 {
965 struct lwp_info *lp;
966 pid_t pid;
967 int status;
968 int cloned = 0;
969
970 /* FIXME: We should probably accept a list of process id's, and
971 attach all of them. */
972 linux_ops->to_attach (args, from_tty);
973
974 /* Make sure the initial process is stopped. The user-level threads
975 layer might want to poke around in the inferior, and that won't
976 work if things haven't stabilized yet. */
977 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
978 if (pid == -1 && errno == ECHILD)
979 {
980 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
981
982 /* Try again with __WCLONE to check cloned processes. */
983 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
984 cloned = 1;
985 }
986
987 gdb_assert (pid == GET_PID (inferior_ptid)
988 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
989
990 /* Add the initial process as the first LWP to the list. */
991 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
992 lp = add_lwp (inferior_ptid);
993 lp->cloned = cloned;
994
995 lp->stopped = 1;
996
997 /* Fake the SIGSTOP that core GDB expects. */
998 lp->status = W_STOPCODE (SIGSTOP);
999 lp->resumed = 1;
1000 if (debug_linux_nat)
1001 {
1002 fprintf_unfiltered (gdb_stdlog,
1003 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
1004 }
1005 }
1006
1007 static int
1008 detach_callback (struct lwp_info *lp, void *data)
1009 {
1010 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1011
1012 if (debug_linux_nat && lp->status)
1013 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1014 strsignal (WSTOPSIG (lp->status)),
1015 target_pid_to_str (lp->ptid));
1016
1017 while (lp->signalled && lp->stopped)
1018 {
1019 errno = 0;
1020 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1021 WSTOPSIG (lp->status)) < 0)
1022 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
1023 safe_strerror (errno));
1024
1025 if (debug_linux_nat)
1026 fprintf_unfiltered (gdb_stdlog,
1027 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1028 target_pid_to_str (lp->ptid),
1029 status_to_str (lp->status));
1030
1031 lp->stopped = 0;
1032 lp->signalled = 0;
1033 lp->status = 0;
1034 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1035 here. But since lp->signalled was cleared above,
1036 stop_wait_callback didn't do anything; the process was left
1037 running. Shouldn't we be waiting for it to stop?
1038 I've removed the call, since stop_wait_callback now does do
1039 something when called with lp->signalled == 0. */
1040
1041 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1042 }
1043
1044 /* We don't actually detach from the LWP that has an id equal to the
1045 overall process id just yet. */
1046 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1047 {
1048 errno = 0;
1049 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1050 WSTOPSIG (lp->status)) < 0)
1051 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1052 safe_strerror (errno));
1053
1054 if (debug_linux_nat)
1055 fprintf_unfiltered (gdb_stdlog,
1056 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1057 target_pid_to_str (lp->ptid),
1058 strsignal (WSTOPSIG (lp->status)));
1059
1060 delete_lwp (lp->ptid);
1061 }
1062
1063 return 0;
1064 }
1065
1066 static void
1067 linux_nat_detach (char *args, int from_tty)
1068 {
1069 iterate_over_lwps (detach_callback, NULL);
1070
1071 /* Only the initial process should be left right now. */
1072 gdb_assert (num_lwps == 1);
1073
1074 trap_ptid = null_ptid;
1075
1076 /* Destroy LWP info; it's no longer valid. */
1077 init_lwp_list ();
1078
1079 /* Restore the original signal mask. */
1080 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1081 sigemptyset (&blocked_mask);
1082
1083 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1084 linux_ops->to_detach (args, from_tty);
1085 }
1086
1087 /* Resume LP. */
1088
1089 static int
1090 resume_callback (struct lwp_info *lp, void *data)
1091 {
1092 if (lp->stopped && lp->status == 0)
1093 {
1094 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1095 0, TARGET_SIGNAL_0);
1096 if (debug_linux_nat)
1097 fprintf_unfiltered (gdb_stdlog,
1098 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1099 target_pid_to_str (lp->ptid));
1100 lp->stopped = 0;
1101 lp->step = 0;
1102 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1103 }
1104
1105 return 0;
1106 }
1107
1108 static int
1109 resume_clear_callback (struct lwp_info *lp, void *data)
1110 {
1111 lp->resumed = 0;
1112 return 0;
1113 }
1114
1115 static int
1116 resume_set_callback (struct lwp_info *lp, void *data)
1117 {
1118 lp->resumed = 1;
1119 return 0;
1120 }
1121
1122 static void
1123 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1124 {
1125 struct lwp_info *lp;
1126 int resume_all;
1127
1128 if (debug_linux_nat)
1129 fprintf_unfiltered (gdb_stdlog,
1130 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1131 step ? "step" : "resume",
1132 target_pid_to_str (ptid),
1133 signo ? strsignal (signo) : "0",
1134 target_pid_to_str (inferior_ptid));
1135
1136 prune_lwps ();
1137
1138 /* A specific PTID means `step only this process id'. */
1139 resume_all = (PIDGET (ptid) == -1);
1140
1141 if (resume_all)
1142 iterate_over_lwps (resume_set_callback, NULL);
1143 else
1144 iterate_over_lwps (resume_clear_callback, NULL);
1145
1146 /* If PID is -1, it's the current inferior that should be
1147 handled specially. */
1148 if (PIDGET (ptid) == -1)
1149 ptid = inferior_ptid;
1150
1151 lp = find_lwp_pid (ptid);
1152 gdb_assert (lp != NULL);
1153
1154 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1155
1156 /* Remember if we're stepping. */
1157 lp->step = step;
1158
1159 /* Mark this LWP as resumed. */
1160 lp->resumed = 1;
1161
1162 /* If we have a pending wait status for this thread, there is no
1163 point in resuming the process. But first make sure that
1164 linux_nat_wait won't preemptively handle the event - we
1165 should never take this short-circuit if we are going to
1166 leave LP running, since we have skipped resuming all the
1167 other threads. This bit of code needs to be synchronized
1168 with linux_nat_wait. */
1169
1170 if (lp->status && WIFSTOPPED (lp->status))
1171 {
1172 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1173
1174 if (signal_stop_state (saved_signo) == 0
1175 && signal_print_state (saved_signo) == 0
1176 && signal_pass_state (saved_signo) == 1)
1177 {
1178 if (debug_linux_nat)
1179 fprintf_unfiltered (gdb_stdlog,
1180 "LLR: Not short circuiting for ignored "
1181 "status 0x%x\n", lp->status);
1182
1183 /* FIXME: What should we do if we are supposed to continue
1184 this thread with a signal? */
1185 gdb_assert (signo == TARGET_SIGNAL_0);
1186 signo = saved_signo;
1187 lp->status = 0;
1188 }
1189 }
1190
1191 if (lp->status)
1192 {
1193 /* FIXME: What should we do if we are supposed to continue
1194 this thread with a signal? */
1195 gdb_assert (signo == TARGET_SIGNAL_0);
1196
1197 if (debug_linux_nat)
1198 fprintf_unfiltered (gdb_stdlog,
1199 "LLR: Short circuiting for status 0x%x\n",
1200 lp->status);
1201
1202 return;
1203 }
1204
1205 /* Mark LWP as not stopped to prevent it from being continued by
1206 resume_callback. */
1207 lp->stopped = 0;
1208
1209 if (resume_all)
1210 iterate_over_lwps (resume_callback, NULL);
1211
1212 linux_ops->to_resume (ptid, step, signo);
1213 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1214
1215 if (debug_linux_nat)
1216 fprintf_unfiltered (gdb_stdlog,
1217 "LLR: %s %s, %s (resume event thread)\n",
1218 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1219 target_pid_to_str (ptid),
1220 signo ? strsignal (signo) : "0");
1221 }
1222
1223 /* Issue kill to specified lwp. */
1224
1225 static int tkill_failed;
1226
1227 static int
1228 kill_lwp (int lwpid, int signo)
1229 {
1230 errno = 0;
1231
1232 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1233 fails, then we are not using nptl threads and we should be using kill. */
1234
1235 #ifdef HAVE_TKILL_SYSCALL
1236 if (!tkill_failed)
1237 {
1238 int ret = syscall (__NR_tkill, lwpid, signo);
1239 if (errno != ENOSYS)
1240 return ret;
1241 errno = 0;
1242 tkill_failed = 1;
1243 }
1244 #endif
1245
1246 return kill (lwpid, signo);
1247 }
1248
1249 /* Handle a GNU/Linux extended wait response. If we see a clone
1250 event, we need to add the new LWP to our list (and not report the
1251 trap to higher layers). This function returns non-zero if the
1252 event should be ignored and we should wait again. If STOPPING is
1253 true, the new LWP remains stopped, otherwise it is continued. */
1254
1255 static int
1256 linux_handle_extended_wait (struct lwp_info *lp, int status,
1257 int stopping)
1258 {
1259 int pid = GET_LWP (lp->ptid);
1260 struct target_waitstatus *ourstatus = &lp->waitstatus;
1261 struct lwp_info *new_lp = NULL;
1262 int event = status >> 16;
1263
1264 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1265 || event == PTRACE_EVENT_CLONE)
1266 {
1267 unsigned long new_pid;
1268 int ret;
1269
1270 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1271
1272 /* If we haven't already seen the new PID stop, wait for it now. */
1273 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1274 {
1275 /* The new child has a pending SIGSTOP. We can't affect it until it
1276 hits the SIGSTOP, but we're already attached. */
1277 ret = my_waitpid (new_pid, &status,
1278 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1279 if (ret == -1)
1280 perror_with_name (_("waiting for new child"));
1281 else if (ret != new_pid)
1282 internal_error (__FILE__, __LINE__,
1283 _("wait returned unexpected PID %d"), ret);
1284 else if (!WIFSTOPPED (status))
1285 internal_error (__FILE__, __LINE__,
1286 _("wait returned unexpected status 0x%x"), status);
1287 }
1288
1289 ourstatus->value.related_pid = new_pid;
1290
1291 if (event == PTRACE_EVENT_FORK)
1292 ourstatus->kind = TARGET_WAITKIND_FORKED;
1293 else if (event == PTRACE_EVENT_VFORK)
1294 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1295 else
1296 {
1297 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1298 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1299 new_lp->cloned = 1;
1300
1301 if (WSTOPSIG (status) != SIGSTOP)
1302 {
 1303 /* This can happen if someone sends the new thread a signal
 1304 with a lower number than SIGSTOP (e.g. SIGUSR1) before it
 1305 gets a chance to run.
1306 This is an unlikely case, and harder to handle for
1307 fork / vfork than for clone, so we do not try - but
1308 we handle it for clone events here. We'll send
1309 the other signal on to the thread below. */
1310
1311 new_lp->signalled = 1;
1312 }
1313 else
1314 status = 0;
1315
1316 if (stopping)
1317 new_lp->stopped = 1;
1318 else
1319 {
1320 new_lp->resumed = 1;
1321 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
1322 status ? WSTOPSIG (status) : 0);
1323 }
1324
1325 if (debug_linux_nat)
1326 fprintf_unfiltered (gdb_stdlog,
1327 "LHEW: Got clone event from LWP %ld, resuming\n",
1328 GET_LWP (lp->ptid));
1329 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1330
1331 return 1;
1332 }
1333
1334 return 0;
1335 }
1336
1337 if (event == PTRACE_EVENT_EXEC)
1338 {
1339 ourstatus->kind = TARGET_WAITKIND_EXECD;
1340 ourstatus->value.execd_pathname
1341 = xstrdup (linux_child_pid_to_exec_file (pid));
1342
1343 if (linux_parent_pid)
1344 {
1345 detach_breakpoints (linux_parent_pid);
1346 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1347
1348 linux_parent_pid = 0;
1349 }
1350
1351 return 0;
1352 }
1353
1354 internal_error (__FILE__, __LINE__,
1355 _("unknown ptrace event %d"), event);
1356 }
1357
1358 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1359 exited. */
1360
1361 static int
1362 wait_lwp (struct lwp_info *lp)
1363 {
1364 pid_t pid;
1365 int status;
1366 int thread_dead = 0;
1367
1368 gdb_assert (!lp->stopped);
1369 gdb_assert (lp->status == 0);
1370
1371 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1372 if (pid == -1 && errno == ECHILD)
1373 {
1374 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1375 if (pid == -1 && errno == ECHILD)
1376 {
1377 /* The thread has previously exited. We need to delete it
1378 now because, for some vendor 2.4 kernels with NPTL
1379 support backported, there won't be an exit event unless
1380 it is the main thread. 2.6 kernels will report an exit
1381 event for each thread that exits, as expected. */
1382 thread_dead = 1;
1383 if (debug_linux_nat)
1384 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1385 target_pid_to_str (lp->ptid));
1386 }
1387 }
1388
1389 if (!thread_dead)
1390 {
1391 gdb_assert (pid == GET_LWP (lp->ptid));
1392
1393 if (debug_linux_nat)
1394 {
1395 fprintf_unfiltered (gdb_stdlog,
1396 "WL: waitpid %s received %s\n",
1397 target_pid_to_str (lp->ptid),
1398 status_to_str (status));
1399 }
1400 }
1401
1402 /* Check if the thread has exited. */
1403 if (WIFEXITED (status) || WIFSIGNALED (status))
1404 {
1405 thread_dead = 1;
1406 if (debug_linux_nat)
1407 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1408 target_pid_to_str (lp->ptid));
1409 }
1410
1411 if (thread_dead)
1412 {
1413 exit_lwp (lp);
1414 return 0;
1415 }
1416
1417 gdb_assert (WIFSTOPPED (status));
1418
1419 /* Handle GNU/Linux's extended waitstatus for trace events. */
1420 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1421 {
1422 if (debug_linux_nat)
1423 fprintf_unfiltered (gdb_stdlog,
1424 "WL: Handling extended status 0x%06x\n",
1425 status);
1426 if (linux_handle_extended_wait (lp, status, 1))
1427 return wait_lwp (lp);
1428 }
1429
1430 return status;
1431 }
1432
1433 /* Save the most recent siginfo for LP. This is currently only called
1434 for SIGTRAP; some ports use the si_addr field for
1435 target_stopped_data_address. In the future, it may also be used to
1436 restore the siginfo of requeued signals. */
1437
1438 static void
1439 save_siginfo (struct lwp_info *lp)
1440 {
1441 errno = 0;
1442 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
1443 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
1444
1445 if (errno != 0)
1446 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1447 }
1448
1449 /* Send a SIGSTOP to LP. */
1450
1451 static int
1452 stop_callback (struct lwp_info *lp, void *data)
1453 {
1454 if (!lp->stopped && !lp->signalled)
1455 {
1456 int ret;
1457
1458 if (debug_linux_nat)
1459 {
1460 fprintf_unfiltered (gdb_stdlog,
1461 "SC: kill %s **<SIGSTOP>**\n",
1462 target_pid_to_str (lp->ptid));
1463 }
1464 errno = 0;
1465 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1466 if (debug_linux_nat)
1467 {
1468 fprintf_unfiltered (gdb_stdlog,
1469 "SC: lwp kill %d %s\n",
1470 ret,
1471 errno ? safe_strerror (errno) : "ERRNO-OK");
1472 }
1473
1474 lp->signalled = 1;
1475 gdb_assert (lp->status == 0);
1476 }
1477
1478 return 0;
1479 }
1480
1481 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1482 a pointer to a set of signals to be flushed immediately. */
1483
1484 static int
1485 stop_wait_callback (struct lwp_info *lp, void *data)
1486 {
1487 sigset_t *flush_mask = data;
1488
1489 if (!lp->stopped)
1490 {
1491 int status;
1492
1493 status = wait_lwp (lp);
1494 if (status == 0)
1495 return 0;
1496
1497 /* Ignore any signals in FLUSH_MASK. */
1498 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1499 {
1500 if (!lp->signalled)
1501 {
1502 lp->stopped = 1;
1503 return 0;
1504 }
1505
1506 errno = 0;
1507 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1508 if (debug_linux_nat)
1509 fprintf_unfiltered (gdb_stdlog,
1510 "PTRACE_CONT %s, 0, 0 (%s)\n",
1511 target_pid_to_str (lp->ptid),
1512 errno ? safe_strerror (errno) : "OK");
1513
1514 return stop_wait_callback (lp, flush_mask);
1515 }
1516
1517 if (WSTOPSIG (status) != SIGSTOP)
1518 {
1519 if (WSTOPSIG (status) == SIGTRAP)
1520 {
1521 /* If a LWP other than the LWP that we're reporting an
1522 event for has hit a GDB breakpoint (as opposed to
1523 some random trap signal), then just arrange for it to
1524 hit it again later. We don't keep the SIGTRAP status
1525 and don't forward the SIGTRAP signal to the LWP. We
1526 will handle the current event, eventually we will
1527 resume all LWPs, and this one will get its breakpoint
1528 trap again.
1529
1530 If we do not do this, then we run the risk that the
1531 user will delete or disable the breakpoint, but the
1532 thread will have already tripped on it. */
1533
1534 /* Save the trap's siginfo in case we need it later. */
1535 save_siginfo (lp);
1536
1537 /* Now resume this LWP and get the SIGSTOP event. */
1538 errno = 0;
1539 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1540 if (debug_linux_nat)
1541 {
1542 fprintf_unfiltered (gdb_stdlog,
1543 "PTRACE_CONT %s, 0, 0 (%s)\n",
1544 target_pid_to_str (lp->ptid),
1545 errno ? safe_strerror (errno) : "OK");
1546
1547 fprintf_unfiltered (gdb_stdlog,
1548 "SWC: Candidate SIGTRAP event in %s\n",
1549 target_pid_to_str (lp->ptid));
1550 }
1551 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1552 stop_wait_callback (lp, data);
1553 /* If there's another event, throw it back into the queue. */
1554 if (lp->status)
1555 {
1556 if (debug_linux_nat)
1557 {
1558 fprintf_unfiltered (gdb_stdlog,
1559 "SWC: kill %s, %s\n",
1560 target_pid_to_str (lp->ptid),
1561 status_to_str ((int) status));
1562 }
1563 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1564 }
1565 /* Save the sigtrap event. */
1566 lp->status = status;
1567 return 0;
1568 }
1569 else
1570 {
1571 /* The thread was stopped with a signal other than
1572 SIGSTOP, and didn't accidentally trip a breakpoint. */
1573
1574 if (debug_linux_nat)
1575 {
1576 fprintf_unfiltered (gdb_stdlog,
1577 "SWC: Pending event %s in %s\n",
1578 status_to_str ((int) status),
1579 target_pid_to_str (lp->ptid));
1580 }
1581 /* Now resume this LWP and get the SIGSTOP event. */
1582 errno = 0;
1583 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1584 if (debug_linux_nat)
1585 fprintf_unfiltered (gdb_stdlog,
1586 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1587 target_pid_to_str (lp->ptid),
1588 errno ? safe_strerror (errno) : "OK");
1589
1590 /* Hold this event/waitstatus while we check to see if
1591 there are any more (we still want to get that SIGSTOP). */
1592 stop_wait_callback (lp, data);
1593 /* If the lp->status field is still empty, use it to hold
1594 this event. If not, then this event must be returned
1595 to the event queue of the LWP. */
1596 if (lp->status == 0)
1597 lp->status = status;
1598 else
1599 {
1600 if (debug_linux_nat)
1601 {
1602 fprintf_unfiltered (gdb_stdlog,
1603 "SWC: kill %s, %s\n",
1604 target_pid_to_str (lp->ptid),
1605 status_to_str ((int) status));
1606 }
1607 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1608 }
1609 return 0;
1610 }
1611 }
1612 else
1613 {
1614 /* We caught the SIGSTOP that we intended to catch, so
1615 there's no SIGSTOP pending. */
1616 lp->stopped = 1;
1617 lp->signalled = 0;
1618 }
1619 }
1620
1621 return 0;
1622 }
1623
1624 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1625 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1626
1627 static int
1628 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1629 {
1630 sigset_t blocked, ignored;
1631 int i;
1632
1633 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1634
1635 if (!flush_mask)
1636 return 0;
1637
1638 for (i = 1; i < NSIG; i++)
1639 if (sigismember (pending, i))
1640 if (!sigismember (flush_mask, i)
1641 || sigismember (&blocked, i)
1642 || sigismember (&ignored, i))
1643 sigdelset (pending, i);
1644
1645 if (sigisemptyset (pending))
1646 return 0;
1647
1648 return 1;
1649 }
1650
1651 /* DATA is interpreted as a mask of signals to flush. If LP has
1652 signals pending, and they are all in the flush mask, then arrange
1653 to flush them. LP should be stopped, as should all other threads
1654 it might share a signal queue with. */
1655
1656 static int
1657 flush_callback (struct lwp_info *lp, void *data)
1658 {
1659 sigset_t *flush_mask = data;
1660 sigset_t pending, intersection, blocked, ignored;
1661 int pid, status;
1662
1663 /* Normally, when an LWP exits, it is removed from the LWP list. The
1664 last LWP isn't removed till later, however. So if there is only
1665 one LWP on the list, make sure it's alive. */
1666 if (lwp_list == lp && lp->next == NULL)
1667 if (!linux_nat_thread_alive (lp->ptid))
1668 return 0;
1669
1670 /* Just because the LWP is stopped doesn't mean that new signals
1671 can't arrive from outside, so this function must be careful of
1672 race conditions. However, because all threads are stopped, we
1673 can assume that the pending mask will not shrink unless we resume
1674 the LWP, and that it will then get another signal. We can't
1675 control which one, however. */
1676
1677 if (lp->status)
1678 {
1679 if (debug_linux_nat)
1680 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1681 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1682 lp->status = 0;
1683 }
1684
1685 /* While there is a pending signal we would like to flush, continue
1686 the inferior and collect another signal. But if there's already
1687 a saved status that we don't want to flush, we can't resume the
1688 inferior - if it stopped for some other reason we wouldn't have
1689 anywhere to save the new status. In that case, we must leave the
1690 signal unflushed (and possibly generate an extra SIGINT stop).
1691 That's much less bad than losing a signal. */
1692 while (lp->status == 0
1693 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1694 {
1695 int ret;
1696
1697 errno = 0;
1698 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1699 if (debug_linux_nat)
1700 fprintf_unfiltered (gdb_stderr,
1701 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1702
1703 lp->stopped = 0;
1704 stop_wait_callback (lp, flush_mask);
1705 if (debug_linux_nat)
1706 fprintf_unfiltered (gdb_stderr,
1707 "FC: Wait finished; saved status is %d\n",
1708 lp->status);
1709 }
1710
1711 return 0;
1712 }
1713
1714 /* Return non-zero if LP has a wait status pending. */
1715
1716 static int
1717 status_callback (struct lwp_info *lp, void *data)
1718 {
1719 /* Only report a pending wait status if we pretend that this has
1720 indeed been resumed. */
1721 return (lp->status != 0 && lp->resumed);
1722 }
1723
1724 /* Return non-zero if LP isn't stopped. */
1725
1726 static int
1727 running_callback (struct lwp_info *lp, void *data)
1728 {
1729 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1730 }
1731
 1732 /* Count the LWPs that have had events. */
1733
1734 static int
1735 count_events_callback (struct lwp_info *lp, void *data)
1736 {
1737 int *count = data;
1738
1739 gdb_assert (count != NULL);
1740
1741 /* Count only LWPs that have a SIGTRAP event pending. */
1742 if (lp->status != 0
1743 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1744 (*count)++;
1745
1746 return 0;
1747 }
1748
1749 /* Select the LWP (if any) that is currently being single-stepped. */
1750
1751 static int
1752 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1753 {
1754 if (lp->step && lp->status != 0)
1755 return 1;
1756 else
1757 return 0;
1758 }
1759
1760 /* Select the Nth LWP that has had a SIGTRAP event. */
1761
1762 static int
1763 select_event_lwp_callback (struct lwp_info *lp, void *data)
1764 {
1765 int *selector = data;
1766
1767 gdb_assert (selector != NULL);
1768
1769 /* Select only LWPs that have a SIGTRAP event pending. */
1770 if (lp->status != 0
1771 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1772 if ((*selector)-- == 0)
1773 return 1;
1774
1775 return 0;
1776 }
1777
1778 static int
1779 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1780 {
1781 struct lwp_info *event_lp = data;
1782
1783 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1784 if (lp == event_lp)
1785 return 0;
1786
1787 /* If a LWP other than the LWP that we're reporting an event for has
1788 hit a GDB breakpoint (as opposed to some random trap signal),
1789 then just arrange for it to hit it again later. We don't keep
1790 the SIGTRAP status and don't forward the SIGTRAP signal to the
1791 LWP. We will handle the current event, eventually we will resume
1792 all LWPs, and this one will get its breakpoint trap again.
1793
1794 If we do not do this, then we run the risk that the user will
1795 delete or disable the breakpoint, but the LWP will have already
1796 tripped on it. */
1797
1798 if (lp->status != 0
1799 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1800 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1801 gdbarch_decr_pc_after_break
1802 (current_gdbarch)))
1803 {
1804 if (debug_linux_nat)
1805 fprintf_unfiltered (gdb_stdlog,
1806 "CBC: Push back breakpoint for %s\n",
1807 target_pid_to_str (lp->ptid));
1808
1809 /* Back up the PC if necessary. */
1810 if (gdbarch_decr_pc_after_break (current_gdbarch))
1811 write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break
1812 (current_gdbarch),
1813 lp->ptid);
1814
1815 /* Throw away the SIGTRAP. */
1816 lp->status = 0;
1817 }
1818
1819 return 0;
1820 }
1821
1822 /* Select one LWP out of those that have events pending. */
1823
1824 static void
1825 select_event_lwp (struct lwp_info **orig_lp, int *status)
1826 {
1827 int num_events = 0;
1828 int random_selector;
1829 struct lwp_info *event_lp;
1830
1831 /* Record the wait status for the original LWP. */
1832 (*orig_lp)->status = *status;
1833
1834 /* Give preference to any LWP that is being single-stepped. */
1835 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1836 if (event_lp != NULL)
1837 {
1838 if (debug_linux_nat)
1839 fprintf_unfiltered (gdb_stdlog,
1840 "SEL: Select single-step %s\n",
1841 target_pid_to_str (event_lp->ptid));
1842 }
1843 else
1844 {
1845 /* No single-stepping LWP. Select one at random, out of those
1846 which have had SIGTRAP events. */
1847
1848 /* First see how many SIGTRAP events we have. */
1849 iterate_over_lwps (count_events_callback, &num_events);
1850
1851 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1852 random_selector = (int)
1853 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1854
1855 if (debug_linux_nat && num_events > 1)
1856 fprintf_unfiltered (gdb_stdlog,
1857 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1858 num_events, random_selector);
1859
1860 event_lp = iterate_over_lwps (select_event_lwp_callback,
1861 &random_selector);
1862 }
1863
1864 if (event_lp != NULL)
1865 {
1866 /* Switch the event LWP. */
1867 *orig_lp = event_lp;
1868 *status = event_lp->status;
1869 }
1870
1871 /* Flush the wait status for the event LWP. */
1872 (*orig_lp)->status = 0;
1873 }
1874
1875 /* Return non-zero if LP has been resumed. */
1876
1877 static int
1878 resumed_callback (struct lwp_info *lp, void *data)
1879 {
1880 return lp->resumed;
1881 }
1882
1883 /* Stop an active thread, verify it still exists, then resume it. */
1884
1885 static int
1886 stop_and_resume_callback (struct lwp_info *lp, void *data)
1887 {
1888 struct lwp_info *ptr;
1889
1890 if (!lp->stopped && !lp->signalled)
1891 {
1892 stop_callback (lp, NULL);
1893 stop_wait_callback (lp, NULL);
1894 /* Resume if the lwp still exists. */
1895 for (ptr = lwp_list; ptr; ptr = ptr->next)
1896 if (lp == ptr)
1897 {
1898 resume_callback (lp, NULL);
1899 resume_set_callback (lp, NULL);
1900 }
1901 }
1902 return 0;
1903 }
1904
1905 static ptid_t
1906 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1907 {
1908 struct lwp_info *lp = NULL;
1909 int options = 0;
1910 int status = 0;
1911 pid_t pid = PIDGET (ptid);
1912 sigset_t flush_mask;
1913
1914 /* The first time we get here after starting a new inferior, we may
1915 not have added it to the LWP list yet - this is the earliest
1916 moment at which we know its PID. */
1917 if (num_lwps == 0)
1918 {
1919 gdb_assert (!is_lwp (inferior_ptid));
1920
1921 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1922 GET_PID (inferior_ptid));
1923 lp = add_lwp (inferior_ptid);
1924 lp->resumed = 1;
1925 }
1926
1927 sigemptyset (&flush_mask);
1928
1929 /* Make sure SIGCHLD is blocked. */
1930 if (!sigismember (&blocked_mask, SIGCHLD))
1931 {
1932 sigaddset (&blocked_mask, SIGCHLD);
1933 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1934 }
1935
1936 retry:
1937
1938 /* Make sure there is at least one LWP that has been resumed. */
1939 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
1940
1941 /* First check if there is a LWP with a wait status pending. */
1942 if (pid == -1)
1943 {
1944 /* Any LWP that's been resumed will do. */
1945 lp = iterate_over_lwps (status_callback, NULL);
1946 if (lp)
1947 {
1948 status = lp->status;
1949 lp->status = 0;
1950
1951 if (debug_linux_nat && status)
1952 fprintf_unfiltered (gdb_stdlog,
1953 "LLW: Using pending wait status %s for %s.\n",
1954 status_to_str (status),
1955 target_pid_to_str (lp->ptid));
1956 }
1957
1958 /* But if we don't find one, we'll have to wait, and check both
1959 cloned and uncloned processes. We start with the cloned
1960 processes. */
1961 options = __WCLONE | WNOHANG;
1962 }
1963 else if (is_lwp (ptid))
1964 {
1965 if (debug_linux_nat)
1966 fprintf_unfiltered (gdb_stdlog,
1967 "LLW: Waiting for specific LWP %s.\n",
1968 target_pid_to_str (ptid));
1969
1970 /* We have a specific LWP to check. */
1971 lp = find_lwp_pid (ptid);
1972 gdb_assert (lp);
1973 status = lp->status;
1974 lp->status = 0;
1975
1976 if (debug_linux_nat && status)
1977 fprintf_unfiltered (gdb_stdlog,
1978 "LLW: Using pending wait status %s for %s.\n",
1979 status_to_str (status),
1980 target_pid_to_str (lp->ptid));
1981
1982 /* If we have to wait, take into account whether PID is a cloned
1983 process or not. And we have to convert it to something that
1984 the layer beneath us can understand. */
1985 options = lp->cloned ? __WCLONE : 0;
1986 pid = GET_LWP (ptid);
1987 }
1988
1989 if (status && lp->signalled)
1990 {
1991 /* A pending SIGSTOP may interfere with the normal stream of
1992 events. In a typical case where interference is a problem,
1993 we have a SIGSTOP signal pending for LWP A while
1994 single-stepping it, encounter an event in LWP B, and take the
1995 pending SIGSTOP while trying to stop LWP A. After processing
1996 the event in LWP B, LWP A is continued, and we'll never see
1997 the SIGTRAP associated with the last time we were
1998 single-stepping LWP A. */
1999
2000 /* Resume the thread. It should halt immediately returning the
2001 pending SIGSTOP. */
2002 registers_changed ();
2003 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2004 lp->step, TARGET_SIGNAL_0);
2005 if (debug_linux_nat)
2006 fprintf_unfiltered (gdb_stdlog,
2007 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2008 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2009 target_pid_to_str (lp->ptid));
2010 lp->stopped = 0;
2011 gdb_assert (lp->resumed);
2012
2013 /* This should catch the pending SIGSTOP. */
2014 stop_wait_callback (lp, NULL);
2015 }
2016
2017 set_sigint_trap (); /* Causes SIGINT to be passed on to the
2018 attached process. */
2019 set_sigio_trap ();
2020
2021 while (status == 0)
2022 {
2023 pid_t lwpid;
2024
2025 lwpid = my_waitpid (pid, &status, options);
2026 if (lwpid > 0)
2027 {
2028 gdb_assert (pid == -1 || lwpid == pid);
2029
2030 if (debug_linux_nat)
2031 {
2032 fprintf_unfiltered (gdb_stdlog,
2033 "LLW: waitpid %ld received %s\n",
2034 (long) lwpid, status_to_str (status));
2035 }
2036
2037 lp = find_lwp_pid (pid_to_ptid (lwpid));
2038
2039 /* Check for stop events reported by a process we didn't
2040 already know about - anything not already in our LWP
2041 list.
2042
2043 If we're expecting to receive stopped processes after
2044 fork, vfork, and clone events, then we'll just add the
2045 new one to our list and go back to waiting for the event
2046 to be reported - the stopped process might be returned
2047 from waitpid before or after the event is. */
2048 if (WIFSTOPPED (status) && !lp)
2049 {
2050 linux_record_stopped_pid (lwpid, status);
2051 status = 0;
2052 continue;
2053 }
2054
2055 /* Make sure we don't report an event for the exit of an LWP not in
2056 our list, i.e. not part of the current process. This can happen
2057 if we detach from a program we originally forked and then it
2058 exits. */
2059 if (!WIFSTOPPED (status) && !lp)
2060 {
2061 status = 0;
2062 continue;
2063 }
2064
2065 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2066 CLONE_PTRACE processes which do not use the thread library -
2067 otherwise we wouldn't find the new LWP this way. That doesn't
2068 currently work, and the following code is currently unreachable
2069 due to the two blocks above. If it's fixed some day, this code
2070 should be broken out into a function so that we can also pick up
2071 LWPs from the new interface. */
2072 if (!lp)
2073 {
2074 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2075 if (options & __WCLONE)
2076 lp->cloned = 1;
2077
2078 gdb_assert (WIFSTOPPED (status)
2079 && WSTOPSIG (status) == SIGSTOP);
2080 lp->signalled = 1;
2081
2082 if (!in_thread_list (inferior_ptid))
2083 {
2084 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2085 GET_PID (inferior_ptid));
2086 add_thread (inferior_ptid);
2087 }
2088
2089 add_thread (lp->ptid);
2090 }
2091
2092 /* Save the trap's siginfo in case we need it later. */
2093 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2094 save_siginfo (lp);
2095
2096 /* Handle GNU/Linux's extended waitstatus for trace events. */
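/* When the PTRACE_O_TRACE* options are in effect, the kernel
reports fork/vfork/clone/exec events as a SIGTRAP stop and stores
the PTRACE_EVENT_* code in bits 16 and up of the wait status,
hence the "status >> 16" test below. */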
2097 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2098 {
2099 if (debug_linux_nat)
2100 fprintf_unfiltered (gdb_stdlog,
2101 "LLW: Handling extended status 0x%06x\n",
2102 status);
2103 if (linux_handle_extended_wait (lp, status, 0))
2104 {
2105 status = 0;
2106 continue;
2107 }
2108 }
2109
2110 /* Check if the thread has exited. */
2111 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2112 {
2113 /* If this is the main thread, we must stop all threads and
2114 verify if they are still alive. This is because in the nptl
2115 thread model, there is no signal issued for exiting LWPs
2116 other than the main thread. We only get the main thread
2117 exit signal once all child threads have already exited.
2118 If we stop all the threads and use the stop_wait_callback
2119 to check if they have exited we can determine whether this
2120 signal should be ignored or whether it means the end of the
2121 debugged application, regardless of which threading model
2122 is being used. */
2123 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2124 {
2125 lp->stopped = 1;
2126 iterate_over_lwps (stop_and_resume_callback, NULL);
2127 }
2128
2129 if (debug_linux_nat)
2130 fprintf_unfiltered (gdb_stdlog,
2131 "LLW: %s exited.\n",
2132 target_pid_to_str (lp->ptid));
2133
2134 exit_lwp (lp);
2135
2136 /* If there is at least one more LWP, then the exit signal
2137 was not the end of the debugged application and should be
2138 ignored. */
2139 if (num_lwps > 0)
2140 {
2141 /* Make sure there is at least one thread running. */
2142 gdb_assert (iterate_over_lwps (running_callback, NULL));
2143
2144 /* Discard the event. */
2145 status = 0;
2146 continue;
2147 }
2148 }
2149
2150 /* Check if the current LWP has previously exited. In the nptl
2151 thread model, LWPs other than the main thread do not issue
2152 signals when they exit so we must check whenever the thread
2153 has stopped. A similar check is made in stop_wait_callback(). */
2154 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2155 {
2156 if (debug_linux_nat)
2157 fprintf_unfiltered (gdb_stdlog,
2158 "LLW: %s exited.\n",
2159 target_pid_to_str (lp->ptid));
2160
2161 exit_lwp (lp);
2162
2163 /* Make sure there is at least one thread running. */
2164 gdb_assert (iterate_over_lwps (running_callback, NULL));
2165
2166 /* Discard the event. */
2167 status = 0;
2168 continue;
2169 }
2170
2171 /* Make sure we don't report a SIGSTOP that we sent
2172 ourselves in an attempt to stop an LWP. */
2173 if (lp->signalled
2174 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2175 {
2176 if (debug_linux_nat)
2177 fprintf_unfiltered (gdb_stdlog,
2178 "LLW: Delayed SIGSTOP caught for %s.\n",
2179 target_pid_to_str (lp->ptid));
2180
2181 /* This is a delayed SIGSTOP. */
2182 lp->signalled = 0;
2183
2184 registers_changed ();
2185 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2186 lp->step, TARGET_SIGNAL_0);
2187 if (debug_linux_nat)
2188 fprintf_unfiltered (gdb_stdlog,
2189 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2190 lp->step ?
2191 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2192 target_pid_to_str (lp->ptid));
2193
2194 lp->stopped = 0;
2195 gdb_assert (lp->resumed);
2196
2197 /* Discard the event. */
2198 status = 0;
2199 continue;
2200 }
2201
2202 break;
2203 }
2204
2205 if (pid == -1)
2206 {
2207 /* Alternate between checking cloned and uncloned processes. */
2208 options ^= __WCLONE;
2209
2210 /* And suspend every time we have checked both. */
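/* SIGCHLD is blocked here, and suspend_mask unblocks it only for
the duration of sigsuspend, so a SIGCHLD that arrives between the
WNOHANG polls above and this point cannot be lost; it simply makes
sigsuspend return immediately. */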
2211 if (options & __WCLONE)
2212 sigsuspend (&suspend_mask);
2213 }
2214
2215 /* We shouldn't end up here unless we want to try again. */
2216 gdb_assert (status == 0);
2217 }
2218
2219 clear_sigio_trap ();
2220 clear_sigint_trap ();
2221
2222 gdb_assert (lp);
2223
2224 /* Don't report signals that GDB isn't interested in, such as
2225 signals that are neither printed nor stopped upon. Stopping all
2226 threads can be a bit time-consuming, so if we want decent
2227 performance with heavily multi-threaded programs, especially when
2228 they're using a high-frequency timer, we'd better avoid it if we
2229 can. */
2230
2231 if (WIFSTOPPED (status))
2232 {
2233 int signo = target_signal_from_host (WSTOPSIG (status));
2234
2235 /* If we get a signal while single-stepping, we may need special
2236 care, e.g. to skip the signal handler. Defer to common code. */
2237 if (!lp->step
2238 && signal_stop_state (signo) == 0
2239 && signal_print_state (signo) == 0
2240 && signal_pass_state (signo) == 1)
2241 {
2242 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2243 here? It is not clear we should. GDB may not expect
2244 other threads to run. On the other hand, not resuming
2245 newly attached threads may cause an unwanted delay in
2246 getting them running. */
2247 registers_changed ();
2248 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2249 lp->step, signo);
2250 if (debug_linux_nat)
2251 fprintf_unfiltered (gdb_stdlog,
2252 "LLW: %s %s, %s (preempt 'handle')\n",
2253 lp->step ?
2254 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2255 target_pid_to_str (lp->ptid),
2256 signo ? strsignal (signo) : "0");
2257 lp->stopped = 0;
2258 status = 0;
2259 goto retry;
2260 }
2261
2262 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2263 {
2264 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2265 forwarded to the entire process group, that is, all LWP's
2266 will receive it. Since we only want to report it once,
2267 we try to flush it from all LWPs except this one. */
2268 sigaddset (&flush_mask, SIGINT);
2269 }
2270 }
2271
2272 /* This LWP is stopped now. */
2273 lp->stopped = 1;
2274
2275 if (debug_linux_nat)
2276 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2277 status_to_str (status), target_pid_to_str (lp->ptid));
2278
2279 /* Now stop all other LWP's ... */
2280 iterate_over_lwps (stop_callback, NULL);
2281
2282 /* ... and wait until all of them have reported back that they're no
2283 longer running. */
2284 iterate_over_lwps (stop_wait_callback, &flush_mask);
2285 iterate_over_lwps (flush_callback, &flush_mask);
2286
2287 /* If we're not waiting for a specific LWP, choose an event LWP from
2288 among those that have had events. Giving equal priority to all
2289 LWPs that have had events helps prevent starvation. */
2290 if (pid == -1)
2291 select_event_lwp (&lp, &status);
2292
2293 /* Now that we've selected our final event LWP, cancel any
2294 breakpoints in other LWPs that have hit a GDB breakpoint. See
2295 the comment in cancel_breakpoints_callback to find out why. */
2296 iterate_over_lwps (cancel_breakpoints_callback, lp);
2297
2298 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2299 {
2300 trap_ptid = lp->ptid;
2301 if (debug_linux_nat)
2302 fprintf_unfiltered (gdb_stdlog,
2303 "LLW: trap_ptid is %s.\n",
2304 target_pid_to_str (trap_ptid));
2305 }
2306 else
2307 trap_ptid = null_ptid;
2308
2309 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2310 {
2311 *ourstatus = lp->waitstatus;
2312 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2313 }
2314 else
2315 store_waitstatus (ourstatus, status);
2316
2317 return lp->ptid;
2318 }
2319
2320 static int
2321 kill_callback (struct lwp_info *lp, void *data)
2322 {
2323 errno = 0;
2324 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2325 if (debug_linux_nat)
2326 fprintf_unfiltered (gdb_stdlog,
2327 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2328 target_pid_to_str (lp->ptid),
2329 errno ? safe_strerror (errno) : "OK");
2330
2331 return 0;
2332 }
2333
2334 static int
2335 kill_wait_callback (struct lwp_info *lp, void *data)
2336 {
2337 pid_t pid;
2338
2339 /* We must make sure that there are no pending events (delayed
2340 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
2341 program doesn't interfere with any following debugging session. */
2342
2343 /* For cloned processes we must check both with __WCLONE and
2344 without, since the exit status of a cloned process isn't reported
2345 with __WCLONE. */
2346 if (lp->cloned)
2347 {
2348 do
2349 {
2350 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2351 if (pid != (pid_t) -1)
2352 {
2353 if (debug_linux_nat)
2354 fprintf_unfiltered (gdb_stdlog,
2355 "KWC: wait %s received unknown.\n",
2356 target_pid_to_str (lp->ptid));
2357 /* The Linux kernel sometimes fails to kill a thread
2358 completely after PTRACE_KILL; that goes from the stop
2359 point in do_fork out to the one in
2360 get_signal_to_deliver and waits again. So kill it
2361 again. */
2362 kill_callback (lp, NULL);
2363 }
2364 }
2365 while (pid == GET_LWP (lp->ptid));
2366
2367 gdb_assert (pid == -1 && errno == ECHILD);
2368 }
2369
2370 do
2371 {
2372 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2373 if (pid != (pid_t) -1)
2374 {
2375 if (debug_linux_nat)
2376 fprintf_unfiltered (gdb_stdlog,
2377 "KWC: wait %s received unknown.\n",
2378 target_pid_to_str (lp->ptid));
2379 /* See the call to kill_callback above. */
2380 kill_callback (lp, NULL);
2381 }
2382 }
2383 while (pid == GET_LWP (lp->ptid));
2384
2385 gdb_assert (pid == -1 && errno == ECHILD);
2386 return 0;
2387 }
2388
2389 static void
2390 linux_nat_kill (void)
2391 {
2392 struct target_waitstatus last;
2393 ptid_t last_ptid;
2394 int status;
2395
2396 /* If we're stopped while forking and we haven't followed yet,
2397 kill the other task. We need to do this first because the
2398 parent will be sleeping if this is a vfork. */
2399
2400 get_last_target_status (&last_ptid, &last);
2401
2402 if (last.kind == TARGET_WAITKIND_FORKED
2403 || last.kind == TARGET_WAITKIND_VFORKED)
2404 {
2405 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2406 wait (&status);
2407 }
2408
2409 if (forks_exist_p ())
2410 linux_fork_killall ();
2411 else
2412 {
2413 /* Kill all LWP's ... */
2414 iterate_over_lwps (kill_callback, NULL);
2415
2416 /* ... and wait until we've flushed all events. */
2417 iterate_over_lwps (kill_wait_callback, NULL);
2418 }
2419
2420 target_mourn_inferior ();
2421 }
2422
2423 static void
2424 linux_nat_mourn_inferior (void)
2425 {
2426 trap_ptid = null_ptid;
2427
2428 /* Destroy LWP info; it's no longer valid. */
2429 init_lwp_list ();
2430
2431 /* Restore the original signal mask. */
2432 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2433 sigemptyset (&blocked_mask);
2434
2435 if (! forks_exist_p ())
2436 /* Normal case, no other forks available. */
2437 linux_ops->to_mourn_inferior ();
2438 else
2439 /* Multi-fork case. The current inferior_ptid has exited, but
2440 there are other viable forks to debug. Delete the exiting
2441 one and context-switch to the first available. */
2442 linux_fork_mourn_inferior ();
2443 }
2444
2445 static LONGEST
2446 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2447 const char *annex, gdb_byte *readbuf,
2448 const gdb_byte *writebuf,
2449 ULONGEST offset, LONGEST len)
2450 {
2451 struct cleanup *old_chain = save_inferior_ptid ();
2452 LONGEST xfer;
2453
2454 if (is_lwp (inferior_ptid))
2455 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2456
2457 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2458 offset, len);
2459
2460 do_cleanups (old_chain);
2461 return xfer;
2462 }
2463
2464 static int
2465 linux_nat_thread_alive (ptid_t ptid)
2466 {
2467 gdb_assert (is_lwp (ptid));
2468
2469 errno = 0;
2470 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2471 if (debug_linux_nat)
2472 fprintf_unfiltered (gdb_stdlog,
2473 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2474 target_pid_to_str (ptid),
2475 errno ? safe_strerror (errno) : "OK");
2476
2477 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2478 handle that case gracefully since ptrace will first do a lookup
2479 for the process based upon the passed-in pid. If that fails we
2480 will get either -ESRCH or -EPERM, otherwise the child exists and
2481 is alive. */
2482 if (errno == ESRCH || errno == EPERM)
2483 return 0;
2484
2485 return 1;
2486 }
2487
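/* Return a printable representation of PTID. When more than one LWP
is known this yields e.g. "LWP 4567"; otherwise fall back to the
normal process form. */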
2488 static char *
2489 linux_nat_pid_to_str (ptid_t ptid)
2490 {
2491 static char buf[64];
2492
2493 if (lwp_list && lwp_list->next && is_lwp (ptid))
2494 {
2495 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2496 return buf;
2497 }
2498
2499 return normal_pid_to_str (ptid);
2500 }
2501
2502 static void
2503 sigchld_handler (int signo)
2504 {
2505 /* Do nothing. The only reason for this handler is that it allows
2506 us to use sigsuspend in linux_nat_wait above to wait for the
2507 arrival of a SIGCHLD. */
2508 }
2509
2510 /* Accepts an integer PID; Returns a string representing a file that
2511 can be opened to get the symbols for the child process. */
2512
2513 static char *
2514 linux_child_pid_to_exec_file (int pid)
2515 {
2516 char *name1, *name2;
2517
2518 name1 = xmalloc (MAXPATHLEN);
2519 name2 = xmalloc (MAXPATHLEN);
2520 make_cleanup (xfree, name1);
2521 make_cleanup (xfree, name2);
2522 memset (name2, 0, MAXPATHLEN);
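/* readlink does not NUL-terminate the string it writes, so NAME2 is
zeroed above to guarantee termination. */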
2523
2524 sprintf (name1, "/proc/%d/exe", pid);
2525 if (readlink (name1, name2, MAXPATHLEN) > 0)
2526 return name2;
2527 else
2528 return name1;
2529 }
2530
2531 /* Service function for corefiles and info proc. */
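/* Each line of /proc/PID/maps has the form matched by the fscanf
pattern below, for example (illustrative values):

08048000-0804c000 r-xp 00000000 03:02 12345 /bin/cat

i.e. start and end addresses, permissions, file offset, device,
inode, and an optional filename. */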
2532
2533 static int
2534 read_mapping (FILE *mapfile,
2535 long long *addr,
2536 long long *endaddr,
2537 char *permissions,
2538 long long *offset,
2539 char *device, long long *inode, char *filename)
2540 {
2541 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2542 addr, endaddr, permissions, offset, device, inode);
2543
2544 filename[0] = '\0';
2545 if (ret > 0 && ret != EOF)
2546 {
2547 /* Eat everything up to EOL for the filename. This will prevent
2548 weird filenames (such as one with embedded whitespace) from
2549 confusing this code. It also makes this code more robust with
2550 respect to annotations the kernel may add after the filename.
2551
2552 Note the filename is used for informational purposes
2553 only. */
2554 ret += fscanf (mapfile, "%[^\n]\n", filename);
2555 }
2556
2557 return (ret != 0 && ret != EOF);
2558 }
2559
2560 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2561 regions in the inferior for a corefile. */
2562
2563 static int
2564 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2565 unsigned long,
2566 int, int, int, void *), void *obfd)
2567 {
2568 long long pid = PIDGET (inferior_ptid);
2569 char mapsfilename[MAXPATHLEN];
2570 FILE *mapsfile;
2571 long long addr, endaddr, size, offset, inode;
2572 char permissions[8], device[8], filename[MAXPATHLEN];
2573 int read, write, exec;
2574 int ret;
2575
2576 /* Compose the filename for the /proc memory map, and open it. */
2577 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2578 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2579 error (_("Could not open %s."), mapsfilename);
2580
2581 if (info_verbose)
2582 fprintf_filtered (gdb_stdout,
2583 "Reading memory regions from %s\n", mapsfilename);
2584
2585 /* Now iterate until end-of-file. */
2586 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2587 &offset, &device[0], &inode, &filename[0]))
2588 {
2589 size = endaddr - addr;
2590
2591 /* Get the segment's permissions. */
2592 read = (strchr (permissions, 'r') != 0);
2593 write = (strchr (permissions, 'w') != 0);
2594 exec = (strchr (permissions, 'x') != 0);
2595
2596 if (info_verbose)
2597 {
2598 fprintf_filtered (gdb_stdout,
2599 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2600 size, paddr_nz (addr),
2601 read ? 'r' : ' ',
2602 write ? 'w' : ' ', exec ? 'x' : ' ');
2603 if (filename[0])
2604 fprintf_filtered (gdb_stdout, " for %s", filename);
2605 fprintf_filtered (gdb_stdout, "\n");
2606 }
2607
2608 /* Invoke the callback function to create the corefile
2609 segment. */
2610 func (addr, size, read, write, exec, obfd);
2611 }
2612 fclose (mapsfile);
2613 return 0;
2614 }
2615
2616 /* Records the thread's register state for the corefile note
2617 section. */
2618
2619 static char *
2620 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2621 char *note_data, int *note_size)
2622 {
2623 gdb_gregset_t gregs;
2624 gdb_fpregset_t fpregs;
2625 #ifdef FILL_FPXREGSET
2626 gdb_fpxregset_t fpxregs;
2627 #endif
2628 unsigned long lwp = ptid_get_lwp (ptid);
2629 struct regcache *regcache = get_thread_regcache (ptid);
2630 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2631 const struct regset *regset;
2632 int core_regset_p;
2633 struct cleanup *old_chain;
2634
2635 old_chain = save_inferior_ptid ();
2636 inferior_ptid = ptid;
2637 target_fetch_registers (regcache, -1);
2638 do_cleanups (old_chain);
2639
2640 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
2641 if (core_regset_p
2642 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
2643 sizeof (gregs))) != NULL
2644 && regset->collect_regset != NULL)
2645 regset->collect_regset (regset, regcache, -1,
2646 &gregs, sizeof (gregs));
2647 else
2648 fill_gregset (regcache, &gregs, -1);
2649
2650 note_data = (char *) elfcore_write_prstatus (obfd,
2651 note_data,
2652 note_size,
2653 lwp,
2654 stop_signal, &gregs);
2655
2656 if (core_regset_p
2657 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
2658 sizeof (fpregs))) != NULL
2659 && regset->collect_regset != NULL)
2660 regset->collect_regset (regset, regcache, -1,
2661 &fpregs, sizeof (fpregs));
2662 else
2663 fill_fpregset (regcache, &fpregs, -1);
2664
2665 note_data = (char *) elfcore_write_prfpreg (obfd,
2666 note_data,
2667 note_size,
2668 &fpregs, sizeof (fpregs));
2669
2670 #ifdef FILL_FPXREGSET
2671 if (core_regset_p
2672 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
2673 sizeof (fpxregs))) != NULL
2674 && regset->collect_regset != NULL)
2675 regset->collect_regset (regset, regcache, -1,
2676 &fpxregs, sizeof (fpxregs));
2677 else
2678 fill_fpxregset (regcache, &fpxregs, -1);
2679
2680 note_data = (char *) elfcore_write_prxfpreg (obfd,
2681 note_data,
2682 note_size,
2683 &fpxregs, sizeof (fpxregs));
2684 #endif
2685 return note_data;
2686 }
2687
2688 struct linux_nat_corefile_thread_data
2689 {
2690 bfd *obfd;
2691 char *note_data;
2692 int *note_size;
2693 int num_notes;
2694 };
2695
2696 /* Called once per LWP via iterate_over_lwps. Records the thread's
2697 register state for the corefile note section. */
2698
2699 static int
2700 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2701 {
2702 struct linux_nat_corefile_thread_data *args = data;
2703
2704 args->note_data = linux_nat_do_thread_registers (args->obfd,
2705 ti->ptid,
2706 args->note_data,
2707 args->note_size);
2708 args->num_notes++;
2709
2710 return 0;
2711 }
2712
2713 /* Records the register state for the corefile note section. */
2714
2715 static char *
2716 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2717 char *note_data, int *note_size)
2718 {
2719 return linux_nat_do_thread_registers (obfd,
2720 ptid_build (ptid_get_pid (inferior_ptid),
2721 ptid_get_pid (inferior_ptid),
2722 0),
2723 note_data, note_size);
2724 }
2725
2726 /* Fills the "to_make_corefile_note" target vector. Builds the note
2727 section for a corefile, and returns it in a malloc buffer. */
2728
2729 static char *
2730 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2731 {
2732 struct linux_nat_corefile_thread_data thread_args;
2733 struct cleanup *old_chain;
2734 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
2735 char fname[16] = { '\0' };
2736 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
2737 char psargs[80] = { '\0' };
2738 char *note_data = NULL;
2739 ptid_t current_ptid = inferior_ptid;
2740 gdb_byte *auxv;
2741 int auxv_len;
2742
2743 if (get_exec_file (0))
2744 {
2745 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2746 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2747 if (get_inferior_args ())
2748 {
2749 char *string_end;
2750 char *psargs_end = psargs + sizeof (psargs);
2751
2752 /* linux_elfcore_write_prpsinfo () handles zero unterminated
2753 strings fine. */
2754 string_end = memchr (psargs, 0, sizeof (psargs));
2755 if (string_end != NULL)
2756 {
2757 *string_end++ = ' ';
2758 strncpy (string_end, get_inferior_args (),
2759 psargs_end - string_end);
2760 }
2761 }
2762 note_data = (char *) elfcore_write_prpsinfo (obfd,
2763 note_data,
2764 note_size, fname, psargs);
2765 }
2766
2767 /* Dump information for threads. */
2768 thread_args.obfd = obfd;
2769 thread_args.note_data = note_data;
2770 thread_args.note_size = note_size;
2771 thread_args.num_notes = 0;
2772 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2773 if (thread_args.num_notes == 0)
2774 {
2775 /* iterate_over_lwps didn't come up with any threads; just
2776 use inferior_ptid. */
2777 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2778 note_data, note_size);
2779 }
2780 else
2781 {
2782 note_data = thread_args.note_data;
2783 }
2784
2785 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
2786 NULL, &auxv);
2787 if (auxv_len > 0)
2788 {
2789 note_data = elfcore_write_note (obfd, note_data, note_size,
2790 "CORE", NT_AUXV, auxv, auxv_len);
2791 xfree (auxv);
2792 }
2793
2794 make_cleanup (xfree, note_data);
2795 return note_data;
2796 }
2797
2798 /* Implement the "info proc" command. */
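/* Usage sketch (arguments as parsed below): "info proc" reports on
the current inferior, while e.g. "info proc 1234 mappings status"
reports the memory map and /proc status of process 1234. */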
2799
2800 static void
2801 linux_nat_info_proc_cmd (char *args, int from_tty)
2802 {
2803 long long pid = PIDGET (inferior_ptid);
2804 FILE *procfile;
2805 char **argv = NULL;
2806 char buffer[MAXPATHLEN];
2807 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2808 int cmdline_f = 1;
2809 int cwd_f = 1;
2810 int exe_f = 1;
2811 int mappings_f = 0;
2812 int environ_f = 0;
2813 int status_f = 0;
2814 int stat_f = 0;
2815 int all = 0;
2816 struct stat dummy;
2817
2818 if (args)
2819 {
2820 /* Break up 'args' into an argv array. */
2821 if ((argv = buildargv (args)) == NULL)
2822 nomem (0);
2823 else
2824 make_cleanup_freeargv (argv);
2825 }
2826 while (argv != NULL && *argv != NULL)
2827 {
2828 if (isdigit (argv[0][0]))
2829 {
2830 pid = strtoul (argv[0], NULL, 10);
2831 }
2832 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2833 {
2834 mappings_f = 1;
2835 }
2836 else if (strcmp (argv[0], "status") == 0)
2837 {
2838 status_f = 1;
2839 }
2840 else if (strcmp (argv[0], "stat") == 0)
2841 {
2842 stat_f = 1;
2843 }
2844 else if (strcmp (argv[0], "cmd") == 0)
2845 {
2846 cmdline_f = 1;
2847 }
2848 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2849 {
2850 exe_f = 1;
2851 }
2852 else if (strcmp (argv[0], "cwd") == 0)
2853 {
2854 cwd_f = 1;
2855 }
2856 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2857 {
2858 all = 1;
2859 }
2860 else
2861 {
2862 /* [...] (future options here) */
2863 }
2864 argv++;
2865 }
2866 if (pid == 0)
2867 error (_("No current process: you must name one."));
2868
2869 sprintf (fname1, "/proc/%lld", pid);
2870 if (stat (fname1, &dummy) != 0)
2871 error (_("No /proc directory: '%s'"), fname1);
2872
2873 printf_filtered (_("process %lld\n"), pid);
2874 if (cmdline_f || all)
2875 {
2876 sprintf (fname1, "/proc/%lld/cmdline", pid);
2877 if ((procfile = fopen (fname1, "r")) != NULL)
2878 {
2879 fgets (buffer, sizeof (buffer), procfile);
2880 printf_filtered ("cmdline = '%s'\n", buffer);
2881 fclose (procfile);
2882 }
2883 else
2884 warning (_("unable to open /proc file '%s'"), fname1);
2885 }
2886 if (cwd_f || all)
2887 {
2888 sprintf (fname1, "/proc/%lld/cwd", pid);
2889 memset (fname2, 0, sizeof (fname2));
2890 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2891 printf_filtered ("cwd = '%s'\n", fname2);
2892 else
2893 warning (_("unable to read link '%s'"), fname1);
2894 }
2895 if (exe_f || all)
2896 {
2897 sprintf (fname1, "/proc/%lld/exe", pid);
2898 memset (fname2, 0, sizeof (fname2));
2899 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2900 printf_filtered ("exe = '%s'\n", fname2);
2901 else
2902 warning (_("unable to read link '%s'"), fname1);
2903 }
2904 if (mappings_f || all)
2905 {
2906 sprintf (fname1, "/proc/%lld/maps", pid);
2907 if ((procfile = fopen (fname1, "r")) != NULL)
2908 {
2909 long long addr, endaddr, size, offset, inode;
2910 char permissions[8], device[8], filename[MAXPATHLEN];
2911
2912 printf_filtered (_("Mapped address spaces:\n\n"));
2913 if (gdbarch_addr_bit (current_gdbarch) == 32)
2914 {
2915 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2916 "Start Addr",
2917 " End Addr",
2918 " Size", " Offset", "objfile");
2919 }
2920 else
2921 {
2922 printf_filtered (" %18s %18s %10s %10s %7s\n",
2923 "Start Addr",
2924 " End Addr",
2925 " Size", " Offset", "objfile");
2926 }
2927
2928 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2929 &offset, &device[0], &inode, &filename[0]))
2930 {
2931 size = endaddr - addr;
2932
2933 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2934 calls here (and possibly above) should be abstracted
2935 out into their own functions? Andrew suggests using
2936 a generic local_address_string instead to print out
2937 the addresses; that makes sense to me, too. */
2938
2939 if (gdbarch_addr_bit (current_gdbarch) == 32)
2940 {
2941 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2942 (unsigned long) addr, /* FIXME: pr_addr */
2943 (unsigned long) endaddr,
2944 (int) size,
2945 (unsigned int) offset,
2946 filename[0] ? filename : "");
2947 }
2948 else
2949 {
2950 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2951 (unsigned long) addr, /* FIXME: pr_addr */
2952 (unsigned long) endaddr,
2953 (int) size,
2954 (unsigned int) offset,
2955 filename[0] ? filename : "");
2956 }
2957 }
2958
2959 fclose (procfile);
2960 }
2961 else
2962 warning (_("unable to open /proc file '%s'"), fname1);
2963 }
2964 if (status_f || all)
2965 {
2966 sprintf (fname1, "/proc/%lld/status", pid);
2967 if ((procfile = fopen (fname1, "r")) != NULL)
2968 {
2969 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2970 puts_filtered (buffer);
2971 fclose (procfile);
2972 }
2973 else
2974 warning (_("unable to open /proc file '%s'"), fname1);
2975 }
2976 if (stat_f || all)
2977 {
2978 sprintf (fname1, "/proc/%lld/stat", pid);
2979 if ((procfile = fopen (fname1, "r")) != NULL)
2980 {
2981 int itmp;
2982 char ctmp;
2983 long ltmp;
2984
2985 if (fscanf (procfile, "%d ", &itmp) > 0)
2986 printf_filtered (_("Process: %d\n"), itmp);
2987 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
2988 printf_filtered (_("Exec file: %s\n"), buffer);
2989 if (fscanf (procfile, "%c ", &ctmp) > 0)
2990 printf_filtered (_("State: %c\n"), ctmp);
2991 if (fscanf (procfile, "%d ", &itmp) > 0)
2992 printf_filtered (_("Parent process: %d\n"), itmp);
2993 if (fscanf (procfile, "%d ", &itmp) > 0)
2994 printf_filtered (_("Process group: %d\n"), itmp);
2995 if (fscanf (procfile, "%d ", &itmp) > 0)
2996 printf_filtered (_("Session id: %d\n"), itmp);
2997 if (fscanf (procfile, "%d ", &itmp) > 0)
2998 printf_filtered (_("TTY: %d\n"), itmp);
2999 if (fscanf (procfile, "%d ", &itmp) > 0)
3000 printf_filtered (_("TTY owner process group: %d\n"), itmp);
3001 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3002 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3003 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3004 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3005 (unsigned long) ltmp);
3006 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3007 printf_filtered (_("Minor faults, children: %lu\n"),
3008 (unsigned long) ltmp);
3009 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3010 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3011 (unsigned long) ltmp);
3012 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3013 printf_filtered (_("Major faults, children: %lu\n"),
3014 (unsigned long) ltmp);
3015 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3016 printf_filtered (_("utime: %ld\n"), ltmp);
3017 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3018 printf_filtered (_("stime: %ld\n"), ltmp);
3019 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3020 printf_filtered (_("utime, children: %ld\n"), ltmp);
3021 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3022 printf_filtered (_("stime, children: %ld\n"), ltmp);
3023 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3024 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3025 ltmp);
3026 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3027 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3028 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3029 printf_filtered (_("jiffies until next timeout: %lu\n"),
3030 (unsigned long) ltmp);
3031 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3032 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3033 (unsigned long) ltmp);
3034 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3035 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3036 ltmp);
3037 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3038 printf_filtered (_("Virtual memory size: %lu\n"),
3039 (unsigned long) ltmp);
3040 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3041 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3042 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3043 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3044 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3045 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3046 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3047 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3048 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3049 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3050 #if 0 /* Don't know how architecture-dependent the rest is...
3051 Anyway the signal bitmap info is available from "status". */
3052 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3053 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3054 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3055 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3056 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3057 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3058 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3059 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3060 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3061 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3062 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3063 printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
3064 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3065 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
3066 #endif
3067 fclose (procfile);
3068 }
3069 else
3070 warning (_("unable to open /proc file '%s'"), fname1);
3071 }
3072 }
3073
3074 /* Implement the to_xfer_partial interface for memory reads using the /proc
3075 filesystem. Because we can use a single read() call for /proc, this
3076 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3077 but it doesn't support writes. */
3078
3079 static LONGEST
3080 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3081 const char *annex, gdb_byte *readbuf,
3082 const gdb_byte *writebuf,
3083 ULONGEST offset, LONGEST len)
3084 {
3085 LONGEST ret;
3086 int fd;
3087 char filename[64];
3088
3089 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3090 return 0;
3091
3092 /* Don't bother for one word. */
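/* (For such short transfers the open/read/close overhead of the
/proc path is unlikely to beat a couple of PTRACE_PEEKTEXT round
trips, so let the caller fall back to the ptrace-based method.) */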
3093 if (len < 3 * sizeof (long))
3094 return 0;
3095
3096 /* We could keep this file open and cache it - possibly one per
3097 thread. That requires some juggling, but is even faster. */
3098 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3099 fd = open (filename, O_RDONLY | O_LARGEFILE);
3100 if (fd == -1)
3101 return 0;
3102
3103 /* If pread64 is available, use it. It's faster if the kernel
3104 supports it (only one syscall), and it's 64-bit safe even on
3105 32-bit platforms (for instance, SPARC debugging a SPARC64
3106 application). */
3107 #ifdef HAVE_PREAD64
3108 if (pread64 (fd, readbuf, len, offset) != len)
3109 #else
3110 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3111 #endif
3112 ret = 0;
3113 else
3114 ret = len;
3115
3116 close (fd);
3117 return ret;
3118 }
3119
3120 /* Parse LINE as a signal set and add its set bits to SIGS. */
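/* LINE is the hexadecimal mask copied from a /proc status entry, for
example "0000000000000002\n". The rightmost hex digit covers
signals 1-4, the next one signals 5-8, and so on; that example
therefore marks signal 2 (SIGINT) as a member of the set. */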
3121
3122 static void
3123 add_line_to_sigset (const char *line, sigset_t *sigs)
3124 {
3125 int len = strlen (line) - 1;
3126 const char *p;
3127 int signum;
3128
3129 if (line[len] != '\n')
3130 error (_("Could not parse signal set: %s"), line);
3131
3132 p = line;
3133 signum = len * 4;
3134 while (len-- > 0)
3135 {
3136 int digit;
3137
3138 if (*p >= '0' && *p <= '9')
3139 digit = *p - '0';
3140 else if (*p >= 'a' && *p <= 'f')
3141 digit = *p - 'a' + 10;
3142 else
3143 error (_("Could not parse signal set: %s"), line);
3144
3145 signum -= 4;
3146
3147 if (digit & 1)
3148 sigaddset (sigs, signum + 1);
3149 if (digit & 2)
3150 sigaddset (sigs, signum + 2);
3151 if (digit & 4)
3152 sigaddset (sigs, signum + 3);
3153 if (digit & 8)
3154 sigaddset (sigs, signum + 4);
3155
3156 p++;
3157 }
3158 }
3159
3160 /* Find process PID's pending signals from /proc/pid/status and set
3161 SIGS to match. */
3162
3163 void
3164 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3165 {
3166 FILE *procfile;
3167 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3168 int signum;
3169
3170 sigemptyset (pending);
3171 sigemptyset (blocked);
3172 sigemptyset (ignored);
3173 sprintf (fname, "/proc/%d/status", pid);
3174 procfile = fopen (fname, "r");
3175 if (procfile == NULL)
3176 error (_("Could not open %s"), fname);
3177
3178 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3179 {
3180 /* Normal queued signals are on the SigPnd line in the status
3181 file. However, 2.6 kernels also have a "shared" pending
3182 queue for delivering signals to a thread group, so check for
3183 a ShdPnd line also.
3184
3185 Unfortunately some Red Hat kernels include the shared pending
3186 queue but not the ShdPnd status field. */
3187
3188 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3189 add_line_to_sigset (buffer + 8, pending);
3190 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3191 add_line_to_sigset (buffer + 8, pending);
3192 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3193 add_line_to_sigset (buffer + 8, blocked);
3194 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3195 add_line_to_sigset (buffer + 8, ignored);
3196 }
3197
3198 fclose (procfile);
3199 }
3200
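/* Top-level to_xfer_partial method. Auxv requests are handled by
procfs_xfer_auxv; plain memory reads first try the fast
/proc/PID/mem path above and, if that declines, fall back to the
method inherited from the layer beneath (inf-ptrace). */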
3201 static LONGEST
3202 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3203 const char *annex, gdb_byte *readbuf,
3204 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3205 {
3206 LONGEST xfer;
3207
3208 if (object == TARGET_OBJECT_AUXV)
3209 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3210 offset, len);
3211
3212 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3213 offset, len);
3214 if (xfer != 0)
3215 return xfer;
3216
3217 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3218 offset, len);
3219 }
3220
3221 /* Create a prototype generic GNU/Linux target. The client can override
3222 it with local methods. */
3223
3224 static void
3225 linux_target_install_ops (struct target_ops *t)
3226 {
3227 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3228 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3229 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3230 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
3231 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3232 t->to_post_attach = linux_child_post_attach;
3233 t->to_follow_fork = linux_child_follow_fork;
3234 t->to_find_memory_regions = linux_nat_find_memory_regions;
3235 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3236
3237 super_xfer_partial = t->to_xfer_partial;
3238 t->to_xfer_partial = linux_xfer_partial;
3239 }
3240
3241 struct target_ops *
3242 linux_target (void)
3243 {
3244 struct target_ops *t;
3245
3246 t = inf_ptrace_target ();
3247 linux_target_install_ops (t);
3248
3249 return t;
3250 }
3251
3252 struct target_ops *
3253 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
3254 {
3255 struct target_ops *t;
3256
3257 t = inf_ptrace_trad_target (register_u_offset);
3258 linux_target_install_ops (t);
3259
3260 return t;
3261 }
3262
3263 void
3264 linux_nat_add_target (struct target_ops *t)
3265 {
3266 /* Save the provided single-threaded target. We save this in a separate
3267 variable because another target we've inherited from (e.g. inf-ptrace)
3268 may have saved a pointer to T; we want to use it for the final
3269 process stratum target. */
3270 linux_ops_saved = *t;
3271 linux_ops = &linux_ops_saved;
3272
3273 /* Override some methods for multithreading. */
3274 t->to_attach = linux_nat_attach;
3275 t->to_detach = linux_nat_detach;
3276 t->to_resume = linux_nat_resume;
3277 t->to_wait = linux_nat_wait;
3278 t->to_xfer_partial = linux_nat_xfer_partial;
3279 t->to_kill = linux_nat_kill;
3280 t->to_mourn_inferior = linux_nat_mourn_inferior;
3281 t->to_thread_alive = linux_nat_thread_alive;
3282 t->to_pid_to_str = linux_nat_pid_to_str;
3283 t->to_has_thread_control = tc_schedlock;
3284
3285 /* We don't change the stratum; this target will sit at
3286 process_stratum and thread_db will sit at thread_stratum. This
3287 is a little strange, since this is a multi-threaded-capable
3288 target, but we want to be on the stack below thread_db, and we
3289 also want to be used for single-threaded processes. */
3290
3291 add_target (t);
3292
3293 /* TODO: Eliminate this and have libthread_db use
3294 find_target_beneath. */
3295 thread_db_init (t);
3296 }
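/* A native configuration typically wires this up from its own
_initialize_* routine, roughly as follows (an illustrative sketch;
the foo_* names are placeholders, not real GDB symbols):

void
_initialize_foo_linux_nat (void)
{
struct target_ops *t = linux_target ();
t->to_fetch_registers = foo_linux_fetch_registers;
t->to_store_registers = foo_linux_store_registers;
linux_nat_add_target (t);
}
*/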
3297
3298 /* Register a method to call whenever a new thread is attached. */
3299 void
3300 linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
3301 {
3302 /* Save the pointer. We only support a single registered instance
3303 of the GNU/Linux native target, so we do not need to map this to
3304 T. */
3305 linux_nat_new_thread = new_thread;
3306 }
3307
3308 /* Return the saved siginfo associated with PTID. */
3309 struct siginfo *
3310 linux_nat_get_siginfo (ptid_t ptid)
3311 {
3312 struct lwp_info *lp = find_lwp_pid (ptid);
3313
3314 gdb_assert (lp != NULL);
3315
3316 return &lp->siginfo;
3317 }
3318
3319 void
3320 _initialize_linux_nat (void)
3321 {
3322 struct sigaction action;
3323
3324 add_info ("proc", linux_nat_info_proc_cmd, _("\
3325 Show /proc process information about any running process.\n\
3326 Specify any process id, or use the program being debugged by default.\n\
3327 Specify any of the following keywords for detailed info:\n\
3328 mappings -- list of mapped memory regions.\n\
3329 stat -- list process information from /proc/PID/stat.\n\
3330 status -- list process information from /proc/PID/status.\n\
3331 all -- list all available /proc info."));
3332
3333 /* Save the original signal mask. */
3334 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3335
3336 action.sa_handler = sigchld_handler;
3337 sigemptyset (&action.sa_mask);
3338 action.sa_flags = SA_RESTART;
3339 sigaction (SIGCHLD, &action, NULL);
3340
3341 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3342 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3343 sigdelset (&suspend_mask, SIGCHLD);
3344
3345 sigemptyset (&blocked_mask);
3346
3347 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3348 Set debugging of GNU/Linux lwp module."), _("\
3349 Show debugging of GNU/Linux lwp module."), _("\
3350 Enables printf debugging output."),
3351 NULL,
3352 show_debug_linux_nat,
3353 &setdebuglist, &showdebuglist);
3354 }
3355 \f
3356
3357 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3358 the GNU/Linux Threads library and therefore doesn't really belong
3359 here. */
3360
3361 /* Read variable NAME in the target and return its value if found.
3362 Otherwise return zero. It is assumed that the type of the variable
3363 is `int'. */
3364
3365 static int
3366 get_signo (const char *name)
3367 {
3368 struct minimal_symbol *ms;
3369 int signo;
3370
3371 ms = lookup_minimal_symbol (name, NULL, NULL);
3372 if (ms == NULL)
3373 return 0;
3374
3375 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3376 sizeof (signo)) != 0)
3377 return 0;
3378
3379 return signo;
3380 }
3381
3382 /* Return the set of signals used by the threads library in *SET. */
3383
3384 void
3385 lin_thread_get_thread_signals (sigset_t *set)
3386 {
3387 struct sigaction action;
3388 int restart, cancel;
3389
3390 sigemptyset (set);
3391
3392 restart = get_signo ("__pthread_sig_restart");
3393 cancel = get_signo ("__pthread_sig_cancel");
3394
3395 /* LinuxThreads normally uses the first two RT signals, but in some legacy
3396 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
3397 not provide any way for the debugger to query the signal numbers -
3398 fortunately they don't change! */
3399
3400 if (restart == 0)
3401 restart = __SIGRTMIN;
3402
3403 if (cancel == 0)
3404 cancel = __SIGRTMIN + 1;
3405
3406 sigaddset (set, restart);
3407 sigaddset (set, cancel);
3408
3409 /* The GNU/Linux Threads library makes terminating threads send a
3410 special "cancel" signal instead of SIGCHLD. Make sure we catch
3411 those (to prevent them from terminating GDB itself, which is
3412 likely to be their default action) and treat them the same way as
3413 SIGCHLD. */
3414
3415 action.sa_handler = sigchld_handler;
3416 sigemptyset (&action.sa_mask);
3417 action.sa_flags = SA_RESTART;
3418 sigaction (cancel, &action, NULL);
3419
3420 /* We block the "cancel" signal throughout this code ... */
3421 sigaddset (&blocked_mask, cancel);
3422 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3423
3424 /* ... except during a sigsuspend. */
3425 sigdelset (&suspend_mask, cancel);
3426 }
3427