1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23 #include "defs.h"
24 #include "inferior.h"
25 #include "target.h"
26 #include "gdb_string.h"
27 #include "gdb_wait.h"
28 #include "gdb_assert.h"
29 #ifdef HAVE_TKILL_SYSCALL
30 #include <unistd.h>
31 #include <sys/syscall.h>
32 #endif
33 #include <sys/ptrace.h>
34 #include "linux-nat.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
37 #include "gdbcmd.h"
38 #include "regcache.h"
39 #include "regset.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/param.h> /* for MAXPATHLEN */
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include "gdbthread.h" /* for struct thread_info etc. */
49 #include "gdb_stat.h" /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
51
52 #ifndef O_LARGEFILE
53 #define O_LARGEFILE 0
54 #endif
55
56 /* If the system headers did not provide the constants, hard-code the normal
57 values. */
58 #ifndef PTRACE_EVENT_FORK
59
60 #define PTRACE_SETOPTIONS 0x4200
61 #define PTRACE_GETEVENTMSG 0x4201
62
63 /* options set using PTRACE_SETOPTIONS */
64 #define PTRACE_O_TRACESYSGOOD 0x00000001
65 #define PTRACE_O_TRACEFORK 0x00000002
66 #define PTRACE_O_TRACEVFORK 0x00000004
67 #define PTRACE_O_TRACECLONE 0x00000008
68 #define PTRACE_O_TRACEEXEC 0x00000010
69 #define PTRACE_O_TRACEVFORKDONE 0x00000020
70 #define PTRACE_O_TRACEEXIT 0x00000040
71
72 /* Wait extended result codes for the above trace options. */
73 #define PTRACE_EVENT_FORK 1
74 #define PTRACE_EVENT_VFORK 2
75 #define PTRACE_EVENT_CLONE 3
76 #define PTRACE_EVENT_EXEC 4
77 #define PTRACE_EVENT_VFORK_DONE 5
78 #define PTRACE_EVENT_EXIT 6
79
80 #endif /* PTRACE_EVENT_FORK */
81
82 /* We can't always assume that this flag is available, but all systems
83 with the ptrace event handlers also have __WALL, so it's safe to use
84 here. */
85 #ifndef __WALL
86 #define __WALL 0x40000000 /* Wait for any child. */
87 #endif
88
89 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
90 the use of the multi-threaded target. */
91 static struct target_ops *linux_ops;
92 static struct target_ops linux_ops_saved;
93
94 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
95 Called by our to_xfer_partial. */
96 static LONGEST (*super_xfer_partial) (struct target_ops *,
97 enum target_object,
98 const char *, gdb_byte *,
99 const gdb_byte *,
100 ULONGEST, LONGEST);
101
102 static int debug_linux_nat;
103 static void
104 show_debug_linux_nat (struct ui_file *file, int from_tty,
105 struct cmd_list_element *c, const char *value)
106 {
107 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
108 value);
109 }
110
111 static int linux_parent_pid;
112
113 struct simple_pid_list
114 {
115 int pid;
116 struct simple_pid_list *next;
117 };
118 struct simple_pid_list *stopped_pids;
119
120 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
121 can not be used, 1 if it can. */
122
123 static int linux_supports_tracefork_flag = -1;
124
125 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
126 PTRACE_O_TRACEVFORKDONE. */
127
128 static int linux_supports_tracevforkdone_flag = -1;
129
130 \f
131 /* Trivial list manipulation functions to keep track of a list of
132 new stopped processes. */
133 static void
134 add_to_pid_list (struct simple_pid_list **listp, int pid)
135 {
136 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
137 new_pid->pid = pid;
138 new_pid->next = *listp;
139 *listp = new_pid;
140 }
141
142 static int
143 pull_pid_from_list (struct simple_pid_list **listp, int pid)
144 {
145 struct simple_pid_list **p;
146
147 for (p = listp; *p != NULL; p = &(*p)->next)
148 if ((*p)->pid == pid)
149 {
150 struct simple_pid_list *next = (*p)->next;
151 xfree (*p);
152 *p = next;
153 return 1;
154 }
155 return 0;
156 }
157
158 void
159 linux_record_stopped_pid (int pid)
160 {
161 add_to_pid_list (&stopped_pids, pid);
162 }
163
164 \f
165 /* A helper function for linux_test_for_tracefork, called after fork (). */
166
167 static void
168 linux_tracefork_child (void)
169 {
170 int ret;
171
172 ptrace (PTRACE_TRACEME, 0, 0, 0);
173 kill (getpid (), SIGSTOP);
174 fork ();
175 _exit (0);
176 }
177
178 /* Wrapper function for waitpid which handles EINTR. */
179
180 static int
181 my_waitpid (int pid, int *status, int flags)
182 {
183 int ret;
184 do
185 {
186 ret = waitpid (pid, status, flags);
187 }
188 while (ret == -1 && errno == EINTR);
189
190 return ret;
191 }
192
193 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
194
195 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
196 we know that the feature is not available. This may change the tracing
197 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
198
199 However, if it succeeds, we don't know for sure that the feature is
200 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
201 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
202 fork tracing, and let it fork. If the process exits, we assume that we
203 can't use TRACEFORK; if we get the fork notification, and we can extract
204 the new child's PID, then we assume that we can. */
205
206 static void
207 linux_test_for_tracefork (int original_pid)
208 {
209 int child_pid, ret, status;
210 long second_pid;
211
212 linux_supports_tracefork_flag = 0;
213 linux_supports_tracevforkdone_flag = 0;
214
215 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
216 if (ret != 0)
217 return;
218
219 child_pid = fork ();
220 if (child_pid == -1)
221 perror_with_name (("fork"));
222
223 if (child_pid == 0)
224 linux_tracefork_child ();
225
226 ret = my_waitpid (child_pid, &status, 0);
227 if (ret == -1)
228 perror_with_name (("waitpid"));
229 else if (ret != child_pid)
230 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
231 if (! WIFSTOPPED (status))
232 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
233
234 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
235 if (ret != 0)
236 {
237 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
238 if (ret != 0)
239 {
240 warning (_("linux_test_for_tracefork: failed to kill child"));
241 return;
242 }
243
244 ret = my_waitpid (child_pid, &status, 0);
245 if (ret != child_pid)
246 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
247 else if (!WIFSIGNALED (status))
248 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
249 "killed child"), status);
250
251 return;
252 }
253
254 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
255 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
256 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
257 linux_supports_tracevforkdone_flag = (ret == 0);
258
259 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
260 if (ret != 0)
261 warning (_("linux_test_for_tracefork: failed to resume child"));
262
263 ret = my_waitpid (child_pid, &status, 0);
264
265 if (ret == child_pid && WIFSTOPPED (status)
266 && status >> 16 == PTRACE_EVENT_FORK)
267 {
268 second_pid = 0;
269 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
270 if (ret == 0 && second_pid != 0)
271 {
272 int second_status;
273
274 linux_supports_tracefork_flag = 1;
275 my_waitpid (second_pid, &second_status, 0);
276 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
277 if (ret != 0)
278 warning (_("linux_test_for_tracefork: failed to kill second child"));
279 }
280 }
281 else
282 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
283 "(%d, status 0x%x)"), ret, status);
284
285 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
286 if (ret != 0)
287 warning (_("linux_test_for_tracefork: failed to kill child"));
288 my_waitpid (child_pid, &status, 0);
289 }
290
291 /* Return non-zero iff we have tracefork functionality available.
292 This function also sets linux_supports_tracefork_flag. */
293
294 static int
295 linux_supports_tracefork (int pid)
296 {
297 if (linux_supports_tracefork_flag == -1)
298 linux_test_for_tracefork (pid);
299 return linux_supports_tracefork_flag;
300 }
301
302 static int
303 linux_supports_tracevforkdone (int pid)
304 {
305 if (linux_supports_tracefork_flag == -1)
306 linux_test_for_tracefork (pid);
307 return linux_supports_tracevforkdone_flag;
308 }
309
310 \f
311 void
312 linux_enable_event_reporting (ptid_t ptid)
313 {
314 int pid = ptid_get_lwp (ptid);
315 int options;
316
317 if (pid == 0)
318 pid = ptid_get_pid (ptid);
319
320 if (! linux_supports_tracefork (pid))
321 return;
322
323 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
324 | PTRACE_O_TRACECLONE;
325 if (linux_supports_tracevforkdone (pid))
326 options |= PTRACE_O_TRACEVFORKDONE;
327
328 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
329 read-only process state. */
330
331 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
332 }
333
334 void
335 child_post_attach (int pid)
336 {
337 linux_enable_event_reporting (pid_to_ptid (pid));
338 check_for_thread_db ();
339 }
340
341 static void
342 linux_child_post_startup_inferior (ptid_t ptid)
343 {
344 linux_enable_event_reporting (ptid);
345 check_for_thread_db ();
346 }
347
348 int
349 child_follow_fork (struct target_ops *ops, int follow_child)
350 {
351 ptid_t last_ptid;
352 struct target_waitstatus last_status;
353 int has_vforked;
354 int parent_pid, child_pid;
355
356 get_last_target_status (&last_ptid, &last_status);
357 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
358 parent_pid = ptid_get_lwp (last_ptid);
359 if (parent_pid == 0)
360 parent_pid = ptid_get_pid (last_ptid);
361 child_pid = last_status.value.related_pid;
362
363 if (! follow_child)
364 {
365 /* We're already attached to the parent, by default. */
366
367 /* Before detaching from the child, remove all breakpoints from
368 it. (This won't actually modify the breakpoint list, but will
369 physically remove the breakpoints from the child.) */
370 /* If we vforked this will remove the breakpoints from the parent
371 also, but they'll be reinserted below. */
372 detach_breakpoints (child_pid);
373
374 /* Detach new forked process? */
375 if (detach_fork)
376 {
377 if (debug_linux_nat)
378 {
379 target_terminal_ours ();
380 fprintf_filtered (gdb_stdlog,
381 "Detaching after fork from child process %d.\n",
382 child_pid);
383 }
384
385 ptrace (PTRACE_DETACH, child_pid, 0, 0);
386 }
387 else
388 {
389 struct fork_info *fp;
390 /* Retain child fork in ptrace (stopped) state. */
391 fp = find_fork_pid (child_pid);
392 if (!fp)
393 fp = add_fork (child_pid);
394 fork_save_infrun_state (fp, 0);
395 }
396
397 if (has_vforked)
398 {
399 gdb_assert (linux_supports_tracefork_flag >= 0);
400 if (linux_supports_tracevforkdone (0))
401 {
402 int status;
403
404 ptrace (PTRACE_CONT, parent_pid, 0, 0);
405 my_waitpid (parent_pid, &status, __WALL);
406 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
407 warning (_("Unexpected waitpid result %06x when waiting for "
408 "vfork-done"), status);
409 }
410 else
411 {
412 /* We can't insert breakpoints until the child has
413 finished with the shared memory region. We need to
414 wait until that happens. Ideal would be to just
415 call:
416 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
417 - waitpid (parent_pid, &status, __WALL);
418 However, most architectures can't handle a syscall
419 being traced on the way out if it wasn't traced on
420 the way in.
421
422 We might also think to loop, continuing the child
423 until it exits or gets a SIGTRAP. One problem is
424 that the child might call ptrace with PTRACE_TRACEME.
425
426 There's no simple and reliable way to figure out when
427 the vforked child will be done with its copy of the
428 shared memory. We could step it out of the syscall,
429 two instructions, let it go, and then single-step the
430 parent once. When we have hardware single-step, this
431 would work; with software single-step it could still
432 be made to work but we'd have to be able to insert
433 single-step breakpoints in the child, and we'd have
434 to insert -just- the single-step breakpoint in the
435 parent. Very awkward.
436
437 In the end, the best we can do is to make sure it
438 runs for a little while. Hopefully it will be out of
439 range of any breakpoints we reinsert. Usually this
440 is only the single-step breakpoint at vfork's return
441 point. */
442
443 usleep (10000);
444 }
445
446 /* Since we vforked, breakpoints were removed in the parent
447 too. Put them back. */
448 reattach_breakpoints (parent_pid);
449 }
450 }
451 else
452 {
453 char child_pid_spelling[40];
454
455 /* Needed to keep the breakpoint lists in sync. */
456 if (! has_vforked)
457 detach_breakpoints (child_pid);
458
459 /* Before detaching from the parent, remove all breakpoints from it. */
460 remove_breakpoints ();
461
462 if (debug_linux_nat)
463 {
464 target_terminal_ours ();
465 fprintf_filtered (gdb_stdlog,
466 "Attaching after fork to child process %d.\n",
467 child_pid);
468 }
469
470 /* If we're vforking, we may want to hold on to the parent until
471 the child exits or execs. At exec time we can remove the old
472 breakpoints from the parent and detach it; at exit time we
473 could do the same (or even, sneakily, resume debugging it - the
474 child's exec has failed, or something similar).
475
476 This doesn't clean up "properly", because we can't call
477 target_detach, but that's OK; if the current target is "child",
478 then it doesn't need any further cleanups, and lin_lwp will
479 generally not encounter vfork (vfork is defined to fork
480 in libpthread.so).
481
482 The holding part is very easy if we have VFORKDONE events;
483 but keeping track of both processes is beyond GDB at the
484 moment. So we don't expose the parent to the rest of GDB.
485 Instead we quietly hold onto it until such time as we can
486 safely resume it. */
487
488 if (has_vforked)
489 linux_parent_pid = parent_pid;
490 else if (!detach_fork)
491 {
492 struct fork_info *fp;
493 /* Retain parent fork in ptrace (stopped) state. */
494 fp = find_fork_pid (parent_pid);
495 if (!fp)
496 fp = add_fork (parent_pid);
497 fork_save_infrun_state (fp, 0);
498 }
499 else
500 {
501 target_detach (NULL, 0);
502 }
503
504 inferior_ptid = pid_to_ptid (child_pid);
505
506 /* Reinstall ourselves, since we might have been removed in
507 target_detach (which does other necessary cleanup). */
508
509 push_target (ops);
510
511 /* Reset breakpoints in the child as appropriate. */
512 follow_inferior_reset_breakpoints ();
513 }
514
515 return 0;
516 }
517
518 ptid_t
519 linux_handle_extended_wait (int pid, int status,
520 struct target_waitstatus *ourstatus)
521 {
522 int event = status >> 16;
523
524 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
525 || event == PTRACE_EVENT_CLONE)
526 {
527 unsigned long new_pid;
528 int ret;
529
530 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
531
532 /* If we haven't already seen the new PID stop, wait for it now. */
533 if (! pull_pid_from_list (&stopped_pids, new_pid))
534 {
535 /* The new child has a pending SIGSTOP. We can't affect it until it
536 hits the SIGSTOP, but we're already attached. */
537 ret = my_waitpid (new_pid, &status,
538 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
539 if (ret == -1)
540 perror_with_name (_("waiting for new child"));
541 else if (ret != new_pid)
542 internal_error (__FILE__, __LINE__,
543 _("wait returned unexpected PID %d"), ret);
544 else if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
545 internal_error (__FILE__, __LINE__,
546 _("wait returned unexpected status 0x%x"), status);
547 }
548
549 if (event == PTRACE_EVENT_FORK)
550 ourstatus->kind = TARGET_WAITKIND_FORKED;
551 else if (event == PTRACE_EVENT_VFORK)
552 ourstatus->kind = TARGET_WAITKIND_VFORKED;
553 else
554 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
555
556 ourstatus->value.related_pid = new_pid;
557 return inferior_ptid;
558 }
559
560 if (event == PTRACE_EVENT_EXEC)
561 {
562 ourstatus->kind = TARGET_WAITKIND_EXECD;
563 ourstatus->value.execd_pathname
564 = xstrdup (child_pid_to_exec_file (pid));
565
566 if (linux_parent_pid)
567 {
568 detach_breakpoints (linux_parent_pid);
569 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
570
571 linux_parent_pid = 0;
572 }
573
574 return inferior_ptid;
575 }
576
577 internal_error (__FILE__, __LINE__,
578 _("unknown ptrace event %d"), event);
579 }
580
581 \f
582 void
583 child_insert_fork_catchpoint (int pid)
584 {
585 if (! linux_supports_tracefork (pid))
586 error (_("Your system does not support fork catchpoints."));
587 }
588
589 void
590 child_insert_vfork_catchpoint (int pid)
591 {
592 if (!linux_supports_tracefork (pid))
593 error (_("Your system does not support vfork catchpoints."));
594 }
595
596 void
597 child_insert_exec_catchpoint (int pid)
598 {
599 if (!linux_supports_tracefork (pid))
600 error (_("Your system does not support exec catchpoints."));
601 }
602
603 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
604 are processes sharing the same VM space. A multi-threaded process
605 is basically a group of such processes. However, such a grouping
606 is almost entirely a user-space issue; the kernel doesn't enforce
607 such a grouping at all (this might change in the future). In
608 general, we'll rely on the threads library (i.e. the GNU/Linux
609 Threads library) to provide such a grouping.
610
611 It is perfectly possible to write a multi-threaded application
612 without the assistance of a threads library, by using the clone
613 system call directly. This module should be able to give some
614 rudimentary support for debugging such applications if developers
615 specify the CLONE_PTRACE flag in the clone system call, and are
616 using the Linux kernel 2.4 or above.
617
618 Note that there are some peculiarities in GNU/Linux that affect
619 this code:
620
621 - In general one should specify the __WCLONE flag to waitpid in
622 order to make it report events for any of the cloned processes
623 (and leave it out for the initial process). However, if a cloned
624 process has exited the exit status is only reported if the
625 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
626 we cannot use it since GDB must work on older systems too.
627
628 - When a traced, cloned process exits and is waited for by the
629 debugger, the kernel reassigns it to the original parent and
630 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
631 library doesn't notice this, which leads to the "zombie problem":
632 When debugged, a multi-threaded process that spawns a lot of
633 threads will run out of processes, even if the threads exit,
634 because the "zombies" stay around. */
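/* A minimal sketch of the waitpid pattern this file uses to cope with
   cloned LWPs, modeled on wait_lwp and lin_lwp_attach_lwp below: try
   a plain waitpid first and, if the kernel claims not to know the
   child (ECHILD), retry with the Linux-specific __WCLONE flag so that
   cloned LWPs are reported too.  The helper name is hypothetical and
   the function is illustrative only.  */
#if 0
static int
sketch_wait_lwp_either (int lwpid, int *statusp)
{
  int pid = my_waitpid (lwpid, statusp, 0);

  if (pid == -1 && errno == ECHILD)
    /* Not an ordinary child; it may be a cloned LWP.  */
    pid = my_waitpid (lwpid, statusp, __WCLONE);

  return pid;
}
#endif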
635
636 /* List of known LWPs. */
637 static struct lwp_info *lwp_list;
638
639 /* Number of LWPs in the list. */
640 static int num_lwps;
641 \f
642
643 #define GET_LWP(ptid) ptid_get_lwp (ptid)
644 #define GET_PID(ptid) ptid_get_pid (ptid)
645 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
646 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
647
648 /* If the last reported event was a SIGTRAP, this variable is set to
649 the process id of the LWP/thread that got it. */
650 ptid_t trap_ptid;
651 \f
652
653 /* Since we cannot wait (in linux_nat_wait) for the initial process and
654 any cloned processes with a single call to waitpid, we have to use
655 the WNOHANG flag and call waitpid in a loop. To optimize
656 things a bit we use `sigsuspend' to wake us up when a process has
657 something to report (it will send us a SIGCHLD if it has). To make
658 this work we have to juggle with the signal mask. We save the
659 original signal mask such that we can restore it before creating a
660 new process in order to avoid blocking certain signals in the
661 inferior. We then block SIGCHLD during the waitpid/sigsuspend
662 loop. */
663
664 /* Original signal mask. */
665 static sigset_t normal_mask;
666
667 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
668 _initialize_linux_nat. */
669 static sigset_t suspend_mask;
670
671 /* Signals to block so that sigsuspend works. */
672 static sigset_t blocked_mask;
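/* A minimal sketch of the waitpid/sigsuspend pattern described above,
   assuming SIGCHLD is already blocked and that suspend_mask (set up
   in _initialize_linux_nat) is the current mask with SIGCHLD
   unblocked.  The helper name is hypothetical and the function is
   illustrative only.  */
#if 0
static int
sketch_wait_for_event (int *statusp)
{
  int pid;

  for (;;)
    {
      /* With SIGCHLD blocked, no event can slip in between a failed
	 waitpid and the sigsuspend below.  Poll cloned processes
	 first, then the initial process.  */
      pid = my_waitpid (-1, statusp, __WCLONE | WNOHANG);
      if (pid <= 0)
	pid = my_waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
	return pid;

      /* Nothing to report yet; atomically unblock SIGCHLD and sleep
	 until some child sends it.  */
      sigsuspend (&suspend_mask);
    }
}
#endif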
673 \f
674
675 /* Prototypes for local functions. */
676 static int stop_wait_callback (struct lwp_info *lp, void *data);
677 static int linux_nat_thread_alive (ptid_t ptid);
678 \f
679 /* Convert wait status STATUS to a string. Used for printing debug
680 messages only. */
681
682 static char *
683 status_to_str (int status)
684 {
685 static char buf[64];
686
687 if (WIFSTOPPED (status))
688 snprintf (buf, sizeof (buf), "%s (stopped)",
689 strsignal (WSTOPSIG (status)));
690 else if (WIFSIGNALED (status))
691 snprintf (buf, sizeof (buf), "%s (terminated)",
692 strsignal (WTERMSIG (status)));
693 else
694 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
695
696 return buf;
697 }
698
699 /* Initialize the list of LWPs. Note that this module, contrary to
700 what GDB's generic threads layer does for its thread list,
701 re-initializes the LWP lists whenever we mourn or detach (which
702 doesn't involve mourning) the inferior. */
703
704 static void
705 init_lwp_list (void)
706 {
707 struct lwp_info *lp, *lpnext;
708
709 for (lp = lwp_list; lp; lp = lpnext)
710 {
711 lpnext = lp->next;
712 xfree (lp);
713 }
714
715 lwp_list = NULL;
716 num_lwps = 0;
717 }
718
719 /* Add the LWP specified by PID to the list. Return a pointer to the
720 structure describing the new LWP. */
721
722 static struct lwp_info *
723 add_lwp (ptid_t ptid)
724 {
725 struct lwp_info *lp;
726
727 gdb_assert (is_lwp (ptid));
728
729 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
730
731 memset (lp, 0, sizeof (struct lwp_info));
732
733 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
734
735 lp->ptid = ptid;
736
737 lp->next = lwp_list;
738 lwp_list = lp;
739 ++num_lwps;
740
741 return lp;
742 }
743
744 /* Remove the LWP specified by PID from the list. */
745
746 static void
747 delete_lwp (ptid_t ptid)
748 {
749 struct lwp_info *lp, *lpprev;
750
751 lpprev = NULL;
752
753 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
754 if (ptid_equal (lp->ptid, ptid))
755 break;
756
757 if (!lp)
758 return;
759
760 num_lwps--;
761
762 if (lpprev)
763 lpprev->next = lp->next;
764 else
765 lwp_list = lp->next;
766
767 xfree (lp);
768 }
769
770 /* Return a pointer to the structure describing the LWP corresponding
771 to PID. If no corresponding LWP could be found, return NULL. */
772
773 static struct lwp_info *
774 find_lwp_pid (ptid_t ptid)
775 {
776 struct lwp_info *lp;
777 int lwp;
778
779 if (is_lwp (ptid))
780 lwp = GET_LWP (ptid);
781 else
782 lwp = GET_PID (ptid);
783
784 for (lp = lwp_list; lp; lp = lp->next)
785 if (lwp == GET_LWP (lp->ptid))
786 return lp;
787
788 return NULL;
789 }
790
791 /* Call CALLBACK with its second argument set to DATA for every LWP in
792 the list. If CALLBACK returns 1 for a particular LWP, return a
793 pointer to the structure describing that LWP immediately.
794 Otherwise return NULL. */
795
796 struct lwp_info *
797 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
798 {
799 struct lwp_info *lp, *lpnext;
800
801 for (lp = lwp_list; lp; lp = lpnext)
802 {
803 lpnext = lp->next;
804 if ((*callback) (lp, data))
805 return lp;
806 }
807
808 return NULL;
809 }
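/* A minimal usage sketch for iterate_over_lwps: a callback that
   selects LWPs with a pending wait status, so the iteration stops at
   the first such LWP.  The names here are hypothetical; real callers
   appear throughout the rest of this file.  */
#if 0
static int
sketch_has_pending_status (struct lwp_info *lp, void *data)
{
  /* Returning non-zero makes iterate_over_lwps hand back this LWP.  */
  return lp->status != 0;
}

static struct lwp_info *
sketch_find_lwp_with_status (void)
{
  return iterate_over_lwps (sketch_has_pending_status, NULL);
}
#endif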
810
811 /* Update our internal state when changing from one fork (checkpoint,
812 et cetera) to another indicated by NEW_PTID. We can only switch
813 single-threaded applications, so we only create one new LWP, and
814 the previous list is discarded. */
815
816 void
817 linux_nat_switch_fork (ptid_t new_ptid)
818 {
819 struct lwp_info *lp;
820
821 init_lwp_list ();
822 lp = add_lwp (new_ptid);
823 lp->stopped = 1;
824 }
825
826 /* Record a PTID for later deletion. */
827
828 struct saved_ptids
829 {
830 ptid_t ptid;
831 struct saved_ptids *next;
832 };
833 static struct saved_ptids *threads_to_delete;
834
835 static void
836 record_dead_thread (ptid_t ptid)
837 {
838 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
839 p->ptid = ptid;
840 p->next = threads_to_delete;
841 threads_to_delete = p;
842 }
843
844 /* Delete any dead threads which are not the current thread. */
845
846 static void
847 prune_lwps (void)
848 {
849 struct saved_ptids **p = &threads_to_delete;
850
851 while (*p)
852 if (! ptid_equal ((*p)->ptid, inferior_ptid))
853 {
854 struct saved_ptids *tmp = *p;
855 delete_thread (tmp->ptid);
856 *p = tmp->next;
857 xfree (tmp);
858 }
859 else
860 p = &(*p)->next;
861 }
862
863 /* Callback for iterate_over_threads that finds a thread corresponding
864 to the given LWP. */
865
866 static int
867 find_thread_from_lwp (struct thread_info *thr, void *dummy)
868 {
869 ptid_t *ptid_p = dummy;
870
871 if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p))
872 return 1;
873 else
874 return 0;
875 }
876
877 /* Handle the exit of a single thread LP. */
878
879 static void
880 exit_lwp (struct lwp_info *lp)
881 {
882 if (in_thread_list (lp->ptid))
883 {
884 /* Core GDB cannot deal with us deleting the current thread. */
885 if (!ptid_equal (lp->ptid, inferior_ptid))
886 delete_thread (lp->ptid);
887 else
888 record_dead_thread (lp->ptid);
889 printf_unfiltered (_("[%s exited]\n"),
890 target_pid_to_str (lp->ptid));
891 }
892 else
893 {
894 /* Even if LP->PTID is not in the global GDB thread list, the
895 LWP may be - with an additional thread ID. We don't need
896 to print anything in this case; thread_db is in use and
897 already took care of that. But it didn't delete the thread
898 in order to handle zombies correctly. */
899
900 struct thread_info *thr;
901
902 thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid);
903 if (thr && !ptid_equal (thr->ptid, inferior_ptid))
904 delete_thread (thr->ptid);
905 else
906 record_dead_thread (thr->ptid);
907 }
908
909 delete_lwp (lp->ptid);
910 }
911
912 /* Attach to the LWP specified by PID. If VERBOSE is non-zero, print
913 a message telling the user that a new LWP has been added to the
914 process. */
915
916 void
917 lin_lwp_attach_lwp (ptid_t ptid, int verbose)
918 {
919 struct lwp_info *lp, *found_lp;
920
921 gdb_assert (is_lwp (ptid));
922
923 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
924 to interrupt either the ptrace() or waitpid() calls below. */
925 if (!sigismember (&blocked_mask, SIGCHLD))
926 {
927 sigaddset (&blocked_mask, SIGCHLD);
928 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
929 }
930
931 if (verbose)
932 printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));
933
934 found_lp = lp = find_lwp_pid (ptid);
935 if (lp == NULL)
936 lp = add_lwp (ptid);
937
938 /* We assume that we're already attached to any LWP that has an id
939 equal to the overall process id, and to any LWP that is already
940 in our list of LWPs. If we're not seeing exit events from threads
941 and we've had PID wraparound since we last tried to stop all threads,
942 this assumption might be wrong; fortunately, this is very unlikely
943 to happen. */
944 if (GET_LWP (ptid) != GET_PID (ptid) && found_lp == NULL)
945 {
946 pid_t pid;
947 int status;
948
949 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
950 error (_("Can't attach %s: %s"), target_pid_to_str (ptid),
951 safe_strerror (errno));
952
953 if (debug_linux_nat)
954 fprintf_unfiltered (gdb_stdlog,
955 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
956 target_pid_to_str (ptid));
957
958 pid = my_waitpid (GET_LWP (ptid), &status, 0);
959 if (pid == -1 && errno == ECHILD)
960 {
961 /* Try again with __WCLONE to check cloned processes. */
962 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
963 lp->cloned = 1;
964 }
965
966 gdb_assert (pid == GET_LWP (ptid)
967 && WIFSTOPPED (status) && WSTOPSIG (status));
968
969 target_post_attach (pid);
970
971 lp->stopped = 1;
972
973 if (debug_linux_nat)
974 {
975 fprintf_unfiltered (gdb_stdlog,
976 "LLAL: waitpid %s received %s\n",
977 target_pid_to_str (ptid),
978 status_to_str (status));
979 }
980 }
981 else
982 {
983 /* We assume that the LWP representing the original process is
984 already stopped. Mark it as stopped in the data structure
985 that the linux ptrace layer uses to keep track of threads.
986 Note that this won't have already been done since the main
987 thread will have, we assume, been stopped by an attach from a
988 different layer. */
989 lp->stopped = 1;
990 }
991 }
992
993 static void
994 linux_nat_attach (char *args, int from_tty)
995 {
996 struct lwp_info *lp;
997 pid_t pid;
998 int status;
999
1000 /* FIXME: We should probably accept a list of process id's, and
1001 attach all of them. */
1002 linux_ops->to_attach (args, from_tty);
1003
1004 /* Add the initial process as the first LWP to the list. */
1005 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1006 lp = add_lwp (inferior_ptid);
1007
1008 /* Make sure the initial process is stopped. The user-level threads
1009 layer might want to poke around in the inferior, and that won't
1010 work if things haven't stabilized yet. */
1011 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
1012 if (pid == -1 && errno == ECHILD)
1013 {
1014 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
1015
1016 /* Try again with __WCLONE to check cloned processes. */
1017 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
1018 lp->cloned = 1;
1019 }
1020
1021 gdb_assert (pid == GET_PID (inferior_ptid)
1022 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
1023
1024 lp->stopped = 1;
1025
1026 /* Fake the SIGSTOP that core GDB expects. */
1027 lp->status = W_STOPCODE (SIGSTOP);
1028 lp->resumed = 1;
1029 if (debug_linux_nat)
1030 {
1031 fprintf_unfiltered (gdb_stdlog,
1032 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
1033 }
1034 }
1035
1036 static int
1037 detach_callback (struct lwp_info *lp, void *data)
1038 {
1039 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1040
1041 if (debug_linux_nat && lp->status)
1042 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1043 strsignal (WSTOPSIG (lp->status)),
1044 target_pid_to_str (lp->ptid));
1045
1046 while (lp->signalled && lp->stopped)
1047 {
1048 errno = 0;
1049 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1050 WSTOPSIG (lp->status)) < 0)
1051 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
1052 safe_strerror (errno));
1053
1054 if (debug_linux_nat)
1055 fprintf_unfiltered (gdb_stdlog,
1056 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1057 target_pid_to_str (lp->ptid),
1058 status_to_str (lp->status));
1059
1060 lp->stopped = 0;
1061 lp->signalled = 0;
1062 lp->status = 0;
1063 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1064 here. But since lp->signalled was cleared above,
1065 stop_wait_callback didn't do anything; the process was left
1066 running. Shouldn't we be waiting for it to stop?
1067 I've removed the call, since stop_wait_callback now does do
1068 something when called with lp->signalled == 0. */
1069
1070 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1071 }
1072
1073 /* We don't actually detach from the LWP that has an id equal to the
1074 overall process id just yet. */
1075 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1076 {
1077 errno = 0;
1078 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1079 WSTOPSIG (lp->status)) < 0)
1080 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1081 safe_strerror (errno));
1082
1083 if (debug_linux_nat)
1084 fprintf_unfiltered (gdb_stdlog,
1085 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1086 target_pid_to_str (lp->ptid),
1087 strsignal (WSTOPSIG (lp->status)));
1088
1089 delete_lwp (lp->ptid);
1090 }
1091
1092 return 0;
1093 }
1094
1095 static void
1096 linux_nat_detach (char *args, int from_tty)
1097 {
1098 iterate_over_lwps (detach_callback, NULL);
1099
1100 /* Only the initial process should be left right now. */
1101 gdb_assert (num_lwps == 1);
1102
1103 trap_ptid = null_ptid;
1104
1105 /* Destroy LWP info; it's no longer valid. */
1106 init_lwp_list ();
1107
1108 /* Restore the original signal mask. */
1109 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1110 sigemptyset (&blocked_mask);
1111
1112 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1113 linux_ops->to_detach (args, from_tty);
1114 }
1115
1116 /* Resume LP. */
1117
1118 static int
1119 resume_callback (struct lwp_info *lp, void *data)
1120 {
1121 if (lp->stopped && lp->status == 0)
1122 {
1123 struct thread_info *tp;
1124
1125 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1126 0, TARGET_SIGNAL_0);
1127 if (debug_linux_nat)
1128 fprintf_unfiltered (gdb_stdlog,
1129 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1130 target_pid_to_str (lp->ptid));
1131 lp->stopped = 0;
1132 lp->step = 0;
1133 }
1134
1135 return 0;
1136 }
1137
1138 static int
1139 resume_clear_callback (struct lwp_info *lp, void *data)
1140 {
1141 lp->resumed = 0;
1142 return 0;
1143 }
1144
1145 static int
1146 resume_set_callback (struct lwp_info *lp, void *data)
1147 {
1148 lp->resumed = 1;
1149 return 0;
1150 }
1151
1152 static void
1153 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1154 {
1155 struct lwp_info *lp;
1156 int resume_all;
1157
1158 if (debug_linux_nat)
1159 fprintf_unfiltered (gdb_stdlog,
1160 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1161 step ? "step" : "resume",
1162 target_pid_to_str (ptid),
1163 signo ? strsignal (signo) : "0",
1164 target_pid_to_str (inferior_ptid));
1165
1166 prune_lwps ();
1167
1168 /* A specific PTID means `step only this process id'. */
1169 resume_all = (PIDGET (ptid) == -1);
1170
1171 if (resume_all)
1172 iterate_over_lwps (resume_set_callback, NULL);
1173 else
1174 iterate_over_lwps (resume_clear_callback, NULL);
1175
1176 /* If PID is -1, it's the current inferior that should be
1177 handled specially. */
1178 if (PIDGET (ptid) == -1)
1179 ptid = inferior_ptid;
1180
1181 lp = find_lwp_pid (ptid);
1182 if (lp)
1183 {
1184 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1185
1186 /* Remember if we're stepping. */
1187 lp->step = step;
1188
1189 /* Mark this LWP as resumed. */
1190 lp->resumed = 1;
1191
1192 /* If we have a pending wait status for this thread, there is no
1193 point in resuming the process. But first make sure that
1194 linux_nat_wait won't preemptively handle the event - we
1195 should never take this short-circuit if we are going to
1196 leave LP running, since we have skipped resuming all the
1197 other threads. This bit of code needs to be synchronized
1198 with linux_nat_wait. */
1199
1200 if (lp->status && WIFSTOPPED (lp->status))
1201 {
1202 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1203
1204 if (signal_stop_state (saved_signo) == 0
1205 && signal_print_state (saved_signo) == 0
1206 && signal_pass_state (saved_signo) == 1)
1207 {
1208 if (debug_linux_nat)
1209 fprintf_unfiltered (gdb_stdlog,
1210 "LLR: Not short circuiting for ignored "
1211 "status 0x%x\n", lp->status);
1212
1213 /* FIXME: What should we do if we are supposed to continue
1214 this thread with a signal? */
1215 gdb_assert (signo == TARGET_SIGNAL_0);
1216 signo = saved_signo;
1217 lp->status = 0;
1218 }
1219 }
1220
1221 if (lp->status)
1222 {
1223 /* FIXME: What should we do if we are supposed to continue
1224 this thread with a signal? */
1225 gdb_assert (signo == TARGET_SIGNAL_0);
1226
1227 if (debug_linux_nat)
1228 fprintf_unfiltered (gdb_stdlog,
1229 "LLR: Short circuiting for status 0x%x\n",
1230 lp->status);
1231
1232 return;
1233 }
1234
1235 /* Mark LWP as not stopped to prevent it from being continued by
1236 resume_callback. */
1237 lp->stopped = 0;
1238 }
1239
1240 if (resume_all)
1241 iterate_over_lwps (resume_callback, NULL);
1242
1243 linux_ops->to_resume (ptid, step, signo);
1244 if (debug_linux_nat)
1245 fprintf_unfiltered (gdb_stdlog,
1246 "LLR: %s %s, %s (resume event thread)\n",
1247 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1248 target_pid_to_str (ptid),
1249 signo ? strsignal (signo) : "0");
1250 }
1251
1252 /* Issue kill to specified lwp. */
1253
1254 static int tkill_failed;
1255
1256 static int
1257 kill_lwp (int lwpid, int signo)
1258 {
1259 errno = 0;
1260
1261 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1262 fails, then we are not using nptl threads and we should be using kill. */
1263
1264 #ifdef HAVE_TKILL_SYSCALL
1265 if (!tkill_failed)
1266 {
1267 int ret = syscall (__NR_tkill, lwpid, signo);
1268 if (errno != ENOSYS)
1269 return ret;
1270 errno = 0;
1271 tkill_failed = 1;
1272 }
1273 #endif
1274
1275 return kill (lwpid, signo);
1276 }
1277
1278 /* Handle a GNU/Linux extended wait response. Most of the work we
1279 just pass off to linux_handle_extended_wait, but if it reports a
1280 clone event we need to add the new LWP to our list (and not report
1281 the trap to higher layers). This function returns non-zero if
1282 the event should be ignored and we should wait again. */
1283
1284 static int
1285 linux_nat_handle_extended (struct lwp_info *lp, int status)
1286 {
1287 linux_handle_extended_wait (GET_LWP (lp->ptid), status,
1288 &lp->waitstatus);
1289
1290 /* TARGET_WAITKIND_SPURIOUS is used to indicate clone events. */
1291 if (lp->waitstatus.kind == TARGET_WAITKIND_SPURIOUS)
1292 {
1293 struct lwp_info *new_lp;
1294 new_lp = add_lwp (BUILD_LWP (lp->waitstatus.value.related_pid,
1295 GET_PID (inferior_ptid)));
1296 new_lp->cloned = 1;
1297 new_lp->stopped = 1;
1298
1299 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1300
1301 if (debug_linux_nat)
1302 fprintf_unfiltered (gdb_stdlog,
1303 "LLHE: Got clone event from LWP %ld, resuming\n",
1304 GET_LWP (lp->ptid));
1305 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1306
1307 return 1;
1308 }
1309
1310 return 0;
1311 }
1312
1313 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1314 exited. */
1315
1316 static int
1317 wait_lwp (struct lwp_info *lp)
1318 {
1319 pid_t pid;
1320 int status;
1321 int thread_dead = 0;
1322
1323 gdb_assert (!lp->stopped);
1324 gdb_assert (lp->status == 0);
1325
1326 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1327 if (pid == -1 && errno == ECHILD)
1328 {
1329 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1330 if (pid == -1 && errno == ECHILD)
1331 {
1332 /* The thread has previously exited. We need to delete it
1333 now because, for some vendor 2.4 kernels with NPTL
1334 support backported, there won't be an exit event unless
1335 it is the main thread. 2.6 kernels will report an exit
1336 event for each thread that exits, as expected. */
1337 thread_dead = 1;
1338 if (debug_linux_nat)
1339 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1340 target_pid_to_str (lp->ptid));
1341 }
1342 }
1343
1344 if (!thread_dead)
1345 {
1346 gdb_assert (pid == GET_LWP (lp->ptid));
1347
1348 if (debug_linux_nat)
1349 {
1350 fprintf_unfiltered (gdb_stdlog,
1351 "WL: waitpid %s received %s\n",
1352 target_pid_to_str (lp->ptid),
1353 status_to_str (status));
1354 }
1355 }
1356
1357 /* Check if the thread has exited. */
1358 if (WIFEXITED (status) || WIFSIGNALED (status))
1359 {
1360 thread_dead = 1;
1361 if (debug_linux_nat)
1362 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1363 target_pid_to_str (lp->ptid));
1364 }
1365
1366 if (thread_dead)
1367 {
1368 exit_lwp (lp);
1369 return 0;
1370 }
1371
1372 gdb_assert (WIFSTOPPED (status));
1373
1374 /* Handle GNU/Linux's extended waitstatus for trace events. */
1375 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1376 {
1377 if (debug_linux_nat)
1378 fprintf_unfiltered (gdb_stdlog,
1379 "WL: Handling extended status 0x%06x\n",
1380 status);
1381 if (linux_nat_handle_extended (lp, status))
1382 return wait_lwp (lp);
1383 }
1384
1385 return status;
1386 }
1387
1388 /* Send a SIGSTOP to LP. */
1389
1390 static int
1391 stop_callback (struct lwp_info *lp, void *data)
1392 {
1393 if (!lp->stopped && !lp->signalled)
1394 {
1395 int ret;
1396
1397 if (debug_linux_nat)
1398 {
1399 fprintf_unfiltered (gdb_stdlog,
1400 "SC: kill %s **<SIGSTOP>**\n",
1401 target_pid_to_str (lp->ptid));
1402 }
1403 errno = 0;
1404 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1405 if (debug_linux_nat)
1406 {
1407 fprintf_unfiltered (gdb_stdlog,
1408 "SC: lwp kill %d %s\n",
1409 ret,
1410 errno ? safe_strerror (errno) : "ERRNO-OK");
1411 }
1412
1413 lp->signalled = 1;
1414 gdb_assert (lp->status == 0);
1415 }
1416
1417 return 0;
1418 }
1419
1420 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1421 a pointer to a set of signals to be flushed immediately. */
1422
1423 static int
1424 stop_wait_callback (struct lwp_info *lp, void *data)
1425 {
1426 sigset_t *flush_mask = data;
1427
1428 if (!lp->stopped)
1429 {
1430 int status;
1431
1432 status = wait_lwp (lp);
1433 if (status == 0)
1434 return 0;
1435
1436 /* Ignore any signals in FLUSH_MASK. */
1437 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1438 {
1439 if (!lp->signalled)
1440 {
1441 lp->stopped = 1;
1442 return 0;
1443 }
1444
1445 errno = 0;
1446 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1447 if (debug_linux_nat)
1448 fprintf_unfiltered (gdb_stdlog,
1449 "PTRACE_CONT %s, 0, 0 (%s)\n",
1450 target_pid_to_str (lp->ptid),
1451 errno ? safe_strerror (errno) : "OK");
1452
1453 return stop_wait_callback (lp, flush_mask);
1454 }
1455
1456 if (WSTOPSIG (status) != SIGSTOP)
1457 {
1458 if (WSTOPSIG (status) == SIGTRAP)
1459 {
1460 /* If a LWP other than the LWP that we're reporting an
1461 event for has hit a GDB breakpoint (as opposed to
1462 some random trap signal), then just arrange for it to
1463 hit it again later. We don't keep the SIGTRAP status
1464 and don't forward the SIGTRAP signal to the LWP. We
1465 will handle the current event, eventually we will
1466 resume all LWPs, and this one will get its breakpoint
1467 trap again.
1468
1469 If we do not do this, then we run the risk that the
1470 user will delete or disable the breakpoint, but the
1471 thread will have already tripped on it. */
1472
1473 /* Now resume this LWP and get the SIGSTOP event. */
1474 errno = 0;
1475 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1476 if (debug_linux_nat)
1477 {
1478 fprintf_unfiltered (gdb_stdlog,
1479 "PTRACE_CONT %s, 0, 0 (%s)\n",
1480 target_pid_to_str (lp->ptid),
1481 errno ? safe_strerror (errno) : "OK");
1482
1483 fprintf_unfiltered (gdb_stdlog,
1484 "SWC: Candidate SIGTRAP event in %s\n",
1485 target_pid_to_str (lp->ptid));
1486 }
1487 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1488 stop_wait_callback (lp, data);
1489 /* If there's another event, throw it back into the queue. */
1490 if (lp->status)
1491 {
1492 if (debug_linux_nat)
1493 {
1494 fprintf_unfiltered (gdb_stdlog,
1495 "SWC: kill %s, %s\n",
1496 target_pid_to_str (lp->ptid),
1497 status_to_str ((int) status));
1498 }
1499 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1500 }
1501 /* Save the sigtrap event. */
1502 lp->status = status;
1503 return 0;
1504 }
1505 else
1506 {
1507 /* The thread was stopped with a signal other than
1508 SIGSTOP, and didn't accidentally trip a breakpoint. */
1509
1510 if (debug_linux_nat)
1511 {
1512 fprintf_unfiltered (gdb_stdlog,
1513 "SWC: Pending event %s in %s\n",
1514 status_to_str ((int) status),
1515 target_pid_to_str (lp->ptid));
1516 }
1517 /* Now resume this LWP and get the SIGSTOP event. */
1518 errno = 0;
1519 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1520 if (debug_linux_nat)
1521 fprintf_unfiltered (gdb_stdlog,
1522 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1523 target_pid_to_str (lp->ptid),
1524 errno ? safe_strerror (errno) : "OK");
1525
1526 /* Hold this event/waitstatus while we check to see if
1527 there are any more (we still want to get that SIGSTOP). */
1528 stop_wait_callback (lp, data);
1529 /* If the lp->status field is still empty, use it to hold
1530 this event. If not, then this event must be returned
1531 to the event queue of the LWP. */
1532 if (lp->status == 0)
1533 lp->status = status;
1534 else
1535 {
1536 if (debug_linux_nat)
1537 {
1538 fprintf_unfiltered (gdb_stdlog,
1539 "SWC: kill %s, %s\n",
1540 target_pid_to_str (lp->ptid),
1541 status_to_str ((int) status));
1542 }
1543 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1544 }
1545 return 0;
1546 }
1547 }
1548 else
1549 {
1550 /* We caught the SIGSTOP that we intended to catch, so
1551 there's no SIGSTOP pending. */
1552 lp->stopped = 1;
1553 lp->signalled = 0;
1554 }
1555 }
1556
1557 return 0;
1558 }
1559
1560 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1561 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1562
1563 static int
1564 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1565 {
1566 sigset_t blocked, ignored;
1567 int i;
1568
1569 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1570
1571 if (!flush_mask)
1572 return 0;
1573
1574 for (i = 1; i < NSIG; i++)
1575 if (sigismember (pending, i))
1576 if (!sigismember (flush_mask, i)
1577 || sigismember (&blocked, i)
1578 || sigismember (&ignored, i))
1579 sigdelset (pending, i);
1580
1581 if (sigisemptyset (pending))
1582 return 0;
1583
1584 return 1;
1585 }
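/* A minimal sketch of where the masks used above come from, assuming
   (as linux_proc_pending_signals does elsewhere in GDB) that
   /proc/<pid>/status carries "SigPnd:", "SigBlk:" and "SigIgn:" lines
   holding hexadecimal signal masks.  Only the pending mask and the
   first 32 signals are handled here, for brevity; the helper name is
   hypothetical.  */
#if 0
static void
sketch_read_pending_mask (int pid, sigset_t *pending)
{
  char path[MAXPATHLEN], line[128];
  FILE *procfile;

  sigemptyset (pending);
  sprintf (path, "/proc/%d/status", pid);
  procfile = fopen (path, "r");
  if (procfile == NULL)
    return;

  while (fgets (line, sizeof (line), procfile) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0)
      {
	unsigned long mask = strtoul (line + 7, NULL, 16);
	int sig;

	for (sig = 1; sig < 32; sig++)
	  if (mask & (1UL << (sig - 1)))
	    sigaddset (pending, sig);
      }

  fclose (procfile);
}
#endif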
1586
1587 /* DATA is interpreted as a mask of signals to flush. If LP has
1588 signals pending, and they are all in the flush mask, then arrange
1589 to flush them. LP should be stopped, as should all other threads
1590 it might share a signal queue with. */
1591
1592 static int
1593 flush_callback (struct lwp_info *lp, void *data)
1594 {
1595 sigset_t *flush_mask = data;
1596 sigset_t pending, intersection, blocked, ignored;
1597 int pid, status;
1598
1599 /* Normally, when an LWP exits, it is removed from the LWP list. The
1600 last LWP isn't removed till later, however. So if there is only
1601 one LWP on the list, make sure it's alive. */
1602 if (lwp_list == lp && lp->next == NULL)
1603 if (!linux_nat_thread_alive (lp->ptid))
1604 return 0;
1605
1606 /* Just because the LWP is stopped doesn't mean that new signals
1607 can't arrive from outside, so this function must be careful of
1608 race conditions. However, because all threads are stopped, we
1609 can assume that the pending mask will not shrink unless we resume
1610 the LWP, and that it will then get another signal. We can't
1611 control which one, however. */
1612
1613 if (lp->status)
1614 {
1615 if (debug_linux_nat)
1616 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1617 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1618 lp->status = 0;
1619 }
1620
1621 while (linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1622 {
1623 int ret;
1624
1625 errno = 0;
1626 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1627 if (debug_linux_nat)
1628 fprintf_unfiltered (gdb_stderr,
1629 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1630
1631 lp->stopped = 0;
1632 stop_wait_callback (lp, flush_mask);
1633 if (debug_linux_nat)
1634 fprintf_unfiltered (gdb_stderr,
1635 "FC: Wait finished; saved status is %d\n",
1636 lp->status);
1637 }
1638
1639 return 0;
1640 }
1641
1642 /* Return non-zero if LP has a wait status pending. */
1643
1644 static int
1645 status_callback (struct lwp_info *lp, void *data)
1646 {
1647 /* Only report a pending wait status if we pretend that this has
1648 indeed been resumed. */
1649 return (lp->status != 0 && lp->resumed);
1650 }
1651
1652 /* Return non-zero if LP isn't stopped. */
1653
1654 static int
1655 running_callback (struct lwp_info *lp, void *data)
1656 {
1657 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1658 }
1659
1660 /* Count the LWP's that have had events. */
1661
1662 static int
1663 count_events_callback (struct lwp_info *lp, void *data)
1664 {
1665 int *count = data;
1666
1667 gdb_assert (count != NULL);
1668
1669 /* Count only LWPs that have a SIGTRAP event pending. */
1670 if (lp->status != 0
1671 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1672 (*count)++;
1673
1674 return 0;
1675 }
1676
1677 /* Select the LWP (if any) that is currently being single-stepped. */
1678
1679 static int
1680 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1681 {
1682 if (lp->step && lp->status != 0)
1683 return 1;
1684 else
1685 return 0;
1686 }
1687
1688 /* Select the Nth LWP that has had a SIGTRAP event. */
1689
1690 static int
1691 select_event_lwp_callback (struct lwp_info *lp, void *data)
1692 {
1693 int *selector = data;
1694
1695 gdb_assert (selector != NULL);
1696
1697 /* Select only LWPs that have a SIGTRAP event pending. */
1698 if (lp->status != 0
1699 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1700 if ((*selector)-- == 0)
1701 return 1;
1702
1703 return 0;
1704 }
1705
1706 static int
1707 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1708 {
1709 struct lwp_info *event_lp = data;
1710
1711 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1712 if (lp == event_lp)
1713 return 0;
1714
1715 /* If a LWP other than the LWP that we're reporting an event for has
1716 hit a GDB breakpoint (as opposed to some random trap signal),
1717 then just arrange for it to hit it again later. We don't keep
1718 the SIGTRAP status and don't forward the SIGTRAP signal to the
1719 LWP. We will handle the current event, eventually we will resume
1720 all LWPs, and this one will get its breakpoint trap again.
1721
1722 If we do not do this, then we run the risk that the user will
1723 delete or disable the breakpoint, but the LWP will have already
1724 tripped on it. */
1725
1726 if (lp->status != 0
1727 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1728 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1729 DECR_PC_AFTER_BREAK))
1730 {
1731 if (debug_linux_nat)
1732 fprintf_unfiltered (gdb_stdlog,
1733 "CBC: Push back breakpoint for %s\n",
1734 target_pid_to_str (lp->ptid));
1735
1736 /* Back up the PC if necessary. */
1737 if (DECR_PC_AFTER_BREAK)
1738 write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid);
1739
1740 /* Throw away the SIGTRAP. */
1741 lp->status = 0;
1742 }
1743
1744 return 0;
1745 }
1746
1747 /* Select one LWP out of those that have events pending. */
1748
1749 static void
1750 select_event_lwp (struct lwp_info **orig_lp, int *status)
1751 {
1752 int num_events = 0;
1753 int random_selector;
1754 struct lwp_info *event_lp;
1755
1756 /* Record the wait status for the original LWP. */
1757 (*orig_lp)->status = *status;
1758
1759 /* Give preference to any LWP that is being single-stepped. */
1760 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1761 if (event_lp != NULL)
1762 {
1763 if (debug_linux_nat)
1764 fprintf_unfiltered (gdb_stdlog,
1765 "SEL: Select single-step %s\n",
1766 target_pid_to_str (event_lp->ptid));
1767 }
1768 else
1769 {
1770 /* No single-stepping LWP. Select one at random, out of those
1771 which have had SIGTRAP events. */
1772
1773 /* First see how many SIGTRAP events we have. */
1774 iterate_over_lwps (count_events_callback, &num_events);
1775
1776 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1777 random_selector = (int)
1778 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1779
1780 if (debug_linux_nat && num_events > 1)
1781 fprintf_unfiltered (gdb_stdlog,
1782 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1783 num_events, random_selector);
1784
1785 event_lp = iterate_over_lwps (select_event_lwp_callback,
1786 &random_selector);
1787 }
1788
1789 if (event_lp != NULL)
1790 {
1791 /* Switch the event LWP. */
1792 *orig_lp = event_lp;
1793 *status = event_lp->status;
1794 }
1795
1796 /* Flush the wait status for the event LWP. */
1797 (*orig_lp)->status = 0;
1798 }
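/* A worked example of the random selector used above: the expression
   (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0)) scales
   rand () from [0, RAND_MAX] into [0, num_events), so for, say,
   num_events == 3 it yields 0, 1 or 2 with roughly equal probability
   and can never yield 3.  A hypothetical helper spelling this out:  */
#if 0
static int
sketch_pick_random_index (int num_events)
{
  /* RAND_MAX / (RAND_MAX + 1.0) < 1, so the result is always strictly
     less than num_events.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif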
1799
1800 /* Return non-zero if LP has been resumed. */
1801
1802 static int
1803 resumed_callback (struct lwp_info *lp, void *data)
1804 {
1805 return lp->resumed;
1806 }
1807
1808 /* Stop an active thread, verify it still exists, then resume it. */
1809
1810 static int
1811 stop_and_resume_callback (struct lwp_info *lp, void *data)
1812 {
1813 struct lwp_info *ptr;
1814
1815 if (!lp->stopped && !lp->signalled)
1816 {
1817 stop_callback (lp, NULL);
1818 stop_wait_callback (lp, NULL);
1819 /* Resume if the lwp still exists. */
1820 for (ptr = lwp_list; ptr; ptr = ptr->next)
1821 if (lp == ptr)
1822 {
1823 resume_callback (lp, NULL);
1824 resume_set_callback (lp, NULL);
1825 }
1826 }
1827 return 0;
1828 }
1829
1830 static ptid_t
1831 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1832 {
1833 struct lwp_info *lp = NULL;
1834 int options = 0;
1835 int status = 0;
1836 pid_t pid = PIDGET (ptid);
1837 sigset_t flush_mask;
1838
1839 /* The first time we get here after starting a new inferior, we may
1840 not have added it to the LWP list yet - this is the earliest
1841 moment at which we know its PID. */
1842 if (num_lwps == 0)
1843 {
1844 gdb_assert (!is_lwp (inferior_ptid));
1845
1846 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1847 GET_PID (inferior_ptid));
1848 lp = add_lwp (inferior_ptid);
1849 lp->resumed = 1;
1850 }
1851
1852 sigemptyset (&flush_mask);
1853
1854 /* Make sure SIGCHLD is blocked. */
1855 if (!sigismember (&blocked_mask, SIGCHLD))
1856 {
1857 sigaddset (&blocked_mask, SIGCHLD);
1858 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1859 }
1860
1861 retry:
1862
1863 /* Make sure there is at least one LWP that has been resumed. */
1864 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
1865
1866 /* First check if there is a LWP with a wait status pending. */
1867 if (pid == -1)
1868 {
1869 /* Any LWP that's been resumed will do. */
1870 lp = iterate_over_lwps (status_callback, NULL);
1871 if (lp)
1872 {
1873 status = lp->status;
1874 lp->status = 0;
1875
1876 if (debug_linux_nat && status)
1877 fprintf_unfiltered (gdb_stdlog,
1878 "LLW: Using pending wait status %s for %s.\n",
1879 status_to_str (status),
1880 target_pid_to_str (lp->ptid));
1881 }
1882
1883 /* But if we don't find one, we'll have to wait, and check both
1884 cloned and uncloned processes. We start with the cloned
1885 processes. */
1886 options = __WCLONE | WNOHANG;
1887 }
1888 else if (is_lwp (ptid))
1889 {
1890 if (debug_linux_nat)
1891 fprintf_unfiltered (gdb_stdlog,
1892 "LLW: Waiting for specific LWP %s.\n",
1893 target_pid_to_str (ptid));
1894
1895 /* We have a specific LWP to check. */
1896 lp = find_lwp_pid (ptid);
1897 gdb_assert (lp);
1898 status = lp->status;
1899 lp->status = 0;
1900
1901 if (debug_linux_nat && status)
1902 fprintf_unfiltered (gdb_stdlog,
1903 "LLW: Using pending wait status %s for %s.\n",
1904 status_to_str (status),
1905 target_pid_to_str (lp->ptid));
1906
1907 /* If we have to wait, take into account whether PID is a cloned
1908 process or not. And we have to convert it to something that
1909 the layer beneath us can understand. */
1910 options = lp->cloned ? __WCLONE : 0;
1911 pid = GET_LWP (ptid);
1912 }
1913
1914 if (status && lp->signalled)
1915 {
1916 /* A pending SIGSTOP may interfere with the normal stream of
1917 events. In a typical case where interference is a problem,
1918 we have a SIGSTOP signal pending for LWP A while
1919 single-stepping it, encounter an event in LWP B, and take the
1920 pending SIGSTOP while trying to stop LWP A. After processing
1921 the event in LWP B, LWP A is continued, and we'll never see
1922 the SIGTRAP associated with the last time we were
1923 single-stepping LWP A. */
1924
1925 /* Resume the thread. It should halt immediately returning the
1926 pending SIGSTOP. */
1927 registers_changed ();
1928 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1929 lp->step, TARGET_SIGNAL_0);
1930 if (debug_linux_nat)
1931 fprintf_unfiltered (gdb_stdlog,
1932 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
1933 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1934 target_pid_to_str (lp->ptid));
1935 lp->stopped = 0;
1936 gdb_assert (lp->resumed);
1937
1938 /* This should catch the pending SIGSTOP. */
1939 stop_wait_callback (lp, NULL);
1940 }
1941
1942 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1943 attached process. */
1944 set_sigio_trap ();
1945
1946 while (status == 0)
1947 {
1948 pid_t lwpid;
1949
1950 lwpid = my_waitpid (pid, &status, options);
1951 if (lwpid > 0)
1952 {
1953 gdb_assert (pid == -1 || lwpid == pid);
1954
1955 if (debug_linux_nat)
1956 {
1957 fprintf_unfiltered (gdb_stdlog,
1958 "LLW: waitpid %ld received %s\n",
1959 (long) lwpid, status_to_str (status));
1960 }
1961
1962 lp = find_lwp_pid (pid_to_ptid (lwpid));
1963
1964 /* Check for stop events reported by a process we didn't
1965 already know about - anything not already in our LWP
1966 list.
1967
1968 If we're expecting to receive stopped processes after
1969 fork, vfork, and clone events, then we'll just add the
1970 new one to our list and go back to waiting for the event
1971 to be reported - the stopped process might be returned
1972 from waitpid before or after the event is. */
1973 if (WIFSTOPPED (status) && !lp)
1974 {
1975 linux_record_stopped_pid (lwpid);
1976 status = 0;
1977 continue;
1978 }
1979
1980 /* Make sure we don't report an event for the exit of an LWP not in
1981 our list, i.e. not part of the current process. This can happen
1982 if we detach from a program we originally forked and then it
1983 exits. */
1984 if (!WIFSTOPPED (status) && !lp)
1985 {
1986 status = 0;
1987 continue;
1988 }
1989
1990 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
1991 CLONE_PTRACE processes which do not use the thread library -
1992 otherwise we wouldn't find the new LWP this way. That doesn't
1993 currently work, and the following code is currently unreachable
1994 due to the two blocks above. If it's fixed some day, this code
1995 should be broken out into a function so that we can also pick up
1996 LWPs from the new interface. */
1997 if (!lp)
1998 {
1999 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2000 if (options & __WCLONE)
2001 lp->cloned = 1;
2002
2003 gdb_assert (WIFSTOPPED (status)
2004 && WSTOPSIG (status) == SIGSTOP);
2005 lp->signalled = 1;
2006
2007 if (!in_thread_list (inferior_ptid))
2008 {
2009 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2010 GET_PID (inferior_ptid));
2011 add_thread (inferior_ptid);
2012 }
2013
2014 add_thread (lp->ptid);
2015 printf_unfiltered (_("[New %s]\n"),
2016 target_pid_to_str (lp->ptid));
2017 }
2018
2019 /* Handle GNU/Linux's extended waitstatus for trace events. */
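	      /* The PTRACE_EVENT_* code is reported in bits 16 and
		 above of the waitpid status; a plain SIGTRAP never
		 has those bits set.  */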
2020 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2021 {
2022 if (debug_linux_nat)
2023 fprintf_unfiltered (gdb_stdlog,
2024 "LLW: Handling extended status 0x%06x\n",
2025 status);
2026 if (linux_nat_handle_extended (lp, status))
2027 {
2028 status = 0;
2029 continue;
2030 }
2031 }
2032
2033 /* Check if the thread has exited. */
2034 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2035 {
2036 /* If this is the main thread, we must stop all threads and
2037 verify if they are still alive. This is because in the nptl
2038 thread model, there is no signal issued for exiting LWPs
2039 other than the main thread. We only get the main thread
2040 exit signal once all child threads have already exited.
2041 If we stop all the threads and use the stop_wait_callback
2042 to check if they have exited, we can determine whether this
2043 signal should be ignored or whether it means the end of the
2044 debugged application, regardless of which threading model
2045 is being used. */
2046 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2047 {
2048 lp->stopped = 1;
2049 iterate_over_lwps (stop_and_resume_callback, NULL);
2050 }
2051
2052 if (debug_linux_nat)
2053 fprintf_unfiltered (gdb_stdlog,
2054 "LLW: %s exited.\n",
2055 target_pid_to_str (lp->ptid));
2056
2057 exit_lwp (lp);
2058
2059 /* If there is at least one more LWP, then the exit signal
2060 was not the end of the debugged application and should be
2061 ignored. */
2062 if (num_lwps > 0)
2063 {
2064 /* Make sure there is at least one thread running. */
2065 gdb_assert (iterate_over_lwps (running_callback, NULL));
2066
2067 /* Discard the event. */
2068 status = 0;
2069 continue;
2070 }
2071 }
2072
2073 /* Check if the current LWP has previously exited. In the nptl
2074 thread model, LWPs other than the main thread do not issue
2075 signals when they exit so we must check whenever the thread
2076 has stopped. A similar check is made in stop_wait_callback(). */
2077 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2078 {
2079 if (debug_linux_nat)
2080 fprintf_unfiltered (gdb_stdlog,
2081 "LLW: %s exited.\n",
2082 target_pid_to_str (lp->ptid));
2083
2084 exit_lwp (lp);
2085
2086 /* Make sure there is at least one thread running. */
2087 gdb_assert (iterate_over_lwps (running_callback, NULL));
2088
2089 /* Discard the event. */
2090 status = 0;
2091 continue;
2092 }
2093
2094 /* Make sure we don't report a SIGSTOP that we sent
2095 ourselves in an attempt to stop an LWP. */
2096 if (lp->signalled
2097 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2098 {
2099 if (debug_linux_nat)
2100 fprintf_unfiltered (gdb_stdlog,
2101 "LLW: Delayed SIGSTOP caught for %s.\n",
2102 target_pid_to_str (lp->ptid));
2103
2104 /* This is a delayed SIGSTOP. */
2105 lp->signalled = 0;
2106
2107 registers_changed ();
2108 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2109 lp->step, TARGET_SIGNAL_0);
2110 if (debug_linux_nat)
2111 fprintf_unfiltered (gdb_stdlog,
2112 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2113 lp->step ?
2114 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2115 target_pid_to_str (lp->ptid));
2116
2117 lp->stopped = 0;
2118 gdb_assert (lp->resumed);
2119
2120 /* Discard the event. */
2121 status = 0;
2122 continue;
2123 }
2124
2125 break;
2126 }
2127
2128 if (pid == -1)
2129 {
2130 /* Alternate between checking cloned and uncloned processes. */
2131 options ^= __WCLONE;
2132
2133 /* And suspend every time we have checked both. */
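	  /* suspend_mask has SIGCHLD removed (see
	     _initialize_linux_nat), so sigsuspend returns as soon as
	     a child changes state and the SIGCHLD handler runs.  */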
2134 if (options & __WCLONE)
2135 sigsuspend (&suspend_mask);
2136 }
2137
2138 /* We shouldn't end up here unless we want to try again. */
2139 gdb_assert (status == 0);
2140 }
2141
2142 clear_sigio_trap ();
2143 clear_sigint_trap ();
2144
2145 gdb_assert (lp);
2146
2147 /* Don't report signals that GDB isn't interested in, such as
2148 signals that are neither printed nor stopped upon. Stopping all
2149 threads can be a bit time-consuming, so if we want decent
2150 performance with heavily multi-threaded programs, especially when
2151 they're using a high frequency timer, we'd better avoid it if we
2152 can. */
2153
2154 if (WIFSTOPPED (status))
2155 {
2156 int signo = target_signal_from_host (WSTOPSIG (status));
2157
2158 if (signal_stop_state (signo) == 0
2159 && signal_print_state (signo) == 0
2160 && signal_pass_state (signo) == 1)
2161 {
2162 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2163 here? It is not clear we should. GDB may not expect
2164 other threads to run. On the other hand, not resuming
2165 newly attached threads may cause an unwanted delay in
2166 getting them running. */
2167 registers_changed ();
2168 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2169 lp->step, signo);
2170 if (debug_linux_nat)
2171 fprintf_unfiltered (gdb_stdlog,
2172 "LLW: %s %s, %s (preempt 'handle')\n",
2173 lp->step ?
2174 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2175 target_pid_to_str (lp->ptid),
2176 signo ? strsignal (signo) : "0");
2177 lp->stopped = 0;
2178 status = 0;
2179 goto retry;
2180 }
2181
2182 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2183 {
2184 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2185 forwarded to the entire process group, that is, all LWP's
2186 will receive it. Since we only want to report it once,
2187 we try to flush it from all LWPs except this one. */
2188 sigaddset (&flush_mask, SIGINT);
2189 }
2190 }
2191
2192 /* This LWP is stopped now. */
2193 lp->stopped = 1;
2194
2195 if (debug_linux_nat)
2196 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2197 status_to_str (status), target_pid_to_str (lp->ptid));
2198
2199 /* Now stop all other LWP's ... */
2200 iterate_over_lwps (stop_callback, NULL);
2201
2202 /* ... and wait until all of them have reported back that they're no
2203 longer running. */
2204 iterate_over_lwps (stop_wait_callback, &flush_mask);
2205 iterate_over_lwps (flush_callback, &flush_mask);
2206
2207 /* If we're not waiting for a specific LWP, choose an event LWP from
2208 among those that have had events. Giving equal priority to all
2209 LWPs that have had events helps prevent starvation. */
2210 if (pid == -1)
2211 select_event_lwp (&lp, &status);
2212
2213 /* Now that we've selected our final event LWP, cancel any
2214 breakpoints in other LWPs that have hit a GDB breakpoint. See
2215 the comment in cancel_breakpoints_callback to find out why. */
2216 iterate_over_lwps (cancel_breakpoints_callback, lp);
2217
2218 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2219 {
2220 trap_ptid = lp->ptid;
2221 if (debug_linux_nat)
2222 fprintf_unfiltered (gdb_stdlog,
2223 "LLW: trap_ptid is %s.\n",
2224 target_pid_to_str (trap_ptid));
2225 }
2226 else
2227 trap_ptid = null_ptid;
2228
2229 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2230 {
2231 *ourstatus = lp->waitstatus;
2232 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2233 }
2234 else
2235 store_waitstatus (ourstatus, status);
2236
2237 return lp->ptid;
2238 }
2239
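/* Callback for iterate_over_lwps: send a PTRACE_KILL request to the
   LWP described by LP.  */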
2240 static int
2241 kill_callback (struct lwp_info *lp, void *data)
2242 {
2243 errno = 0;
2244 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2245 if (debug_linux_nat)
2246 fprintf_unfiltered (gdb_stdlog,
2247 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2248 target_pid_to_str (lp->ptid),
2249 errno ? safe_strerror (errno) : "OK");
2250
2251 return 0;
2252 }
2253
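/* Callback for iterate_over_lwps: wait until the LWP described by LP
   has been reaped, discarding any events it may still have
   pending.  */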
2254 static int
2255 kill_wait_callback (struct lwp_info *lp, void *data)
2256 {
2257 pid_t pid;
2258
2259 /* We must make sure that there are no pending events (delayed
2260 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
2261 program doesn't interfere with any following debugging session. */
2262
2263 /* For cloned processes we must check both with __WCLONE and
2264 without, since the exit status of a cloned process isn't reported
2265 with __WCLONE. */
2266 if (lp->cloned)
2267 {
2268 do
2269 {
2270 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2271 if (pid != (pid_t) -1 && debug_linux_nat)
2272 {
2273 fprintf_unfiltered (gdb_stdlog,
2274 "KWC: wait %s received unknown.\n",
2275 target_pid_to_str (lp->ptid));
2276 }
2277 }
2278 while (pid == GET_LWP (lp->ptid));
2279
2280 gdb_assert (pid == -1 && errno == ECHILD);
2281 }
2282
2283 do
2284 {
2285 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2286 if (pid != (pid_t) -1 && debug_linux_nat)
2287 {
2288 fprintf_unfiltered (gdb_stdlog,
2289 "KWC: wait %s received unknown.\n",
2290 target_pid_to_str (lp->ptid));
2291 }
2292 }
2293 while (pid == GET_LWP (lp->ptid));
2294
2295 gdb_assert (pid == -1 && errno == ECHILD);
2296 return 0;
2297 }
2298
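/* Implement the to_kill target method: kill any not-yet-followed
   fork child, then kill and reap every LWP (or every fork) of the
   inferior, and finally mourn it.  */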
2299 static void
2300 linux_nat_kill (void)
2301 {
2302 struct target_waitstatus last;
2303 ptid_t last_ptid;
2304 int status;
2305
2306 /* If we're stopped while forking and we haven't followed yet,
2307 kill the other task. We need to do this first because the
2308 parent will be sleeping if this is a vfork. */
2309
2310 get_last_target_status (&last_ptid, &last);
2311
2312 if (last.kind == TARGET_WAITKIND_FORKED
2313 || last.kind == TARGET_WAITKIND_VFORKED)
2314 {
2315 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2316 wait (&status);
2317 }
2318
2319 if (forks_exist_p ())
2320 linux_fork_killall ();
2321 else
2322 {
2323 /* Kill all LWP's ... */
2324 iterate_over_lwps (kill_callback, NULL);
2325
2326 /* ... and wait until we've flushed all events. */
2327 iterate_over_lwps (kill_wait_callback, NULL);
2328 }
2329
2330 target_mourn_inferior ();
2331 }
2332
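/* Implement the to_mourn_inferior target method: discard the LWP
   list, restore the original signal mask, and either mourn through
   the target beneath us or switch to another surviving fork.  */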
2333 static void
2334 linux_nat_mourn_inferior (void)
2335 {
2336 trap_ptid = null_ptid;
2337
2338 /* Destroy LWP info; it's no longer valid. */
2339 init_lwp_list ();
2340
2341 /* Restore the original signal mask. */
2342 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2343 sigemptyset (&blocked_mask);
2344
2345 if (! forks_exist_p ())
2346 /* Normal case, no other forks available. */
2347 linux_ops->to_mourn_inferior ();
2348 else
2349 /* Multi-fork case. The current inferior_ptid has exited, but
2350 there are other viable forks to debug. Delete the exiting
2351 one and context-switch to the first available. */
2352 linux_fork_mourn_inferior ();
2353 }
2354
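/* Implement the to_xfer_partial target method: temporarily rewrite
   inferior_ptid so that the layer beneath us sees the LWP id as an
   ordinary process id, then delegate the transfer to it.  */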
2355 static LONGEST
2356 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2357 const char *annex, gdb_byte *readbuf,
2358 const gdb_byte *writebuf,
2359 ULONGEST offset, LONGEST len)
2360 {
2361 struct cleanup *old_chain = save_inferior_ptid ();
2362 LONGEST xfer;
2363
2364 if (is_lwp (inferior_ptid))
2365 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2366
2367 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2368 offset, len);
2369
2370 do_cleanups (old_chain);
2371 return xfer;
2372 }
2373
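/* Implement the to_thread_alive target method: probe the LWP with a
   harmless PTRACE_PEEKUSER request and see whether it still
   exists.  */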
2374 static int
2375 linux_nat_thread_alive (ptid_t ptid)
2376 {
2377 gdb_assert (is_lwp (ptid));
2378
2379 errno = 0;
2380 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2381 if (debug_linux_nat)
2382 fprintf_unfiltered (gdb_stdlog,
2383 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2384 target_pid_to_str (ptid),
2385 errno ? safe_strerror (errno) : "OK");
2386
2387 /* Not every Linux target implements PTRACE_PEEKUSER.
2388 But we can handle that case gracefully since ptrace
2389 will first do a lookup for the process based upon the
2390 passed-in pid.  If that fails, errno will be set to ESRCH
2391 or EPERM; otherwise the child exists and is alive. */
2392 if (errno == ESRCH || errno == EPERM)
2393 return 0;
2394
2395 return 1;
2396 }
2397
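/* Implement the to_pid_to_str target method: print "LWP N" when more
   than one LWP is known, and fall back to the normal representation
   otherwise.  */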
2398 static char *
2399 linux_nat_pid_to_str (ptid_t ptid)
2400 {
2401 static char buf[64];
2402
2403 if (lwp_list && lwp_list->next && is_lwp (ptid))
2404 {
2405 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2406 return buf;
2407 }
2408
2409 return normal_pid_to_str (ptid);
2410 }
2411
2412 static void
2413 sigchld_handler (int signo)
2414 {
2415 /* Do nothing. The only reason for this handler is that it allows
2416 us to use sigsuspend in linux_nat_wait above to wait for the
2417 arrival of a SIGCHLD. */
2418 }
2419
2420 /* Accepts an integer PID; returns a string representing a file that
2421 can be opened to get the symbols for the child process. */
2422
2423 char *
2424 child_pid_to_exec_file (int pid)
2425 {
2426 char *name1, *name2;
2427
2428 name1 = xmalloc (MAXPATHLEN);
2429 name2 = xmalloc (MAXPATHLEN);
2430 make_cleanup (xfree, name1);
2431 make_cleanup (xfree, name2);
2432 memset (name2, 0, MAXPATHLEN);
2433
2434 sprintf (name1, "/proc/%d/exe", pid);
2435 if (readlink (name1, name2, MAXPATHLEN) > 0)
2436 return name2;
2437 else
2438 return name1;
2439 }
2440
2441 /* Service function for corefiles and info proc. */
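/* A typical line of /proc/PID/maps looks something like the
   following (the inode and filename below are only illustrative, and
   the filename is absent for anonymous mappings):

   00400000-0040b000 r-xp 00000000 08:01 54321 /bin/cat  */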
2442
2443 static int
2444 read_mapping (FILE *mapfile,
2445 long long *addr,
2446 long long *endaddr,
2447 char *permissions,
2448 long long *offset,
2449 char *device, long long *inode, char *filename)
2450 {
2451 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2452 addr, endaddr, permissions, offset, device, inode);
2453
2454 filename[0] = '\0';
2455 if (ret > 0 && ret != EOF)
2456 {
2457 /* Eat everything up to EOL for the filename. This will prevent
2458 weird filenames (such as one with embedded whitespace) from
2459 confusing this code.  It also makes this code more robust with
2460 respect to annotations the kernel may add after the filename.
2461
2462 Note the filename is used for informational purposes
2463 only. */
2464 ret += fscanf (mapfile, "%[^\n]\n", filename);
2465 }
2466
2467 return (ret != 0 && ret != EOF);
2468 }
2469
2470 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2471 regions in the inferior for a corefile. */
2472
2473 static int
2474 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2475 unsigned long,
2476 int, int, int, void *), void *obfd)
2477 {
2478 long long pid = PIDGET (inferior_ptid);
2479 char mapsfilename[MAXPATHLEN];
2480 FILE *mapsfile;
2481 long long addr, endaddr, size, offset, inode;
2482 char permissions[8], device[8], filename[MAXPATHLEN];
2483 int read, write, exec;
2484 int ret;
2485
2486 /* Compose the filename for the /proc memory map, and open it. */
2487 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2488 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2489 error (_("Could not open %s."), mapsfilename);
2490
2491 if (info_verbose)
2492 fprintf_filtered (gdb_stdout,
2493 "Reading memory regions from %s\n", mapsfilename);
2494
2495 /* Now iterate until end-of-file. */
2496 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2497 &offset, &device[0], &inode, &filename[0]))
2498 {
2499 size = endaddr - addr;
2500
2501 /* Get the segment's permissions. */
2502 read = (strchr (permissions, 'r') != 0);
2503 write = (strchr (permissions, 'w') != 0);
2504 exec = (strchr (permissions, 'x') != 0);
2505
2506 if (info_verbose)
2507 {
2508 fprintf_filtered (gdb_stdout,
2509 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2510 size, paddr_nz (addr),
2511 read ? 'r' : ' ',
2512 write ? 'w' : ' ', exec ? 'x' : ' ');
2513 if (filename && filename[0])
2514 fprintf_filtered (gdb_stdout, " for %s", filename);
2515 fprintf_filtered (gdb_stdout, "\n");
2516 }
2517
2518 /* Invoke the callback function to create the corefile
2519 segment. */
2520 func (addr, size, read, write, exec, obfd);
2521 }
2522 fclose (mapsfile);
2523 return 0;
2524 }
2525
2526 /* Records the thread's register state for the corefile note
2527 section. */
2528
2529 static char *
2530 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2531 char *note_data, int *note_size)
2532 {
2533 gdb_gregset_t gregs;
2534 gdb_fpregset_t fpregs;
2535 #ifdef FILL_FPXREGSET
2536 gdb_fpxregset_t fpxregs;
2537 #endif
2538 unsigned long lwp = ptid_get_lwp (ptid);
2539 struct gdbarch *gdbarch = current_gdbarch;
2540 const struct regset *regset;
2541 int core_regset_p, record_reg_p;
2542
2543 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
2544 record_reg_p = 1;
2545 if (core_regset_p)
2546 {
2547 regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
2548 sizeof (gregs));
2549 if (regset)
2550 regset->collect_regset (regset, current_regcache, -1,
2551 &gregs, sizeof (gregs));
2552 else
2553 record_reg_p = 0;
2554 }
2555 else
2556 fill_gregset (&gregs, -1);
2557
2558 if (record_reg_p)
2559 note_data = (char *) elfcore_write_prstatus (obfd,
2560 note_data,
2561 note_size,
2562 lwp,
2563 stop_signal, &gregs);
2564
2565 record_reg_p = 1;
2566 if (core_regset_p)
2567 {
2568 regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
2569 sizeof (fpregs));
2570 if (regset)
2571 regset->collect_regset (regset, current_regcache, -1,
2572 &fpregs, sizeof (fpregs));
2573 else
2574 record_reg_p = 0;
2575 }
2576 else
2577 fill_fpregset (&fpregs, -1);
2578
2579 if (record_reg_p)
2580 note_data = (char *) elfcore_write_prfpreg (obfd,
2581 note_data,
2582 note_size,
2583 &fpregs, sizeof (fpregs));
2584
2585 #ifdef FILL_FPXREGSET
2586 record_reg_p = 1;
2587 if (core_regset_p)
2588 {
2589 regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
2590 sizeof (fpxregs));
2591 if (regset)
2592 regset->collect_regset (regset, current_regcache, -1,
2593 &fpxregs, sizeof (fpxregs));
2594 else
2595 record_reg_p = 0;
2596 }
2597 else
2598 fill_fpxregset (&fpxregs, -1);
2599
2600 if (record_reg_p)
2601 note_data = (char *) elfcore_write_prxfpreg (obfd,
2602 note_data,
2603 note_size,
2604 &fpxregs, sizeof (fpxregs));
2605 #endif
2606 return note_data;
2607 }
2608
2609 struct linux_nat_corefile_thread_data
2610 {
2611 bfd *obfd;
2612 char *note_data;
2613 int *note_size;
2614 int num_notes;
2615 };
2616
2617 /* Called by iterate_over_lwps once per LWP.  Records the thread's
2618 register state for the corefile note section. */
2619
2620 static int
2621 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2622 {
2623 struct linux_nat_corefile_thread_data *args = data;
2624 ptid_t saved_ptid = inferior_ptid;
2625
2626 inferior_ptid = ti->ptid;
2627 registers_changed ();
2628 target_fetch_registers (-1); /* FIXME should not be necessary;
2629 fill_gregset should do it automatically. */
2630 args->note_data = linux_nat_do_thread_registers (args->obfd,
2631 ti->ptid,
2632 args->note_data,
2633 args->note_size);
2634 args->num_notes++;
2635 inferior_ptid = saved_ptid;
2636 registers_changed ();
2637 target_fetch_registers (-1); /* FIXME should not be necessary;
2638 fill_gregset should do it automatically. */
2639 return 0;
2640 }
2641
2642 /* Records the register state for the corefile note section. */
2643
2644 static char *
2645 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2646 char *note_data, int *note_size)
2647 {
2648 registers_changed ();
2649 target_fetch_registers (-1); /* FIXME should not be necessary;
2650 fill_gregset should do it automatically. */
2651 return linux_nat_do_thread_registers (obfd,
2652 ptid_build (ptid_get_pid (inferior_ptid),
2653 ptid_get_pid (inferior_ptid),
2654 0),
2655 note_data, note_size);
2657 }
2658
2659 /* Fills the "to_make_corefile_note" target vector. Builds the note
2660 section for a corefile, and returns it in a malloc buffer. */
2661
2662 static char *
2663 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2664 {
2665 struct linux_nat_corefile_thread_data thread_args;
2666 struct cleanup *old_chain;
2667 char fname[16] = { '\0' };
2668 char psargs[80] = { '\0' };
2669 char *note_data = NULL;
2670 ptid_t current_ptid = inferior_ptid;
2671 gdb_byte *auxv;
2672 int auxv_len;
2673
2674 if (get_exec_file (0))
2675 {
2676 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2677 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2678 if (get_inferior_args ())
2679 {
2680 strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
2681 strncat (psargs, get_inferior_args (),
2682 sizeof (psargs) - strlen (psargs));
2683 }
2684 note_data = (char *) elfcore_write_prpsinfo (obfd,
2685 note_data,
2686 note_size, fname, psargs);
2687 }
2688
2689 /* Dump information for threads. */
2690 thread_args.obfd = obfd;
2691 thread_args.note_data = note_data;
2692 thread_args.note_size = note_size;
2693 thread_args.num_notes = 0;
2694 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2695 if (thread_args.num_notes == 0)
2696 {
2697 /* iterate_over_lwps didn't come up with any LWPs; just
2698 use inferior_ptid. */
2699 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2700 note_data, note_size);
2701 }
2702 else
2703 {
2704 note_data = thread_args.note_data;
2705 }
2706
2707 auxv_len = target_auxv_read (&current_target, &auxv);
2708 if (auxv_len > 0)
2709 {
2710 note_data = elfcore_write_note (obfd, note_data, note_size,
2711 "CORE", NT_AUXV, auxv, auxv_len);
2712 xfree (auxv);
2713 }
2714
2715 make_cleanup (xfree, note_data);
2716 return note_data;
2717 }
2718
2719 /* Implement the "info proc" command. */
2720
2721 static void
2722 linux_nat_info_proc_cmd (char *args, int from_tty)
2723 {
2724 long long pid = PIDGET (inferior_ptid);
2725 FILE *procfile;
2726 char **argv = NULL;
2727 char buffer[MAXPATHLEN];
2728 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2729 int cmdline_f = 1;
2730 int cwd_f = 1;
2731 int exe_f = 1;
2732 int mappings_f = 0;
2733 int environ_f = 0;
2734 int status_f = 0;
2735 int stat_f = 0;
2736 int all = 0;
2737 struct stat dummy;
2738
2739 if (args)
2740 {
2741 /* Break up 'args' into an argv array. */
2742 if ((argv = buildargv (args)) == NULL)
2743 nomem (0);
2744 else
2745 make_cleanup_freeargv (argv);
2746 }
2747 while (argv != NULL && *argv != NULL)
2748 {
2749 if (isdigit (argv[0][0]))
2750 {
2751 pid = strtoul (argv[0], NULL, 10);
2752 }
2753 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2754 {
2755 mappings_f = 1;
2756 }
2757 else if (strcmp (argv[0], "status") == 0)
2758 {
2759 status_f = 1;
2760 }
2761 else if (strcmp (argv[0], "stat") == 0)
2762 {
2763 stat_f = 1;
2764 }
2765 else if (strcmp (argv[0], "cmd") == 0)
2766 {
2767 cmdline_f = 1;
2768 }
2769 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2770 {
2771 exe_f = 1;
2772 }
2773 else if (strcmp (argv[0], "cwd") == 0)
2774 {
2775 cwd_f = 1;
2776 }
2777 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2778 {
2779 all = 1;
2780 }
2781 else
2782 {
2783 /* [...] (future options here) */
2784 }
2785 argv++;
2786 }
2787 if (pid == 0)
2788 error (_("No current process: you must name one."));
2789
2790 sprintf (fname1, "/proc/%lld", pid);
2791 if (stat (fname1, &dummy) != 0)
2792 error (_("No /proc directory: '%s'"), fname1);
2793
2794 printf_filtered (_("process %lld\n"), pid);
2795 if (cmdline_f || all)
2796 {
2797 sprintf (fname1, "/proc/%lld/cmdline", pid);
2798 if ((procfile = fopen (fname1, "r")) != NULL)
2799 {
2800 fgets (buffer, sizeof (buffer), procfile);
2801 printf_filtered ("cmdline = '%s'\n", buffer);
2802 fclose (procfile);
2803 }
2804 else
2805 warning (_("unable to open /proc file '%s'"), fname1);
2806 }
2807 if (cwd_f || all)
2808 {
2809 sprintf (fname1, "/proc/%lld/cwd", pid);
2810 memset (fname2, 0, sizeof (fname2));
2811 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2812 printf_filtered ("cwd = '%s'\n", fname2);
2813 else
2814 warning (_("unable to read link '%s'"), fname1);
2815 }
2816 if (exe_f || all)
2817 {
2818 sprintf (fname1, "/proc/%lld/exe", pid);
2819 memset (fname2, 0, sizeof (fname2));
2820 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2821 printf_filtered ("exe = '%s'\n", fname2);
2822 else
2823 warning (_("unable to read link '%s'"), fname1);
2824 }
2825 if (mappings_f || all)
2826 {
2827 sprintf (fname1, "/proc/%lld/maps", pid);
2828 if ((procfile = fopen (fname1, "r")) != NULL)
2829 {
2830 long long addr, endaddr, size, offset, inode;
2831 char permissions[8], device[8], filename[MAXPATHLEN];
2832
2833 printf_filtered (_("Mapped address spaces:\n\n"));
2834 if (TARGET_ADDR_BIT == 32)
2835 {
2836 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2837 "Start Addr",
2838 " End Addr",
2839 " Size", " Offset", "objfile");
2840 }
2841 else
2842 {
2843 printf_filtered (" %18s %18s %10s %10s %7s\n",
2844 "Start Addr",
2845 " End Addr",
2846 " Size", " Offset", "objfile");
2847 }
2848
2849 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2850 &offset, &device[0], &inode, &filename[0]))
2851 {
2852 size = endaddr - addr;
2853
2854 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2855 calls here (and possibly above) should be abstracted
2856 out into their own functions? Andrew suggests using
2857 a generic local_address_string instead to print out
2858 the addresses; that makes sense to me, too. */
2859
2860 if (TARGET_ADDR_BIT == 32)
2861 {
2862 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2863 (unsigned long) addr, /* FIXME: pr_addr */
2864 (unsigned long) endaddr,
2865 (int) size,
2866 (unsigned int) offset,
2867 filename[0] ? filename : "");
2868 }
2869 else
2870 {
2871 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2872 (unsigned long) addr, /* FIXME: pr_addr */
2873 (unsigned long) endaddr,
2874 (int) size,
2875 (unsigned int) offset,
2876 filename[0] ? filename : "");
2877 }
2878 }
2879
2880 fclose (procfile);
2881 }
2882 else
2883 warning (_("unable to open /proc file '%s'"), fname1);
2884 }
2885 if (status_f || all)
2886 {
2887 sprintf (fname1, "/proc/%lld/status", pid);
2888 if ((procfile = fopen (fname1, "r")) != NULL)
2889 {
2890 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2891 puts_filtered (buffer);
2892 fclose (procfile);
2893 }
2894 else
2895 warning (_("unable to open /proc file '%s'"), fname1);
2896 }
2897 if (stat_f || all)
2898 {
2899 sprintf (fname1, "/proc/%lld/stat", pid);
2900 if ((procfile = fopen (fname1, "r")) != NULL)
2901 {
2902 int itmp;
2903 char ctmp;
2904
2905 if (fscanf (procfile, "%d ", &itmp) > 0)
2906 printf_filtered (_("Process: %d\n"), itmp);
2907 if (fscanf (procfile, "%s ", &buffer[0]) > 0)
2908 printf_filtered (_("Exec file: %s\n"), buffer);
2909 if (fscanf (procfile, "%c ", &ctmp) > 0)
2910 printf_filtered (_("State: %c\n"), ctmp);
2911 if (fscanf (procfile, "%d ", &itmp) > 0)
2912 printf_filtered (_("Parent process: %d\n"), itmp);
2913 if (fscanf (procfile, "%d ", &itmp) > 0)
2914 printf_filtered (_("Process group: %d\n"), itmp);
2915 if (fscanf (procfile, "%d ", &itmp) > 0)
2916 printf_filtered (_("Session id: %d\n"), itmp);
2917 if (fscanf (procfile, "%d ", &itmp) > 0)
2918 printf_filtered (_("TTY: %d\n"), itmp);
2919 if (fscanf (procfile, "%d ", &itmp) > 0)
2920 printf_filtered (_("TTY owner process group: %d\n"), itmp);
2921 if (fscanf (procfile, "%u ", &itmp) > 0)
2922 printf_filtered (_("Flags: 0x%x\n"), itmp);
2923 if (fscanf (procfile, "%u ", &itmp) > 0)
2924 printf_filtered (_("Minor faults (no memory page): %u\n"),
2925 (unsigned int) itmp);
2926 if (fscanf (procfile, "%u ", &itmp) > 0)
2927 printf_filtered (_("Minor faults, children: %u\n"),
2928 (unsigned int) itmp);
2929 if (fscanf (procfile, "%u ", &itmp) > 0)
2930 printf_filtered (_("Major faults (memory page faults): %u\n"),
2931 (unsigned int) itmp);
2932 if (fscanf (procfile, "%u ", &itmp) > 0)
2933 printf_filtered (_("Major faults, children: %u\n"),
2934 (unsigned int) itmp);
2935 if (fscanf (procfile, "%d ", &itmp) > 0)
2936 printf_filtered ("utime: %d\n", itmp);
2937 if (fscanf (procfile, "%d ", &itmp) > 0)
2938 printf_filtered ("stime: %d\n", itmp);
2939 if (fscanf (procfile, "%d ", &itmp) > 0)
2940 printf_filtered ("utime, children: %d\n", itmp);
2941 if (fscanf (procfile, "%d ", &itmp) > 0)
2942 printf_filtered ("stime, children: %d\n", itmp);
2943 if (fscanf (procfile, "%d ", &itmp) > 0)
2944 printf_filtered (_("jiffies remaining in current time slice: %d\n"),
2945 itmp);
2946 if (fscanf (procfile, "%d ", &itmp) > 0)
2947 printf_filtered ("'nice' value: %d\n", itmp);
2948 if (fscanf (procfile, "%u ", &itmp) > 0)
2949 printf_filtered (_("jiffies until next timeout: %u\n"),
2950 (unsigned int) itmp);
2951 if (fscanf (procfile, "%u ", &itmp) > 0)
2952 printf_filtered ("jiffies until next SIGALRM: %u\n",
2953 (unsigned int) itmp);
2954 if (fscanf (procfile, "%d ", &itmp) > 0)
2955 printf_filtered (_("start time (jiffies since system boot): %d\n"),
2956 itmp);
2957 if (fscanf (procfile, "%u ", &itmp) > 0)
2958 printf_filtered (_("Virtual memory size: %u\n"),
2959 (unsigned int) itmp);
2960 if (fscanf (procfile, "%u ", &itmp) > 0)
2961 printf_filtered (_("Resident set size: %u\n"), (unsigned int) itmp);
2962 if (fscanf (procfile, "%u ", &itmp) > 0)
2963 printf_filtered ("rlim: %u\n", (unsigned int) itmp);
2964 if (fscanf (procfile, "%u ", &itmp) > 0)
2965 printf_filtered (_("Start of text: 0x%x\n"), itmp);
2966 if (fscanf (procfile, "%u ", &itmp) > 0)
2967 printf_filtered (_("End of text: 0x%x\n"), itmp);
2968 if (fscanf (procfile, "%u ", &itmp) > 0)
2969 printf_filtered (_("Start of stack: 0x%x\n"), itmp);
2970 #if 0 /* Don't know how architecture-dependent the rest is...
2971 Anyway the signal bitmap info is available from "status". */
2972 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2973 printf_filtered (_("Kernel stack pointer: 0x%x\n"), itmp);
2974 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2975 printf_filtered (_("Kernel instr pointer: 0x%x\n"), itmp);
2976 if (fscanf (procfile, "%d ", &itmp) > 0)
2977 printf_filtered (_("Pending signals bitmap: 0x%x\n"), itmp);
2978 if (fscanf (procfile, "%d ", &itmp) > 0)
2979 printf_filtered (_("Blocked signals bitmap: 0x%x\n"), itmp);
2980 if (fscanf (procfile, "%d ", &itmp) > 0)
2981 printf_filtered (_("Ignored signals bitmap: 0x%x\n"), itmp);
2982 if (fscanf (procfile, "%d ", &itmp) > 0)
2983 printf_filtered (_("Caught signals bitmap: 0x%x\n"), itmp);
2984 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2985 printf_filtered (_("wchan (system call): 0x%x\n"), itmp);
2986 #endif
2987 fclose (procfile);
2988 }
2989 else
2990 warning (_("unable to open /proc file '%s'"), fname1);
2991 }
2992 }
2993
2994 /* Implement the to_xfer_partial interface for memory reads using the /proc
2995 filesystem. Because we can use a single read() call for /proc, this
2996 can be much more efficient than banging away at PTRACE_PEEKTEXT,
2997 but it doesn't support writes. */
2998
2999 static LONGEST
3000 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3001 const char *annex, gdb_byte *readbuf,
3002 const gdb_byte *writebuf,
3003 ULONGEST offset, LONGEST len)
3004 {
3005 LONGEST ret;
3006 int fd;
3007 char filename[64];
3008
3009 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3010 return 0;
3011
3012 /* Don't bother for small requests; ptrace is cheaper for a few words. */
3013 if (len < 3 * sizeof (long))
3014 return 0;
3015
3016 /* We could keep this file open and cache it - possibly one per
3017 thread. That requires some juggling, but is even faster. */
3018 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3019 fd = open (filename, O_RDONLY | O_LARGEFILE);
3020 if (fd == -1)
3021 return 0;
3022
3023 /* If pread64 is available, use it. It's faster if the kernel
3024 supports it (only one syscall), and it's 64-bit safe even on
3025 32-bit platforms (for instance, SPARC debugging a SPARC64
3026 application). */
3027 #ifdef HAVE_PREAD64
3028 if (pread64 (fd, readbuf, len, offset) != len)
3029 #else
3030 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3031 #endif
3032 ret = 0;
3033 else
3034 ret = len;
3035
3036 close (fd);
3037 return ret;
3038 }
3039
3040 /* Parse LINE as a signal set and add its set bits to SIGS. */
3041
3042 static void
3043 add_line_to_sigset (const char *line, sigset_t *sigs)
3044 {
3045 int len = strlen (line) - 1;
3046 const char *p;
3047 int signum;
3048
3049 if (line[len] != '\n')
3050 error (_("Could not parse signal set: %s"), line);
3051
3052 p = line;
3053 signum = len * 4;
3054 while (len-- > 0)
3055 {
3056 int digit;
3057
3058 if (*p >= '0' && *p <= '9')
3059 digit = *p - '0';
3060 else if (*p >= 'a' && *p <= 'f')
3061 digit = *p - 'a' + 10;
3062 else
3063 error (_("Could not parse signal set: %s"), line);
3064
3065 signum -= 4;
3066
3067 if (digit & 1)
3068 sigaddset (sigs, signum + 1);
3069 if (digit & 2)
3070 sigaddset (sigs, signum + 2);
3071 if (digit & 4)
3072 sigaddset (sigs, signum + 3);
3073 if (digit & 8)
3074 sigaddset (sigs, signum + 4);
3075
3076 p++;
3077 }
3078 }
3079
3080 /* Find process PID's pending, blocked and ignored signals from
3081 /proc/pid/status and set PENDING, BLOCKED and IGNORED to match. */
3082
3083 void
3084 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3085 {
3086 FILE *procfile;
3087 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3088 int signum;
3089
3090 sigemptyset (pending);
3091 sigemptyset (blocked);
3092 sigemptyset (ignored);
3093 sprintf (fname, "/proc/%d/status", pid);
3094 procfile = fopen (fname, "r");
3095 if (procfile == NULL)
3096 error (_("Could not open %s"), fname);
3097
3098 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3099 {
3100 /* Normal queued signals are on the SigPnd line in the status
3101 file. However, 2.6 kernels also have a "shared" pending
3102 queue for delivering signals to a thread group, so check for
3103 a ShdPnd line also.
3104
3105 Unfortunately some Red Hat kernels include the shared pending
3106 queue but not the ShdPnd status field. */
3107
3108 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3109 add_line_to_sigset (buffer + 8, pending);
3110 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3111 add_line_to_sigset (buffer + 8, pending);
3112 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3113 add_line_to_sigset (buffer + 8, blocked);
3114 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3115 add_line_to_sigset (buffer + 8, ignored);
3116 }
3117
3118 fclose (procfile);
3119 }
3120
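/* The to_xfer_partial method installed by linux_target: answer AUXV
   requests through procfs, try the fast /proc/PID/mem path for
   memory reads, and fall back to the inherited inf-ptrace method for
   everything else.  */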
3121 static LONGEST
3122 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3123 const char *annex, gdb_byte *readbuf,
3124 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3125 {
3126 LONGEST xfer;
3127
3128 if (object == TARGET_OBJECT_AUXV)
3129 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3130 offset, len);
3131
3132 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3133 offset, len);
3134 if (xfer != 0)
3135 return xfer;
3136
3137 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3138 offset, len);
3139 }
3140
3141 #ifndef FETCH_INFERIOR_REGISTERS
3142
3143 /* Return the address in the core dump or inferior of register
3144 REGNO. */
3145
3146 static CORE_ADDR
3147 linux_register_u_offset (int regno)
3148 {
3149 /* FIXME drow/2005-09-04: The hardcoded use of register_addr should go
3150 away. This requires disentangling the various definitions of it
3151 (particularly alpha-nat.c's). */
3152 return register_addr (regno, 0);
3153 }
3154
3155 #endif
3156
3157 /* Create a prototype generic Linux target. The client can override
3158 it with local methods. */
3159
3160 struct target_ops *
3161 linux_target (void)
3162 {
3163 struct target_ops *t;
3164
3165 #ifdef FETCH_INFERIOR_REGISTERS
3166 t = inf_ptrace_target ();
3167 #else
3168 t = inf_ptrace_trad_target (linux_register_u_offset);
3169 #endif
3170 t->to_insert_fork_catchpoint = child_insert_fork_catchpoint;
3171 t->to_insert_vfork_catchpoint = child_insert_vfork_catchpoint;
3172 t->to_insert_exec_catchpoint = child_insert_exec_catchpoint;
3173 t->to_pid_to_exec_file = child_pid_to_exec_file;
3174 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3175 t->to_post_attach = child_post_attach;
3176 t->to_follow_fork = child_follow_fork;
3177 t->to_find_memory_regions = linux_nat_find_memory_regions;
3178 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3179
3180 super_xfer_partial = t->to_xfer_partial;
3181 t->to_xfer_partial = linux_xfer_partial;
3182
3183 return t;
3184 }
3185
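/* Turn the single-threaded target T into the multi-threaded
   GNU/Linux native target and register it with GDB, keeping a copy
   of the original methods in linux_ops.  */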
3186 void
3187 linux_nat_add_target (struct target_ops *t)
3188 {
3189 extern void thread_db_init (struct target_ops *);
3190
3191 /* Save the provided single-threaded target. We save this in a separate
3192 variable because another target we've inherited from (e.g. inf-ptrace)
3193 may have saved a pointer to T; we want to use it for the final
3194 process stratum target. */
3195 linux_ops_saved = *t;
3196 linux_ops = &linux_ops_saved;
3197
3198 /* Override some methods for multithreading. */
3199 t->to_attach = linux_nat_attach;
3200 t->to_detach = linux_nat_detach;
3201 t->to_resume = linux_nat_resume;
3202 t->to_wait = linux_nat_wait;
3203 t->to_xfer_partial = linux_nat_xfer_partial;
3204 t->to_kill = linux_nat_kill;
3205 t->to_mourn_inferior = linux_nat_mourn_inferior;
3206 t->to_thread_alive = linux_nat_thread_alive;
3207 t->to_pid_to_str = linux_nat_pid_to_str;
3208 t->to_has_thread_control = tc_schedlock;
3209
3210 /* We don't change the stratum; this target will sit at
3211 process_stratum and thread_db will set at thread_stratum. This
3212 is a little strange, since this is a multi-threaded-capable
3213 target, but we want to be on the stack below thread_db, and we
3214 also want to be used for single-threaded processes. */
3215
3216 add_target (t);
3217
3218 /* TODO: Eliminate this and have libthread_db use
3219 find_target_beneath. */
3220 thread_db_init (t);
3221 }
3222
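/* Module initialization: register the "info proc" command, install
   the SIGCHLD handler, set up the signal masks used by
   linux_nat_wait, and add the "set debug lin-lwp" command.  */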
3223 void
3224 _initialize_linux_nat (void)
3225 {
3226 struct sigaction action;
3227
3228 add_info ("proc", linux_nat_info_proc_cmd, _("\
3229 Show /proc process information about any running process.\n\
3230 Specify any process id, or use the program being debugged by default.\n\
3231 Specify any of the following keywords for detailed info:\n\
3232 mappings -- list of mapped memory regions.\n\
3233 stat -- list a bunch of random process info.\n\
3234 status -- list a different bunch of random process info.\n\
3235 all -- list all available /proc info."));
3236
3237 /* Save the original signal mask. */
3238 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3239
3240 action.sa_handler = sigchld_handler;
3241 sigemptyset (&action.sa_mask);
3242 action.sa_flags = SA_RESTART;
3243 sigaction (SIGCHLD, &action, NULL);
3244
3245 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3246 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3247 sigdelset (&suspend_mask, SIGCHLD);
3248
3249 sigemptyset (&blocked_mask);
3250
3251 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3252 Set debugging of GNU/Linux lwp module."), _("\
3253 Show debugging of GNU/Linux lwp module."), _("\
3254 Enables printf debugging output."),
3255 NULL,
3256 show_debug_linux_nat,
3257 &setdebuglist, &showdebuglist);
3258 }
3259 \f
3260
3261 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3262 the GNU/Linux Threads library and therefore doesn't really belong
3263 here. */
3264
3265 /* Read variable NAME in the target and return its value if found.
3266 Otherwise return zero. It is assumed that the type of the variable
3267 is `int'. */
3268
3269 static int
3270 get_signo (const char *name)
3271 {
3272 struct minimal_symbol *ms;
3273 int signo;
3274
3275 ms = lookup_minimal_symbol (name, NULL, NULL);
3276 if (ms == NULL)
3277 return 0;
3278
3279 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3280 sizeof (signo)) != 0)
3281 return 0;
3282
3283 return signo;
3284 }
3285
3286 /* Return the set of signals used by the threads library in *SET. */
3287
3288 void
3289 lin_thread_get_thread_signals (sigset_t *set)
3290 {
3291 struct sigaction action;
3292 int restart, cancel;
3293
3294 sigemptyset (set);
3295
3296 restart = get_signo ("__pthread_sig_restart");
3297 cancel = get_signo ("__pthread_sig_cancel");
3298
3299 /* LinuxThreads normally uses the first two RT signals, but in some legacy
3300 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
3301 not provide any way for the debugger to query the signal numbers -
3302 fortunately they don't change! */
3303
3304 if (restart == 0)
3305 restart = __SIGRTMIN;
3306
3307 if (cancel == 0)
3308 cancel = __SIGRTMIN + 1;
3309
3310 sigaddset (set, restart);
3311 sigaddset (set, cancel);
3312
3313 /* The GNU/Linux Threads library makes terminating threads send a
3314 special "cancel" signal instead of SIGCHLD. Make sure we catch
3315 those (to prevent them from terminating GDB itself, which is
3316 likely to be their default action) and treat them the same way as
3317 SIGCHLD. */
3318
3319 action.sa_handler = sigchld_handler;
3320 sigemptyset (&action.sa_mask);
3321 action.sa_flags = SA_RESTART;
3322 sigaction (cancel, &action, NULL);
3323
3324 /* We block the "cancel" signal throughout this code ... */
3325 sigaddset (&blocked_mask, cancel);
3326 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3327
3328 /* ... except during a sigsuspend. */
3329 sigdelset (&suspend_mask, cancel);
3330 }
3331