1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 #include "defs.h"
23 #include "inferior.h"
24 #include "target.h"
25 #include "gdb_string.h"
26 #include "gdb_wait.h"
27 #include "gdb_assert.h"
28 #ifdef HAVE_TKILL_SYSCALL
29 #include <unistd.h>
30 #include <sys/syscall.h>
31 #endif
32 #include <sys/ptrace.h>
33 #include "linux-nat.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include <sys/param.h> /* for MAXPATHLEN */
38 #include <sys/procfs.h> /* for elf_gregset etc. */
39 #include "elf-bfd.h" /* for elfcore_write_* */
40 #include "gregset.h" /* for gregset */
41 #include "gdbcore.h" /* for get_exec_file */
42 #include <ctype.h> /* for isdigit */
43 #include "gdbthread.h" /* for struct thread_info etc. */
44 #include "gdb_stat.h" /* for struct stat */
45 #include <fcntl.h> /* for O_RDONLY */
46
47 #ifndef O_LARGEFILE
48 #define O_LARGEFILE 0
49 #endif
50
51 /* If the system headers did not provide the constants, hard-code the normal
52 values. */
53 #ifndef PTRACE_EVENT_FORK
54
55 #define PTRACE_SETOPTIONS 0x4200
56 #define PTRACE_GETEVENTMSG 0x4201
57
58 /* options set using PTRACE_SETOPTIONS */
59 #define PTRACE_O_TRACESYSGOOD 0x00000001
60 #define PTRACE_O_TRACEFORK 0x00000002
61 #define PTRACE_O_TRACEVFORK 0x00000004
62 #define PTRACE_O_TRACECLONE 0x00000008
63 #define PTRACE_O_TRACEEXEC 0x00000010
64 #define PTRACE_O_TRACEVFORKDONE 0x00000020
65 #define PTRACE_O_TRACEEXIT 0x00000040
66
67 /* Wait extended result codes for the above trace options. */
68 #define PTRACE_EVENT_FORK 1
69 #define PTRACE_EVENT_VFORK 2
70 #define PTRACE_EVENT_CLONE 3
71 #define PTRACE_EVENT_EXEC 4
72 #define PTRACE_EVENT_VFORK_DONE 5
73 #define PTRACE_EVENT_EXIT 6
74
75 #endif /* PTRACE_EVENT_FORK */
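
/* When any of the above options are enabled, the kernel stops the
   traced process with SIGTRAP and encodes the event in the upper half
   of the wait status, so (status >> 16) yields one of the
   PTRACE_EVENT_* codes; PTRACE_GETEVENTMSG then fetches the associated
   message (for fork, vfork and clone events, the new child's PID).  */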
76
77 /* We can't always assume that this flag is available, but all systems
78 with the ptrace event handlers also have __WALL, so it's safe to use
79 here. */
80 #ifndef __WALL
81 #define __WALL 0x40000000 /* Wait for any child. */
82 #endif
83
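/* Non-zero enables debugging output from this module.  */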
84 static int debug_linux_nat;
85 static void
86 show_debug_linux_nat (struct ui_file *file, int from_tty,
87 struct cmd_list_element *c, const char *value)
88 {
89 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
90 value);
91 }
92
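/* If we are holding on to the parent of a vforked child until it is
   safe to detach from it (see child_follow_fork and
   linux_handle_extended_wait), this is the parent's PID; otherwise
   zero.  */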
93 static int linux_parent_pid;
94
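/* A list of PIDs for new children that reported their initial stop
   before we saw the corresponding fork, vfork or clone event; see
   linux_record_stopped_pid and linux_handle_extended_wait.  */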
95 struct simple_pid_list
96 {
97 int pid;
98 struct simple_pid_list *next;
99 };
100 struct simple_pid_list *stopped_pids;
101
102 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
103 cannot be used, 1 if it can. */
104
105 static int linux_supports_tracefork_flag = -1;
106
107 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
108 PTRACE_O_TRACEVFORKDONE. */
109
110 static int linux_supports_tracevforkdone_flag = -1;
111
112 \f
113 /* Trivial list manipulation functions to keep track of a list of
114 new stopped processes. */
115 static void
116 add_to_pid_list (struct simple_pid_list **listp, int pid)
117 {
118 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
119 new_pid->pid = pid;
120 new_pid->next = *listp;
121 *listp = new_pid;
122 }
123
124 static int
125 pull_pid_from_list (struct simple_pid_list **listp, int pid)
126 {
127 struct simple_pid_list **p;
128
129 for (p = listp; *p != NULL; p = &(*p)->next)
130 if ((*p)->pid == pid)
131 {
132 struct simple_pid_list *next = (*p)->next;
133 xfree (*p);
134 *p = next;
135 return 1;
136 }
137 return 0;
138 }
139
140 void
141 linux_record_stopped_pid (int pid)
142 {
143 add_to_pid_list (&stopped_pids, pid);
144 }
145
146 \f
147 /* A helper function for linux_test_for_tracefork, called after fork (). */
148
149 static void
150 linux_tracefork_child (void)
151 {
152 int ret;
153
154 ptrace (PTRACE_TRACEME, 0, 0, 0);
155 kill (getpid (), SIGSTOP);
156 fork ();
157 _exit (0);
158 }
159
160 /* Wrapper function for waitpid which handles EINTR. */
161
162 static int
163 my_waitpid (int pid, int *status, int flags)
164 {
165 int ret;
166 do
167 {
168 ret = waitpid (pid, status, flags);
169 }
170 while (ret == -1 && errno == EINTR);
171
172 return ret;
173 }
174
175 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
176
177 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
178 we know that the feature is not available. This may change the tracing
179 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
180
181 However, if it succeeds, we don't know for sure that the feature is
182 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
183 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
184 fork tracing, and let it fork. If the process exits, we assume that we
185 can't use TRACEFORK; if we get the fork notification, and we can extract
186 the new child's PID, then we assume that we can. */
187
188 static void
189 linux_test_for_tracefork (int original_pid)
190 {
191 int child_pid, ret, status;
192 long second_pid;
193
194 linux_supports_tracefork_flag = 0;
195 linux_supports_tracevforkdone_flag = 0;
196
197 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
198 if (ret != 0)
199 return;
200
201 child_pid = fork ();
202 if (child_pid == -1)
203 perror_with_name (("fork"));
204
205 if (child_pid == 0)
206 linux_tracefork_child ();
207
208 ret = my_waitpid (child_pid, &status, 0);
209 if (ret == -1)
210 perror_with_name (("waitpid"));
211 else if (ret != child_pid)
212 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
213 if (! WIFSTOPPED (status))
214 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
215
216 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
217 if (ret != 0)
218 {
219 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
220 if (ret != 0)
221 {
222 warning (_("linux_test_for_tracefork: failed to kill child"));
223 return;
224 }
225
226 ret = my_waitpid (child_pid, &status, 0);
227 if (ret != child_pid)
228 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
229 else if (!WIFSIGNALED (status))
230 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
231 "killed child"), status);
232
233 return;
234 }
235
236 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
237 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
238 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
239 linux_supports_tracevforkdone_flag = (ret == 0);
240
241 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
242 if (ret != 0)
243 warning (_("linux_test_for_tracefork: failed to resume child"));
244
245 ret = my_waitpid (child_pid, &status, 0);
246
247 if (ret == child_pid && WIFSTOPPED (status)
248 && status >> 16 == PTRACE_EVENT_FORK)
249 {
250 second_pid = 0;
251 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
252 if (ret == 0 && second_pid != 0)
253 {
254 int second_status;
255
256 linux_supports_tracefork_flag = 1;
257 my_waitpid (second_pid, &second_status, 0);
258 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
259 if (ret != 0)
260 warning (_("linux_test_for_tracefork: failed to kill second child"));
261 }
262 }
263 else
264 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
265 "(%d, status 0x%x)"), ret, status);
266
267 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
268 if (ret != 0)
269 warning (_("linux_test_for_tracefork: failed to kill child"));
270 my_waitpid (child_pid, &status, 0);
271 }
272
273 /* Return non-zero iff we have tracefork functionality available.
274 This function also sets linux_supports_tracefork_flag. */
275
276 static int
277 linux_supports_tracefork (int pid)
278 {
279 if (linux_supports_tracefork_flag == -1)
280 linux_test_for_tracefork (pid);
281 return linux_supports_tracefork_flag;
282 }
283
284 static int
285 linux_supports_tracevforkdone (int pid)
286 {
287 if (linux_supports_tracefork_flag == -1)
288 linux_test_for_tracefork (pid);
289 return linux_supports_tracevforkdone_flag;
290 }
291
292 \f
293 void
294 linux_enable_event_reporting (ptid_t ptid)
295 {
296 int pid = ptid_get_lwp (ptid);
297 int options;
298
299 if (pid == 0)
300 pid = ptid_get_pid (ptid);
301
302 if (! linux_supports_tracefork (pid))
303 return;
304
305 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
306 | PTRACE_O_TRACECLONE;
307 if (linux_supports_tracevforkdone (pid))
308 options |= PTRACE_O_TRACEVFORKDONE;
309
310 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
311 read-only process state. */
312
313 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
314 }
315
316 void
317 child_post_attach (int pid)
318 {
319 linux_enable_event_reporting (pid_to_ptid (pid));
320 }
321
322 void
323 linux_child_post_startup_inferior (ptid_t ptid)
324 {
325 linux_enable_event_reporting (ptid);
326 }
327
328 #ifndef LINUX_CHILD_POST_STARTUP_INFERIOR
329 void
330 child_post_startup_inferior (ptid_t ptid)
331 {
332 linux_child_post_startup_inferior (ptid);
333 }
334 #endif
335
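/* Handle a fork or vfork event that was just reported: either detach
   from the new child and stay with the parent, or switch over to the
   child, according to FOLLOW_CHILD.  Returns zero.  */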
336 int
337 child_follow_fork (int follow_child)
338 {
339 ptid_t last_ptid;
340 struct target_waitstatus last_status;
341 int has_vforked;
342 int parent_pid, child_pid;
343
344 get_last_target_status (&last_ptid, &last_status);
345 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
346 parent_pid = ptid_get_lwp (last_ptid);
347 if (parent_pid == 0)
348 parent_pid = ptid_get_pid (last_ptid);
349 child_pid = last_status.value.related_pid;
350
351 if (! follow_child)
352 {
353 /* We're already attached to the parent, by default. */
354
355 /* Before detaching from the child, remove all breakpoints from
356 it. (This won't actually modify the breakpoint list, but will
357 physically remove the breakpoints from the child.) */
358 /* If we vforked this will remove the breakpoints from the parent
359 also, but they'll be reinserted below. */
360 detach_breakpoints (child_pid);
361
362 fprintf_filtered (gdb_stdout,
363 "Detaching after fork from child process %d.\n",
364 child_pid);
365
366 ptrace (PTRACE_DETACH, child_pid, 0, 0);
367
368 if (has_vforked)
369 {
370 gdb_assert (linux_supports_tracefork_flag >= 0);
371 if (linux_supports_tracevforkdone (0))
372 {
373 int status;
374
375 ptrace (PTRACE_CONT, parent_pid, 0, 0);
376 waitpid (parent_pid, &status, __WALL);
377 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
378 warning (_("Unexpected waitpid result %06x when waiting for "
379 "vfork-done"), status);
380 }
381 else
382 {
383 /* We can't insert breakpoints until the child has
384 finished with the shared memory region. We need to
385 wait until that happens. Ideal would be to just
386 call:
387 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
388 - waitpid (parent_pid, &status, __WALL);
389 However, most architectures can't handle a syscall
390 being traced on the way out if it wasn't traced on
391 the way in.
392
393 We might also think to loop, continuing the child
394 until it exits or gets a SIGTRAP. One problem is
395 that the child might call ptrace with PTRACE_TRACEME.
396
397 There's no simple and reliable way to figure out when
398 the vforked child will be done with its copy of the
399 shared memory. We could step it out of the syscall,
400 two instructions, let it go, and then single-step the
401 parent once. When we have hardware single-step, this
402 would work; with software single-step it could still
403 be made to work but we'd have to be able to insert
404 single-step breakpoints in the child, and we'd have
405 to insert -just- the single-step breakpoint in the
406 parent. Very awkward.
407
408 In the end, the best we can do is to make sure it
409 runs for a little while. Hopefully it will be out of
410 range of any breakpoints we reinsert. Usually this
411 is only the single-step breakpoint at vfork's return
412 point. */
413
414 usleep (10000);
415 }
416
417 /* Since we vforked, breakpoints were removed in the parent
418 too. Put them back. */
419 reattach_breakpoints (parent_pid);
420 }
421 }
422 else
423 {
424 char child_pid_spelling[40];
425
426 /* Needed to keep the breakpoint lists in sync. */
427 if (! has_vforked)
428 detach_breakpoints (child_pid);
429
430 /* Before detaching from the parent, remove all breakpoints from it. */
431 remove_breakpoints ();
432
433 fprintf_filtered (gdb_stdout,
434 "Attaching after fork to child process %d.\n",
435 child_pid);
436
437 /* If we're vforking, we may want to hold on to the parent until
438 the child exits or execs. At exec time we can remove the old
439 breakpoints from the parent and detach it; at exit time we
440 could do the same (or even, sneakily, resume debugging it - the
441 child's exec has failed, or something similar).
442
443 This doesn't clean up "properly", because we can't call
444 target_detach, but that's OK; if the current target is "child",
445 then it doesn't need any further cleanups, and lin_lwp will
446 generally not encounter vfork (vfork is defined to fork
447 in libpthread.so).
448
449 The holding part is very easy if we have VFORKDONE events;
450 but keeping track of both processes is beyond GDB at the
451 moment. So we don't expose the parent to the rest of GDB.
452 Instead we quietly hold onto it until such time as we can
453 safely resume it. */
454
455 if (has_vforked)
456 linux_parent_pid = parent_pid;
457 else
458 target_detach (NULL, 0);
459
460 inferior_ptid = pid_to_ptid (child_pid);
461 push_target (&deprecated_child_ops);
462
463 /* Reset breakpoints in the child as appropriate. */
464 follow_inferior_reset_breakpoints ();
465 }
466
467 return 0;
468 }
469
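/* Handle a ptrace extended event (fork, vfork, clone or exec) encoded
   in STATUS for process PID, filling in OURSTATUS.  For events that
   create a new child, wait for the child's initial stop if we have not
   already seen it.  Returns the ptid the event should be reported
   for.  */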
470 ptid_t
471 linux_handle_extended_wait (int pid, int status,
472 struct target_waitstatus *ourstatus)
473 {
474 int event = status >> 16;
475
476 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
477 || event == PTRACE_EVENT_CLONE)
478 {
479 unsigned long new_pid;
480 int ret;
481
482 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
483
484 /* If we haven't already seen the new PID stop, wait for it now. */
485 if (! pull_pid_from_list (&stopped_pids, new_pid))
486 {
487 /* The new child has a pending SIGSTOP. We can't affect it until it
488 hits the SIGSTOP, but we're already attached. */
489 do {
490 ret = waitpid (new_pid, &status,
491 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
492 } while (ret == -1 && errno == EINTR);
493 if (ret == -1)
494 perror_with_name (_("waiting for new child"));
495 else if (ret != new_pid)
496 internal_error (__FILE__, __LINE__,
497 _("wait returned unexpected PID %d"), ret);
498 else if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
499 internal_error (__FILE__, __LINE__,
500 _("wait returned unexpected status 0x%x"), status);
501 }
502
503 if (event == PTRACE_EVENT_FORK)
504 ourstatus->kind = TARGET_WAITKIND_FORKED;
505 else if (event == PTRACE_EVENT_VFORK)
506 ourstatus->kind = TARGET_WAITKIND_VFORKED;
507 else
508 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
509
510 ourstatus->value.related_pid = new_pid;
511 return inferior_ptid;
512 }
513
514 if (event == PTRACE_EVENT_EXEC)
515 {
516 ourstatus->kind = TARGET_WAITKIND_EXECD;
517 ourstatus->value.execd_pathname
518 = xstrdup (child_pid_to_exec_file (pid));
519
520 if (linux_parent_pid)
521 {
522 detach_breakpoints (linux_parent_pid);
523 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
524
525 linux_parent_pid = 0;
526 }
527
528 return inferior_ptid;
529 }
530
531 internal_error (__FILE__, __LINE__,
532 _("unknown ptrace event %d"), event);
533 }
534
535 \f
536 void
537 child_insert_fork_catchpoint (int pid)
538 {
539 if (! linux_supports_tracefork (pid))
540 error (_("Your system does not support fork catchpoints."));
541 }
542
543 void
544 child_insert_vfork_catchpoint (int pid)
545 {
546 if (!linux_supports_tracefork (pid))
547 error (_("Your system does not support vfork catchpoints."));
548 }
549
550 void
551 child_insert_exec_catchpoint (int pid)
552 {
553 if (!linux_supports_tracefork (pid))
554 error (_("Your system does not support exec catchpoints."));
555 }
556
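/* Kill the inferior, along with any child left over from a fork or
   vfork that has not been followed yet, then mourn it.  */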
557 void
558 kill_inferior (void)
559 {
560 int status;
561 int pid = PIDGET (inferior_ptid);
562 struct target_waitstatus last;
563 ptid_t last_ptid;
564 int ret;
565
566 if (pid == 0)
567 return;
568
569 /* If we're stopped while forking and we haven't followed yet, kill the
570 other task. We need to do this first because the parent will be
571 sleeping if this is a vfork. */
572
573 get_last_target_status (&last_ptid, &last);
574
575 if (last.kind == TARGET_WAITKIND_FORKED
576 || last.kind == TARGET_WAITKIND_VFORKED)
577 {
578 ptrace (PT_KILL, last.value.related_pid, 0, 0);
579 wait (&status);
580 }
581
582 /* Kill the current process. */
583 ptrace (PT_KILL, pid, 0, 0);
584 ret = wait (&status);
585
586 /* We might get a SIGCHLD instead of an exit status. This is
587 aggravated by the first kill above - a child has just died. */
588
589 while (ret == pid && WIFSTOPPED (status))
590 {
591 ptrace (PT_KILL, pid, 0, 0);
592 ret = wait (&status);
593 }
594
595 target_mourn_inferior ();
596 }
597
598 /* On GNU/Linux there are no real LWPs. The closest thing to LWPs
599 are processes sharing the same VM space. A multi-threaded process
600 is basically a group of such processes. However, such a grouping
601 is almost entirely a user-space issue; the kernel doesn't enforce
602 such a grouping at all (this might change in the future). In
603 general, we'll rely on the threads library (i.e. the GNU/Linux
604 Threads library) to provide such a grouping.
605
606 It is perfectly possible to write a multi-threaded application
607 without the assistance of a threads library, by using the clone
608 system call directly. This module should be able to give some
609 rudimentary support for debugging such applications if developers
610 specify the CLONE_PTRACE flag in the clone system call, and are
611 using the Linux kernel 2.4 or above.
612
613 Note that there are some peculiarities in GNU/Linux that affect
614 this code:
615
616 - In general one should specify the __WCLONE flag to waitpid in
617 order to make it report events for any of the cloned processes
618 (and leave it out for the initial process). However, if a cloned
619 process has exited the exit status is only reported if the
620 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
621 we cannot use it since GDB must work on older systems too.
622
623 - When a traced, cloned process exits and is waited for by the
624 debugger, the kernel reassigns it to the original parent and
625 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
626 library doesn't notice this, which leads to the "zombie problem":
627 When debugged, a multi-threaded process that spawns a lot of
628 threads will run out of processes, even if the threads exit,
629 because the "zombies" stay around. */
630
631 /* List of known LWPs. */
632 static struct lwp_info *lwp_list;
633
634 /* Number of LWPs in the list. */
635 static int num_lwps;
636
637 /* Non-zero if we're running in "threaded" mode. */
638 static int threaded;
639 \f
640
641 #define GET_LWP(ptid) ptid_get_lwp (ptid)
642 #define GET_PID(ptid) ptid_get_pid (ptid)
643 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
644 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
645
646 /* If the last reported event was a SIGTRAP, this variable is set to
647 the process id of the LWP/thread that got it. */
648 ptid_t trap_ptid;
649 \f
650
651 /* This module's target-specific operations. */
652 static struct target_ops linux_nat_ops;
653
654 /* Since we cannot wait (in linux_nat_wait) for the initial process and
655 any cloned processes with a single call to waitpid, we have to use
656 the WNOHANG flag and call waitpid in a loop. To optimize
657 things a bit we use `sigsuspend' to wake us up when a process has
658 something to report (it will send us a SIGCHLD if it has). To make
659 this work we have to juggle with the signal mask. We save the
660 original signal mask such that we can restore it before creating a
661 new process in order to avoid blocking certain signals in the
662 inferior. We then block SIGCHLD during the waitpid/sigsuspend
663 loop. */
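
/* A minimal sketch of that pattern, for illustration only (the real
   loop in linux_nat_wait is more involved and also juggles per-LWP
   state):

       sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
       while ((pid = waitpid (-1, &status, options | WNOHANG)) <= 0)
         sigsuspend (&suspend_mask);

   Because SIGCHLD is blocked everywhere else, sigsuspend atomically
   unblocks it and sleeps, so a SIGCHLD delivered between the waitpid
   and the sigsuspend cannot be missed.  */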
664
665 /* Original signal mask. */
666 static sigset_t normal_mask;
667
668 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
669 _initialize_linux_nat. */
670 static sigset_t suspend_mask;
671
672 /* Signals to block to make sigsuspend work. */
673 static sigset_t blocked_mask;
674 \f
675
676 /* Prototypes for local functions. */
677 static int stop_wait_callback (struct lwp_info *lp, void *data);
678 static int linux_nat_thread_alive (ptid_t ptid);
679 \f
680 /* Convert wait status STATUS to a string. Used for printing debug
681 messages only. */
682
683 static char *
684 status_to_str (int status)
685 {
686 static char buf[64];
687
688 if (WIFSTOPPED (status))
689 snprintf (buf, sizeof (buf), "%s (stopped)",
690 strsignal (WSTOPSIG (status)));
691 else if (WIFSIGNALED (status))
692 snprintf (buf, sizeof (buf), "%s (terminated)",
693 strsignal (WTERMSIG (status)));
694 else
695 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
696
697 return buf;
698 }
699
700 /* Initialize the list of LWPs. Note that this module, contrary to
701 what GDB's generic threads layer does for its thread list,
702 re-initializes the LWP lists whenever we mourn or detach (which
703 doesn't involve mourning) the inferior. */
704
705 static void
706 init_lwp_list (void)
707 {
708 struct lwp_info *lp, *lpnext;
709
710 for (lp = lwp_list; lp; lp = lpnext)
711 {
712 lpnext = lp->next;
713 xfree (lp);
714 }
715
716 lwp_list = NULL;
717 num_lwps = 0;
718 threaded = 0;
719 }
720
721 /* Add the LWP specified by PTID to the list. If this causes the
722 number of LWPs to become larger than one, go into "threaded" mode.
723 Return a pointer to the structure describing the new LWP. */
724
725 static struct lwp_info *
726 add_lwp (ptid_t ptid)
727 {
728 struct lwp_info *lp;
729
730 gdb_assert (is_lwp (ptid));
731
732 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
733
734 memset (lp, 0, sizeof (struct lwp_info));
735
736 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
737
738 lp->ptid = ptid;
739
740 lp->next = lwp_list;
741 lwp_list = lp;
742 if (++num_lwps > 1)
743 threaded = 1;
744
745 return lp;
746 }
747
748 /* Remove the LWP specified by PTID from the list. */
749
750 static void
751 delete_lwp (ptid_t ptid)
752 {
753 struct lwp_info *lp, *lpprev;
754
755 lpprev = NULL;
756
757 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
758 if (ptid_equal (lp->ptid, ptid))
759 break;
760
761 if (!lp)
762 return;
763
764 /* We don't go back to "non-threaded" mode if the number of threads
765 becomes less than two. */
766 num_lwps--;
767
768 if (lpprev)
769 lpprev->next = lp->next;
770 else
771 lwp_list = lp->next;
772
773 xfree (lp);
774 }
775
776 /* Return a pointer to the structure describing the LWP corresponding
777 to PTID. If no corresponding LWP could be found, return NULL. */
778
779 static struct lwp_info *
780 find_lwp_pid (ptid_t ptid)
781 {
782 struct lwp_info *lp;
783 int lwp;
784
785 if (is_lwp (ptid))
786 lwp = GET_LWP (ptid);
787 else
788 lwp = GET_PID (ptid);
789
790 for (lp = lwp_list; lp; lp = lp->next)
791 if (lwp == GET_LWP (lp->ptid))
792 return lp;
793
794 return NULL;
795 }
796
797 /* Call CALLBACK with its second argument set to DATA for every LWP in
798 the list. If CALLBACK returns 1 for a particular LWP, return a
799 pointer to the structure describing that LWP immediately.
800 Otherwise return NULL. */
801
802 struct lwp_info *
803 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
804 {
805 struct lwp_info *lp, *lpnext;
806
807 for (lp = lwp_list; lp; lp = lpnext)
808 {
809 lpnext = lp->next;
810 if ((*callback) (lp, data))
811 return lp;
812 }
813
814 return NULL;
815 }
816
817 /* Attach to the LWP specified by PTID. If VERBOSE is non-zero, print
818 a message telling the user that a new LWP has been added to the
819 process. */
820
821 void
822 lin_lwp_attach_lwp (ptid_t ptid, int verbose)
823 {
824 struct lwp_info *lp, *found_lp;
825
826 gdb_assert (is_lwp (ptid));
827
828 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
829 to interrupt either the ptrace() or waitpid() calls below. */
830 if (!sigismember (&blocked_mask, SIGCHLD))
831 {
832 sigaddset (&blocked_mask, SIGCHLD);
833 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
834 }
835
836 if (verbose)
837 printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));
838
839 found_lp = lp = find_lwp_pid (ptid);
840 if (lp == NULL)
841 lp = add_lwp (ptid);
842
843 /* We assume that we're already attached to any LWP that has an id
844 equal to the overall process id, and to any LWP that is already
845 in our list of LWPs. If we're not seeing exit events from threads
846 and we've had PID wraparound since we last tried to stop all threads,
847 this assumption might be wrong; fortunately, this is very unlikely
848 to happen. */
849 if (GET_LWP (ptid) != GET_PID (ptid) && found_lp == NULL)
850 {
851 pid_t pid;
852 int status;
853
854 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
855 error (_("Can't attach %s: %s"), target_pid_to_str (ptid),
856 safe_strerror (errno));
857
858 if (debug_linux_nat)
859 fprintf_unfiltered (gdb_stdlog,
860 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
861 target_pid_to_str (ptid));
862
863 pid = waitpid (GET_LWP (ptid), &status, 0);
864 if (pid == -1 && errno == ECHILD)
865 {
866 /* Try again with __WCLONE to check cloned processes. */
867 pid = waitpid (GET_LWP (ptid), &status, __WCLONE);
868 lp->cloned = 1;
869 }
870
871 gdb_assert (pid == GET_LWP (ptid)
872 && WIFSTOPPED (status) && WSTOPSIG (status));
873
874 child_post_attach (pid);
875
876 lp->stopped = 1;
877
878 if (debug_linux_nat)
879 {
880 fprintf_unfiltered (gdb_stdlog,
881 "LLAL: waitpid %s received %s\n",
882 target_pid_to_str (ptid),
883 status_to_str (status));
884 }
885 }
886 else
887 {
888 /* We assume that the LWP representing the original process is
889 already stopped. Mark it as stopped in the data structure
890 that the linux ptrace layer uses to keep track of threads.
891 Note that this won't already have been done, since the main
892 thread will presumably have been stopped by an attach from a
893 different layer. */
894 lp->stopped = 1;
895 }
896 }
897
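/* Attach to the process specified by ARGS, record it as the first
   LWP, wait for its initial stop, and fake the SIGSTOP that core GDB
   expects to see.  */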
898 static void
899 linux_nat_attach (char *args, int from_tty)
900 {
901 struct lwp_info *lp;
902 pid_t pid;
903 int status;
904
905 /* FIXME: We should probably accept a list of process id's, and
906 attach all of them. */
907 deprecated_child_ops.to_attach (args, from_tty);
908
909 /* Add the initial process as the first LWP to the list. */
910 lp = add_lwp (BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid)));
911
912 /* Make sure the initial process is stopped. The user-level threads
913 layer might want to poke around in the inferior, and that won't
914 work if things haven't stabilized yet. */
915 pid = waitpid (GET_PID (inferior_ptid), &status, 0);
916 if (pid == -1 && errno == ECHILD)
917 {
918 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
919
920 /* Try again with __WCLONE to check cloned processes. */
921 pid = waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
922 lp->cloned = 1;
923 }
924
925 gdb_assert (pid == GET_PID (inferior_ptid)
926 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
927
928 lp->stopped = 1;
929
930 /* Fake the SIGSTOP that core GDB expects. */
931 lp->status = W_STOPCODE (SIGSTOP);
932 lp->resumed = 1;
933 if (debug_linux_nat)
934 {
935 fprintf_unfiltered (gdb_stdlog,
936 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
937 }
938 }
939
940 static int
941 detach_callback (struct lwp_info *lp, void *data)
942 {
943 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
944
945 if (debug_linux_nat && lp->status)
946 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
947 strsignal (WSTOPSIG (lp->status)),
948 target_pid_to_str (lp->ptid));
949
950 while (lp->signalled && lp->stopped)
951 {
952 errno = 0;
953 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
954 WSTOPSIG (lp->status)) < 0)
955 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
956 safe_strerror (errno));
957
958 if (debug_linux_nat)
959 fprintf_unfiltered (gdb_stdlog,
960 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
961 target_pid_to_str (lp->ptid),
962 status_to_str (lp->status));
963
964 lp->stopped = 0;
965 lp->signalled = 0;
966 lp->status = 0;
967 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
968 here. But since lp->signalled was cleared above,
969 stop_wait_callback didn't do anything; the process was left
970 running. Shouldn't we be waiting for it to stop?
971 I've removed the call, since stop_wait_callback now does do
972 something when called with lp->signalled == 0. */
973
974 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
975 }
976
977 /* We don't actually detach from the LWP that has an id equal to the
978 overall process id just yet. */
979 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
980 {
981 errno = 0;
982 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
983 WSTOPSIG (lp->status)) < 0)
984 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
985 safe_strerror (errno));
986
987 if (debug_linux_nat)
988 fprintf_unfiltered (gdb_stdlog,
989 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
990 target_pid_to_str (lp->ptid),
991 strsignal (WSTOPSIG (lp->status)));
992
993 delete_lwp (lp->ptid);
994 }
995
996 return 0;
997 }
998
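/* Detach from the inferior: detach each LWP other than the initial
   process, discard the LWP list, restore the original signal mask, and
   let the lower layer detach the initial process itself.  */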
999 static void
1000 linux_nat_detach (char *args, int from_tty)
1001 {
1002 iterate_over_lwps (detach_callback, NULL);
1003
1004 /* Only the initial process should be left right now. */
1005 gdb_assert (num_lwps == 1);
1006
1007 trap_ptid = null_ptid;
1008
1009 /* Destroy LWP info; it's no longer valid. */
1010 init_lwp_list ();
1011
1012 /* Restore the original signal mask. */
1013 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1014 sigemptyset (&blocked_mask);
1015
1016 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1017 deprecated_child_ops.to_detach (args, from_tty);
1018 }
1019
1020 /* Resume LP. */
1021
1022 static int
1023 resume_callback (struct lwp_info *lp, void *data)
1024 {
1025 if (lp->stopped && lp->status == 0)
1026 {
1027 struct thread_info *tp;
1028
1029 child_resume (pid_to_ptid (GET_LWP (lp->ptid)), 0, TARGET_SIGNAL_0);
1030 if (debug_linux_nat)
1031 fprintf_unfiltered (gdb_stdlog,
1032 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1033 target_pid_to_str (lp->ptid));
1034 lp->stopped = 0;
1035 lp->step = 0;
1036 }
1037
1038 return 0;
1039 }
1040
1041 static int
1042 resume_clear_callback (struct lwp_info *lp, void *data)
1043 {
1044 lp->resumed = 0;
1045 return 0;
1046 }
1047
1048 static int
1049 resume_set_callback (struct lwp_info *lp, void *data)
1050 {
1051 lp->resumed = 1;
1052 return 0;
1053 }
1054
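/* Resume the inferior: resume all LWPs if PTID's pid component is -1,
   otherwise only the LWP named by PTID; STEP and SIGNO apply to the
   event thread.  */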
1055 static void
1056 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1057 {
1058 struct lwp_info *lp;
1059 int resume_all;
1060
1061 /* A specific PTID means `resume only this process id'. */
1062 resume_all = (PIDGET (ptid) == -1);
1063
1064 if (resume_all)
1065 iterate_over_lwps (resume_set_callback, NULL);
1066 else
1067 iterate_over_lwps (resume_clear_callback, NULL);
1068
1069 /* If PID is -1, it's the current inferior that should be
1070 handled specially. */
1071 if (PIDGET (ptid) == -1)
1072 ptid = inferior_ptid;
1073
1074 lp = find_lwp_pid (ptid);
1075 if (lp)
1076 {
1077 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1078
1079 /* Remember if we're stepping. */
1080 lp->step = step;
1081
1082 /* Mark this LWP as resumed. */
1083 lp->resumed = 1;
1084
1085 /* If we have a pending wait status for this thread, there is no
1086 point in resuming the process. */
1087 if (lp->status)
1088 {
1089 /* FIXME: What should we do if we are supposed to continue
1090 this thread with a signal? */
1091 gdb_assert (signo == TARGET_SIGNAL_0);
1092 return;
1093 }
1094
1095 /* Mark LWP as not stopped to prevent it from being continued by
1096 resume_callback. */
1097 lp->stopped = 0;
1098 }
1099
1100 if (resume_all)
1101 iterate_over_lwps (resume_callback, NULL);
1102
1103 child_resume (ptid, step, signo);
1104 if (debug_linux_nat)
1105 fprintf_unfiltered (gdb_stdlog,
1106 "LLR: %s %s, %s (resume event thread)\n",
1107 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1108 target_pid_to_str (ptid),
1109 signo ? strsignal (signo) : "0");
1110 }
1111
1112 /* Send a signal to the specified LWP, using tkill where available. */
1113
1114 static int tkill_failed;
1115
1116 static int
1117 kill_lwp (int lwpid, int signo)
1118 {
1119 errno = 0;
1120
1121 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1122 fails, then we are not using nptl threads and we should be using kill. */
1123
1124 #ifdef HAVE_TKILL_SYSCALL
1125 if (!tkill_failed)
1126 {
1127 int ret = syscall (__NR_tkill, lwpid, signo);
1128 if (errno != ENOSYS)
1129 return ret;
1130 errno = 0;
1131 tkill_failed = 1;
1132 }
1133 #endif
1134
1135 return kill (lwpid, signo);
1136 }
1137
1138 /* Handle a GNU/Linux extended wait response. Most of the work we
1139 just pass off to linux_handle_extended_wait, but if it reports a
1140 clone event we need to add the new LWP to our list (and not report
1141 the trap to higher layers). This function returns non-zero if
1142 the event should be ignored and we should wait again. */
1143
1144 static int
1145 linux_nat_handle_extended (struct lwp_info *lp, int status)
1146 {
1147 linux_handle_extended_wait (GET_LWP (lp->ptid), status,
1148 &lp->waitstatus);
1149
1150 /* TARGET_WAITKIND_SPURIOUS is used to indicate clone events. */
1151 if (lp->waitstatus.kind == TARGET_WAITKIND_SPURIOUS)
1152 {
1153 struct lwp_info *new_lp;
1154 new_lp = add_lwp (BUILD_LWP (lp->waitstatus.value.related_pid,
1155 GET_PID (inferior_ptid)));
1156 new_lp->cloned = 1;
1157 new_lp->stopped = 1;
1158
1159 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1160
1161 if (debug_linux_nat)
1162 fprintf_unfiltered (gdb_stdlog,
1163 "LLHE: Got clone event from LWP %ld, resuming\n",
1164 GET_LWP (lp->ptid));
1165 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1166
1167 return 1;
1168 }
1169
1170 return 0;
1171 }
1172
1173 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1174 exited. */
1175
1176 static int
1177 wait_lwp (struct lwp_info *lp)
1178 {
1179 pid_t pid;
1180 int status;
1181 int thread_dead = 0;
1182
1183 gdb_assert (!lp->stopped);
1184 gdb_assert (lp->status == 0);
1185
1186 pid = waitpid (GET_LWP (lp->ptid), &status, 0);
1187 if (pid == -1 && errno == ECHILD)
1188 {
1189 pid = waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1190 if (pid == -1 && errno == ECHILD)
1191 {
1192 /* The thread has previously exited. We need to delete it
1193 now because, for some vendor 2.4 kernels with NPTL
1194 support backported, there won't be an exit event unless
1195 it is the main thread. 2.6 kernels will report an exit
1196 event for each thread that exits, as expected. */
1197 thread_dead = 1;
1198 if (debug_linux_nat)
1199 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1200 target_pid_to_str (lp->ptid));
1201 }
1202 }
1203
1204 if (!thread_dead)
1205 {
1206 gdb_assert (pid == GET_LWP (lp->ptid));
1207
1208 if (debug_linux_nat)
1209 {
1210 fprintf_unfiltered (gdb_stdlog,
1211 "WL: waitpid %s received %s\n",
1212 target_pid_to_str (lp->ptid),
1213 status_to_str (status));
1214 }
1215 }
1216
1217 /* Check if the thread has exited. */
1218 if (WIFEXITED (status) || WIFSIGNALED (status))
1219 {
1220 thread_dead = 1;
1221 if (debug_linux_nat)
1222 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1223 target_pid_to_str (lp->ptid));
1224 }
1225
1226 if (thread_dead)
1227 {
1228 if (in_thread_list (lp->ptid))
1229 {
1230 /* Core GDB cannot deal with us deleting the current thread. */
1231 if (!ptid_equal (lp->ptid, inferior_ptid))
1232 delete_thread (lp->ptid);
1233 printf_unfiltered (_("[%s exited]\n"),
1234 target_pid_to_str (lp->ptid));
1235 }
1236
1237 delete_lwp (lp->ptid);
1238 return 0;
1239 }
1240
1241 gdb_assert (WIFSTOPPED (status));
1242
1243 /* Handle GNU/Linux's extended waitstatus for trace events. */
1244 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1245 {
1246 if (debug_linux_nat)
1247 fprintf_unfiltered (gdb_stdlog,
1248 "WL: Handling extended status 0x%06x\n",
1249 status);
1250 if (linux_nat_handle_extended (lp, status))
1251 return wait_lwp (lp);
1252 }
1253
1254 return status;
1255 }
1256
1257 /* Send a SIGSTOP to LP. */
1258
1259 static int
1260 stop_callback (struct lwp_info *lp, void *data)
1261 {
1262 if (!lp->stopped && !lp->signalled)
1263 {
1264 int ret;
1265
1266 if (debug_linux_nat)
1267 {
1268 fprintf_unfiltered (gdb_stdlog,
1269 "SC: kill %s **<SIGSTOP>**\n",
1270 target_pid_to_str (lp->ptid));
1271 }
1272 errno = 0;
1273 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1274 if (debug_linux_nat)
1275 {
1276 fprintf_unfiltered (gdb_stdlog,
1277 "SC: lwp kill %d %s\n",
1278 ret,
1279 errno ? safe_strerror (errno) : "ERRNO-OK");
1280 }
1281
1282 lp->signalled = 1;
1283 gdb_assert (lp->status == 0);
1284 }
1285
1286 return 0;
1287 }
1288
1289 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1290 a pointer to a set of signals to be flushed immediately. */
1291
1292 static int
1293 stop_wait_callback (struct lwp_info *lp, void *data)
1294 {
1295 sigset_t *flush_mask = data;
1296
1297 if (!lp->stopped)
1298 {
1299 int status;
1300
1301 status = wait_lwp (lp);
1302 if (status == 0)
1303 return 0;
1304
1305 /* Ignore any signals in FLUSH_MASK. */
1306 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1307 {
1308 if (!lp->signalled)
1309 {
1310 lp->stopped = 1;
1311 return 0;
1312 }
1313
1314 errno = 0;
1315 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1316 if (debug_linux_nat)
1317 fprintf_unfiltered (gdb_stdlog,
1318 "PTRACE_CONT %s, 0, 0 (%s)\n",
1319 target_pid_to_str (lp->ptid),
1320 errno ? safe_strerror (errno) : "OK");
1321
1322 return stop_wait_callback (lp, flush_mask);
1323 }
1324
1325 if (WSTOPSIG (status) != SIGSTOP)
1326 {
1327 if (WSTOPSIG (status) == SIGTRAP)
1328 {
1329 /* If a LWP other than the LWP that we're reporting an
1330 event for has hit a GDB breakpoint (as opposed to
1331 some random trap signal), then just arrange for it to
1332 hit it again later. We don't keep the SIGTRAP status
1333 and don't forward the SIGTRAP signal to the LWP. We
1334 will handle the current event, eventually we will
1335 resume all LWPs, and this one will get its breakpoint
1336 trap again.
1337
1338 If we do not do this, then we run the risk that the
1339 user will delete or disable the breakpoint, but the
1340 thread will have already tripped on it. */
1341
1342 /* Now resume this LWP and get the SIGSTOP event. */
1343 errno = 0;
1344 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1345 if (debug_linux_nat)
1346 {
1347 fprintf_unfiltered (gdb_stdlog,
1348 "PTRACE_CONT %s, 0, 0 (%s)\n",
1349 target_pid_to_str (lp->ptid),
1350 errno ? safe_strerror (errno) : "OK");
1351
1352 fprintf_unfiltered (gdb_stdlog,
1353 "SWC: Candidate SIGTRAP event in %s\n",
1354 target_pid_to_str (lp->ptid));
1355 }
1356 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1357 stop_wait_callback (lp, data);
1358 /* If there's another event, throw it back into the queue. */
1359 if (lp->status)
1360 {
1361 if (debug_linux_nat)
1362 {
1363 fprintf_unfiltered (gdb_stdlog,
1364 "SWC: kill %s, %s\n",
1365 target_pid_to_str (lp->ptid),
1366 status_to_str ((int) status));
1367 }
1368 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1369 }
1370 /* Save the sigtrap event. */
1371 lp->status = status;
1372 return 0;
1373 }
1374 else
1375 {
1376 /* The thread was stopped with a signal other than
1377 SIGSTOP, and didn't accidentally trip a breakpoint. */
1378
1379 if (debug_linux_nat)
1380 {
1381 fprintf_unfiltered (gdb_stdlog,
1382 "SWC: Pending event %s in %s\n",
1383 status_to_str ((int) status),
1384 target_pid_to_str (lp->ptid));
1385 }
1386 /* Now resume this LWP and get the SIGSTOP event. */
1387 errno = 0;
1388 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1389 if (debug_linux_nat)
1390 fprintf_unfiltered (gdb_stdlog,
1391 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1392 target_pid_to_str (lp->ptid),
1393 errno ? safe_strerror (errno) : "OK");
1394
1395 /* Hold this event/waitstatus while we check to see if
1396 there are any more (we still want to get that SIGSTOP). */
1397 stop_wait_callback (lp, data);
1398 /* If the lp->status field is still empty, use it to hold
1399 this event. If not, then this event must be returned
1400 to the event queue of the LWP. */
1401 if (lp->status == 0)
1402 lp->status = status;
1403 else
1404 {
1405 if (debug_linux_nat)
1406 {
1407 fprintf_unfiltered (gdb_stdlog,
1408 "SWC: kill %s, %s\n",
1409 target_pid_to_str (lp->ptid),
1410 status_to_str ((int) status));
1411 }
1412 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1413 }
1414 return 0;
1415 }
1416 }
1417 else
1418 {
1419 /* We caught the SIGSTOP that we intended to catch, so
1420 there's no SIGSTOP pending. */
1421 lp->stopped = 1;
1422 lp->signalled = 0;
1423 }
1424 }
1425
1426 return 0;
1427 }
1428
1429 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1430 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1431
1432 static int
1433 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1434 {
1435 sigset_t blocked, ignored;
1436 int i;
1437
1438 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1439
1440 if (!flush_mask)
1441 return 0;
1442
1443 for (i = 1; i < NSIG; i++)
1444 if (sigismember (pending, i))
1445 if (!sigismember (flush_mask, i)
1446 || sigismember (&blocked, i)
1447 || sigismember (&ignored, i))
1448 sigdelset (pending, i);
1449
1450 if (sigisemptyset (pending))
1451 return 0;
1452
1453 return 1;
1454 }
1455
1456 /* DATA is interpreted as a mask of signals to flush. If LP has
1457 signals pending, and they are all in the flush mask, then arrange
1458 to flush them. LP should be stopped, as should all other threads
1459 it might share a signal queue with. */
1460
1461 static int
1462 flush_callback (struct lwp_info *lp, void *data)
1463 {
1464 sigset_t *flush_mask = data;
1465 sigset_t pending, intersection, blocked, ignored;
1466 int pid, status;
1467
1468 /* Normally, when an LWP exits, it is removed from the LWP list. The
1469 last LWP isn't removed till later, however. So if there is only
1470 one LWP on the list, make sure it's alive. */
1471 if (lwp_list == lp && lp->next == NULL)
1472 if (!linux_nat_thread_alive (lp->ptid))
1473 return 0;
1474
1475 /* Just because the LWP is stopped doesn't mean that new signals
1476 can't arrive from outside, so this function must be careful of
1477 race conditions. However, because all threads are stopped, we
1478 can assume that the pending mask will not shrink unless we resume
1479 the LWP, and that it will then get another signal. We can't
1480 control which one, however. */
1481
1482 if (lp->status)
1483 {
1484 if (debug_linux_nat)
1485 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1486 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1487 lp->status = 0;
1488 }
1489
1490 while (linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1491 {
1492 int ret;
1493
1494 errno = 0;
1495 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1496 if (debug_linux_nat)
1497 fprintf_unfiltered (gdb_stderr,
1498 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1499
1500 lp->stopped = 0;
1501 stop_wait_callback (lp, flush_mask);
1502 if (debug_linux_nat)
1503 fprintf_unfiltered (gdb_stderr,
1504 "FC: Wait finished; saved status is %d\n",
1505 lp->status);
1506 }
1507
1508 return 0;
1509 }
1510
1511 /* Return non-zero if LP has a wait status pending. */
1512
1513 static int
1514 status_callback (struct lwp_info *lp, void *data)
1515 {
1516 /* Only report a pending wait status if we pretend that this has
1517 indeed been resumed. */
1518 return (lp->status != 0 && lp->resumed);
1519 }
1520
1521 /* Return non-zero if LP isn't stopped. */
1522
1523 static int
1524 running_callback (struct lwp_info *lp, void *data)
1525 {
1526 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1527 }
1528
1529 /* Count the LWPs that have had events. */
1530
1531 static int
1532 count_events_callback (struct lwp_info *lp, void *data)
1533 {
1534 int *count = data;
1535
1536 gdb_assert (count != NULL);
1537
1538 /* Count only LWPs that have a SIGTRAP event pending. */
1539 if (lp->status != 0
1540 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1541 (*count)++;
1542
1543 return 0;
1544 }
1545
1546 /* Select the LWP (if any) that is currently being single-stepped. */
1547
1548 static int
1549 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1550 {
1551 if (lp->step && lp->status != 0)
1552 return 1;
1553 else
1554 return 0;
1555 }
1556
1557 /* Select the Nth LWP that has had a SIGTRAP event. */
1558
1559 static int
1560 select_event_lwp_callback (struct lwp_info *lp, void *data)
1561 {
1562 int *selector = data;
1563
1564 gdb_assert (selector != NULL);
1565
1566 /* Select only LWPs that have a SIGTRAP event pending. */
1567 if (lp->status != 0
1568 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1569 if ((*selector)-- == 0)
1570 return 1;
1571
1572 return 0;
1573 }
1574
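/* If LP (other than the event LWP passed in DATA) stopped with a
   SIGTRAP at a breakpoint GDB inserted, back up its PC if necessary
   and discard the SIGTRAP, so that it will hit the breakpoint again
   once resumed.  */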
1575 static int
1576 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1577 {
1578 struct lwp_info *event_lp = data;
1579
1580 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1581 if (lp == event_lp)
1582 return 0;
1583
1584 /* If a LWP other than the LWP that we're reporting an event for has
1585 hit a GDB breakpoint (as opposed to some random trap signal),
1586 then just arrange for it to hit it again later. We don't keep
1587 the SIGTRAP status and don't forward the SIGTRAP signal to the
1588 LWP. We will handle the current event, eventually we will resume
1589 all LWPs, and this one will get its breakpoint trap again.
1590
1591 If we do not do this, then we run the risk that the user will
1592 delete or disable the breakpoint, but the LWP will have already
1593 tripped on it. */
1594
1595 if (lp->status != 0
1596 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1597 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1598 DECR_PC_AFTER_BREAK))
1599 {
1600 if (debug_linux_nat)
1601 fprintf_unfiltered (gdb_stdlog,
1602 "CBC: Push back breakpoint for %s\n",
1603 target_pid_to_str (lp->ptid));
1604
1605 /* Back up the PC if necessary. */
1606 if (DECR_PC_AFTER_BREAK)
1607 write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid);
1608
1609 /* Throw away the SIGTRAP. */
1610 lp->status = 0;
1611 }
1612
1613 return 0;
1614 }
1615
1616 /* Select one LWP out of those that have events pending. */
1617
1618 static void
1619 select_event_lwp (struct lwp_info **orig_lp, int *status)
1620 {
1621 int num_events = 0;
1622 int random_selector;
1623 struct lwp_info *event_lp;
1624
1625 /* Record the wait status for the original LWP. */
1626 (*orig_lp)->status = *status;
1627
1628 /* Give preference to any LWP that is being single-stepped. */
1629 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1630 if (event_lp != NULL)
1631 {
1632 if (debug_linux_nat)
1633 fprintf_unfiltered (gdb_stdlog,
1634 "SEL: Select single-step %s\n",
1635 target_pid_to_str (event_lp->ptid));
1636 }
1637 else
1638 {
1639 /* No single-stepping LWP. Select one at random, out of those
1640 which have had SIGTRAP events. */
1641
1642 /* First see how many SIGTRAP events we have. */
1643 iterate_over_lwps (count_events_callback, &num_events);
1644
1645 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1646 random_selector = (int)
1647 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1648
1649 if (debug_linux_nat && num_events > 1)
1650 fprintf_unfiltered (gdb_stdlog,
1651 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1652 num_events, random_selector);
1653
1654 event_lp = iterate_over_lwps (select_event_lwp_callback,
1655 &random_selector);
1656 }
1657
1658 if (event_lp != NULL)
1659 {
1660 /* Switch the event LWP. */
1661 *orig_lp = event_lp;
1662 *status = event_lp->status;
1663 }
1664
1665 /* Flush the wait status for the event LWP. */
1666 (*orig_lp)->status = 0;
1667 }
1668
1669 /* Return non-zero if LP has been resumed. */
1670
1671 static int
1672 resumed_callback (struct lwp_info *lp, void *data)
1673 {
1674 return lp->resumed;
1675 }
1676
1677 #ifdef CHILD_WAIT
1678
1679 /* We need to override child_wait to support attaching to cloned
1680 processes, since a normal wait (as done by the default version)
1681 ignores those processes. */
1682
1683 /* Wait for child PTID to do something. Return id of the child,
1684 minus_one_ptid in case of error; store status into *OURSTATUS. */
1685
1686 ptid_t
1687 child_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1688 {
1689 int save_errno;
1690 int status;
1691 pid_t pid;
1692
1693 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1694
1695 do
1696 {
1697 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1698 attached process. */
1699 set_sigio_trap ();
1700
1701 pid = waitpid (GET_PID (ptid), &status, 0);
1702 if (pid == -1 && errno == ECHILD)
1703 /* Try again with __WCLONE to check cloned processes. */
1704 pid = waitpid (GET_PID (ptid), &status, __WCLONE);
1705
1706 if (debug_linux_nat)
1707 {
1708 fprintf_unfiltered (gdb_stdlog,
1709 "CW: waitpid %ld received %s\n",
1710 (long) pid, status_to_str (status));
1711 }
1712
1713 save_errno = errno;
1714
1715 /* Make sure we don't report an event for the exit of the
1716 original program, if we've detached from it. */
1717 if (pid != -1 && !WIFSTOPPED (status) && pid != GET_PID (inferior_ptid))
1718 {
1719 pid = -1;
1720 save_errno = EINTR;
1721 }
1722
1723 /* Check for stop events reported by a process we didn't already
1724 know about - in this case, anything other than inferior_ptid.
1725
1726 If we're expecting to receive stopped processes after fork,
1727 vfork, and clone events, then we'll just add the new one to
1728 our list and go back to waiting for the event to be reported
1729 - the stopped process might be returned from waitpid before
1730 or after the event is. If we want to handle debugging of
1731 CLONE_PTRACE processes we need to do more here, i.e. switch
1732 to multi-threaded mode. */
1733 if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP
1734 && pid != GET_PID (inferior_ptid))
1735 {
1736 linux_record_stopped_pid (pid);
1737 pid = -1;
1738 save_errno = EINTR;
1739 }
1740
1741 /* Handle GNU/Linux's extended waitstatus for trace events. */
1742 if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
1743 && status >> 16 != 0)
1744 {
1745 linux_handle_extended_wait (pid, status, ourstatus);
1746
1747 /* If we see a clone event, detach the child, and don't
1748 report the event. It would be nice to offer some way to
1749 switch into a non-thread-db based threaded mode at this
1750 point. */
1751 if (ourstatus->kind == TARGET_WAITKIND_SPURIOUS)
1752 {
1753 ptrace (PTRACE_DETACH, ourstatus->value.related_pid, 0, 0);
1754 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1755 ptrace (PTRACE_CONT, pid, 0, 0);
1756 pid = -1;
1757 save_errno = EINTR;
1758 }
1759 }
1760
1761 clear_sigio_trap ();
1762 clear_sigint_trap ();
1763 }
1764 while (pid == -1 && save_errno == EINTR);
1765
1766 if (pid == -1)
1767 {
1768 warning (_("Child process unexpectedly missing: %s"),
1769 safe_strerror (errno));
1770
1771 /* Claim it exited with unknown signal. */
1772 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1773 ourstatus->value.sig = TARGET_SIGNAL_UNKNOWN;
1774 return minus_one_ptid;
1775 }
1776
1777 if (ourstatus->kind == TARGET_WAITKIND_IGNORE)
1778 store_waitstatus (ourstatus, status);
1779
1780 return pid_to_ptid (pid);
1781 }
1782
1783 #endif
1784
1785 /* Stop an active thread, verify it still exists, then resume it. */
1786
1787 static int
1788 stop_and_resume_callback (struct lwp_info *lp, void *data)
1789 {
1790 struct lwp_info *ptr;
1791
1792 if (!lp->stopped && !lp->signalled)
1793 {
1794 stop_callback (lp, NULL);
1795 stop_wait_callback (lp, NULL);
1796 /* Resume if the lwp still exists. */
1797 for (ptr = lwp_list; ptr; ptr = ptr->next)
1798 if (lp == ptr)
1799 {
1800 resume_callback (lp, NULL);
1801 resume_set_callback (lp, NULL);
1802 }
1803 }
1804 return 0;
1805 }
1806
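/* Wait for an event in the inferior matching PTID, taking care of LWP
   bookkeeping, pending SIGSTOPs and extended ptrace events; store the
   event in *OURSTATUS and return the ptid it applies to.  */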
1807 static ptid_t
1808 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1809 {
1810 struct lwp_info *lp = NULL;
1811 int options = 0;
1812 int status = 0;
1813 pid_t pid = PIDGET (ptid);
1814 sigset_t flush_mask;
1815
1816 sigemptyset (&flush_mask);
1817
1818 /* Make sure SIGCHLD is blocked. */
1819 if (!sigismember (&blocked_mask, SIGCHLD))
1820 {
1821 sigaddset (&blocked_mask, SIGCHLD);
1822 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1823 }
1824
1825 retry:
1826
1827 /* Make sure there is at least one LWP that has been resumed, at
1828 least if there are any LWPs at all. */
1829 gdb_assert (num_lwps == 0 || iterate_over_lwps (resumed_callback, NULL));
1830
1831 /* First check if there is a LWP with a wait status pending. */
1832 if (pid == -1)
1833 {
1834 /* Any LWP that's been resumed will do. */
1835 lp = iterate_over_lwps (status_callback, NULL);
1836 if (lp)
1837 {
1838 status = lp->status;
1839 lp->status = 0;
1840
1841 if (debug_linux_nat && status)
1842 fprintf_unfiltered (gdb_stdlog,
1843 "LLW: Using pending wait status %s for %s.\n",
1844 status_to_str (status),
1845 target_pid_to_str (lp->ptid));
1846 }
1847
1848 /* But if we don't find one, we'll have to wait, and check both
1849 cloned and uncloned processes. We start with the cloned
1850 processes. */
1851 options = __WCLONE | WNOHANG;
1852 }
1853 else if (is_lwp (ptid))
1854 {
1855 if (debug_linux_nat)
1856 fprintf_unfiltered (gdb_stdlog,
1857 "LLW: Waiting for specific LWP %s.\n",
1858 target_pid_to_str (ptid));
1859
1860 /* We have a specific LWP to check. */
1861 lp = find_lwp_pid (ptid);
1862 gdb_assert (lp);
1863 status = lp->status;
1864 lp->status = 0;
1865
1866 if (debug_linux_nat && status)
1867 fprintf_unfiltered (gdb_stdlog,
1868 "LLW: Using pending wait status %s for %s.\n",
1869 status_to_str (status),
1870 target_pid_to_str (lp->ptid));
1871
1872 /* If we have to wait, take into account whether PID is a cloned
1873 process or not. And we have to convert it to something that
1874 the layer beneath us can understand. */
1875 options = lp->cloned ? __WCLONE : 0;
1876 pid = GET_LWP (ptid);
1877 }
1878
1879 if (status && lp->signalled)
1880 {
1881 /* A pending SIGSTOP may interfere with the normal stream of
1882 events. In a typical case where interference is a problem,
1883 we have a SIGSTOP signal pending for LWP A while
1884 single-stepping it, encounter an event in LWP B, and take the
1885 pending SIGSTOP while trying to stop LWP A. After processing
1886 the event in LWP B, LWP A is continued, and we'll never see
1887 the SIGTRAP associated with the last time we were
1888 single-stepping LWP A. */
1889
1890 /* Resume the thread. It should halt immediately returning the
1891 pending SIGSTOP. */
1892 registers_changed ();
1893 child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step,
1894 TARGET_SIGNAL_0);
1895 if (debug_linux_nat)
1896 fprintf_unfiltered (gdb_stdlog,
1897 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
1898 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1899 target_pid_to_str (lp->ptid));
1900 lp->stopped = 0;
1901 gdb_assert (lp->resumed);
1902
1903 /* This should catch the pending SIGSTOP. */
1904 stop_wait_callback (lp, NULL);
1905 }
1906
1907 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1908 attached process. */
1909 set_sigio_trap ();
1910
1911 while (status == 0)
1912 {
1913 pid_t lwpid;
1914
1915 lwpid = waitpid (pid, &status, options);
1916 if (lwpid > 0)
1917 {
1918 gdb_assert (pid == -1 || lwpid == pid);
1919
1920 if (debug_linux_nat)
1921 {
1922 fprintf_unfiltered (gdb_stdlog,
1923 "LLW: waitpid %ld received %s\n",
1924 (long) lwpid, status_to_str (status));
1925 }
1926
1927 lp = find_lwp_pid (pid_to_ptid (lwpid));
1928
1929 /* Check for stop events reported by a process we didn't
1930 already know about - anything not already in our LWP
1931 list.
1932
1933 If we're expecting to receive stopped processes after
1934 fork, vfork, and clone events, then we'll just add the
1935 new one to our list and go back to waiting for the event
1936 to be reported - the stopped process might be returned
1937 from waitpid before or after the event is. */
1938 if (WIFSTOPPED (status) && !lp)
1939 {
1940 linux_record_stopped_pid (lwpid);
1941 status = 0;
1942 continue;
1943 }
1944
1945 /* Make sure we don't report an event for the exit of an LWP not in
1946 our list, i.e. not part of the current process. This can happen
1947 if we detach from a program we originally forked and then it
1948 exits. */
1949 if (!WIFSTOPPED (status) && !lp)
1950 {
1951 status = 0;
1952 continue;
1953 }
1954
1955 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
1956 CLONE_PTRACE processes which do not use the thread library -
1957 otherwise we wouldn't find the new LWP this way. That doesn't
1958 currently work, and the following code is currently unreachable
1959 due to the two blocks above. If it's fixed some day, this code
1960 should be broken out into a function so that we can also pick up
1961 LWPs from the new interface. */
1962 if (!lp)
1963 {
1964 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
1965 if (options & __WCLONE)
1966 lp->cloned = 1;
1967
1968 if (threaded)
1969 {
1970 gdb_assert (WIFSTOPPED (status)
1971 && WSTOPSIG (status) == SIGSTOP);
1972 lp->signalled = 1;
1973
1974 if (!in_thread_list (inferior_ptid))
1975 {
1976 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1977 GET_PID (inferior_ptid));
1978 add_thread (inferior_ptid);
1979 }
1980
1981 add_thread (lp->ptid);
1982 printf_unfiltered (_("[New %s]\n"),
1983 target_pid_to_str (lp->ptid));
1984 }
1985 }
1986
1987 /* Handle GNU/Linux's extended waitstatus for trace events. */
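/* For such an event the kernel reports a status whose low 16 bits
   look like an ordinary SIGTRAP stop and whose bits 16-23 carry the
   PTRACE_EVENT_* code; a fork, for example, arrives roughly as
   ((PTRACE_EVENT_FORK << 16) | (SIGTRAP << 8) | 0x7f), which is what
   the "status >> 16" test below picks up.  */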
1988 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1989 {
1990 if (debug_linux_nat)
1991 fprintf_unfiltered (gdb_stdlog,
1992 "LLW: Handling extended status 0x%06x\n",
1993 status);
1994 if (linux_nat_handle_extended (lp, status))
1995 {
1996 status = 0;
1997 continue;
1998 }
1999 }
2000
2001 /* Check if the thread has exited. */
2002 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2003 {
2004 if (in_thread_list (lp->ptid))
2005 {
2006 /* Core GDB cannot deal with us deleting the current
2007 thread. */
2008 if (!ptid_equal (lp->ptid, inferior_ptid))
2009 delete_thread (lp->ptid);
2010 printf_unfiltered (_("[%s exited]\n"),
2011 target_pid_to_str (lp->ptid));
2012 }
2013
2014 /* If this is the main thread, we must stop all threads and
2015 verify if they are still alive. This is because in the nptl
2016 thread model, there is no signal issued for exiting LWPs
2017 other than the main thread. We only get the main thread
2018 exit signal once all child threads have already exited.
2019 If we stop all the threads and use the stop_wait_callback
2020 to check if they have exited we can determine whether this
2021 signal should be ignored or whether it means the end of the
2022 debugged application, regardless of which threading model
2023 is being used. */
2024 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2025 {
2026 lp->stopped = 1;
2027 iterate_over_lwps (stop_and_resume_callback, NULL);
2028 }
2029
2030 if (debug_linux_nat)
2031 fprintf_unfiltered (gdb_stdlog,
2032 "LLW: %s exited.\n",
2033 target_pid_to_str (lp->ptid));
2034
2035 delete_lwp (lp->ptid);
2036
2037 /* If there is at least one more LWP, then the exit signal
2038 was not the end of the debugged application and should be
2039 ignored. */
2040 if (num_lwps > 0)
2041 {
2042 /* Make sure there is at least one thread running. */
2043 gdb_assert (iterate_over_lwps (running_callback, NULL));
2044
2045 /* Discard the event. */
2046 status = 0;
2047 continue;
2048 }
2049 }
2050
2051 /* Check if the current LWP has previously exited. In the nptl
2052 thread model, LWPs other than the main thread do not issue
2053 signals when they exit so we must check whenever the thread
2054 has stopped. A similar check is made in stop_wait_callback(). */
2055 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2056 {
2057 if (in_thread_list (lp->ptid))
2058 {
2059 /* Core GDB cannot deal with us deleting the current
2060 thread. */
2061 if (!ptid_equal (lp->ptid, inferior_ptid))
2062 delete_thread (lp->ptid);
2063 printf_unfiltered (_("[%s exited]\n"),
2064 target_pid_to_str (lp->ptid));
2065 }
2066 if (debug_linux_nat)
2067 fprintf_unfiltered (gdb_stdlog,
2068 "LLW: %s exited.\n",
2069 target_pid_to_str (lp->ptid));
2070
2071 delete_lwp (lp->ptid);
2072
2073 /* Make sure there is at least one thread running. */
2074 gdb_assert (iterate_over_lwps (running_callback, NULL));
2075
2076 /* Discard the event. */
2077 status = 0;
2078 continue;
2079 }
2080
2081 /* Make sure we don't report a SIGSTOP that we sent
2082 ourselves in an attempt to stop an LWP. */
2083 if (lp->signalled
2084 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2085 {
2086 if (debug_linux_nat)
2087 fprintf_unfiltered (gdb_stdlog,
2088 "LLW: Delayed SIGSTOP caught for %s.\n",
2089 target_pid_to_str (lp->ptid));
2090
2091 /* This is a delayed SIGSTOP. */
2092 lp->signalled = 0;
2093
2094 registers_changed ();
2095 child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step,
2096 TARGET_SIGNAL_0);
2097 if (debug_linux_nat)
2098 fprintf_unfiltered (gdb_stdlog,
2099 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2100 lp->step ?
2101 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2102 target_pid_to_str (lp->ptid));
2103
2104 lp->stopped = 0;
2105 gdb_assert (lp->resumed);
2106
2107 /* Discard the event. */
2108 status = 0;
2109 continue;
2110 }
2111
2112 break;
2113 }
2114
2115 if (pid == -1)
2116 {
2117 /* Alternate between checking cloned and uncloned processes. */
2118 options ^= __WCLONE;
2119
2120 /* And suspend every time we have checked both. */
2121 if (options & __WCLONE)
2122 sigsuspend (&suspend_mask);
2123 }
2124
2125 /* We shouldn't end up here unless we want to try again. */
2126 gdb_assert (status == 0);
2127 }
2128
2129 clear_sigio_trap ();
2130 clear_sigint_trap ();
2131
2132 gdb_assert (lp);
2133
2134 /* Don't report signals that GDB isn't interested in, such as
2135 signals that are neither printed nor stopped upon. Stopping all
2136 threads can be a bit time-consuming so if we want decent
2137 performance with heavily multi-threaded programs, especially when
2138 they're using a high frequency timer, we'd better avoid it if we
2139 can. */
2140
2141 if (WIFSTOPPED (status))
2142 {
2143 int signo = target_signal_from_host (WSTOPSIG (status));
2144
2145 if (signal_stop_state (signo) == 0
2146 && signal_print_state (signo) == 0
2147 && signal_pass_state (signo) == 1)
2148 {
2149 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2150 here? It is not clear we should. GDB may not expect
2151 other threads to run. On the other hand, not resuming
2152 newly attached threads may cause an unwanted delay in
2153 getting them running. */
2154 registers_changed ();
2155 child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step, signo);
2156 if (debug_linux_nat)
2157 fprintf_unfiltered (gdb_stdlog,
2158 "LLW: %s %s, %s (preempt 'handle')\n",
2159 lp->step ?
2160 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2161 target_pid_to_str (lp->ptid),
2162 signo ? strsignal (signo) : "0");
2163 lp->stopped = 0;
2164 status = 0;
2165 goto retry;
2166 }
2167
2168 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2169 {
2170 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2171 forwarded to the entire process group, that is, all LWP's
2172 will receive it. Since we only want to report it once,
2173 we try to flush it from all LWPs except this one. */
2174 sigaddset (&flush_mask, SIGINT);
2175 }
2176 }
2177
2178 /* This LWP is stopped now. */
2179 lp->stopped = 1;
2180
2181 if (debug_linux_nat)
2182 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2183 status_to_str (status), target_pid_to_str (lp->ptid));
2184
2185 /* Now stop all other LWP's ... */
2186 iterate_over_lwps (stop_callback, NULL);
2187
2188 /* ... and wait until all of them have reported back that they're no
2189 longer running. */
2190 iterate_over_lwps (stop_wait_callback, &flush_mask);
2191 iterate_over_lwps (flush_callback, &flush_mask);
2192
2193 /* If we're not waiting for a specific LWP, choose an event LWP from
2194 among those that have had events. Giving equal priority to all
2195 LWPs that have had events helps prevent starvation. */
2196 if (pid == -1)
2197 select_event_lwp (&lp, &status);
2198
2199 /* Now that we've selected our final event LWP, cancel any
2200 breakpoints in other LWPs that have hit a GDB breakpoint. See
2201 the comment in cancel_breakpoints_callback to find out why. */
2202 iterate_over_lwps (cancel_breakpoints_callback, lp);
2203
2204 /* If we're not running in "threaded" mode, we'll report the bare
2205 process id. */
2206
2207 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2208 {
2209 trap_ptid = (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid)));
2210 if (debug_linux_nat)
2211 fprintf_unfiltered (gdb_stdlog,
2212 "LLW: trap_ptid is %s.\n",
2213 target_pid_to_str (trap_ptid));
2214 }
2215 else
2216 trap_ptid = null_ptid;
2217
2218 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2219 {
2220 *ourstatus = lp->waitstatus;
2221 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2222 }
2223 else
2224 store_waitstatus (ourstatus, status);
2225
2226 return (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid)));
2227 }
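/* The wait loop above boils down to the following pattern: block
   SIGCHLD, poll with WNOHANG while alternating __WCLONE on and off
   (cloned LWPs are only reported with __WCLONE), and sleep in
   sigsuspend between full passes so the arrival of a SIGCHLD wakes
   us up.  The disabled sketch below is an editor-added illustration
   of that pattern only; it is not part of GDB, the helper name
   example_wait_any_lwp is hypothetical, and WAIT_MASK is assumed to
   be a signal mask with SIGCHLD unblocked.  */
#if 0
static pid_t
example_wait_any_lwp (sigset_t *wait_mask, int *statusp)
{
  int options = __WCLONE | WNOHANG;
  pid_t lwpid;

  for (;;)
    {
      lwpid = waitpid (-1, statusp, options);
      if (lwpid > 0)
	return lwpid;		/* Got an event from some LWP.  */

      /* Alternate between cloned and uncloned children; sleep once
	 both flavors have been polled without finding anything.  */
      options ^= __WCLONE;
      if (options & __WCLONE)
	sigsuspend (wait_mask);
    }
}
#endif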
2228
2229 static int
2230 kill_callback (struct lwp_info *lp, void *data)
2231 {
2232 errno = 0;
2233 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2234 if (debug_linux_nat)
2235 fprintf_unfiltered (gdb_stdlog,
2236 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2237 target_pid_to_str (lp->ptid),
2238 errno ? safe_strerror (errno) : "OK");
2239
2240 return 0;
2241 }
2242
2243 static int
2244 kill_wait_callback (struct lwp_info *lp, void *data)
2245 {
2246 pid_t pid;
2247
2248 /* We must make sure that there are no pending events (delayed
2249 SIGSTOPs, pending SIGTRAPs, etc.), so that the current
2250 program doesn't interfere with any following debugging session. */
2251
2252 /* For cloned processes we must check both with __WCLONE and
2253 without, since the exit status of a cloned process isn't reported
2254 with __WCLONE. */
2255 if (lp->cloned)
2256 {
2257 do
2258 {
2259 pid = waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2260 if (pid != (pid_t) -1 && debug_linux_nat)
2261 {
2262 fprintf_unfiltered (gdb_stdlog,
2263 "KWC: wait %s received unknown.\n",
2264 target_pid_to_str (lp->ptid));
2265 }
2266 }
2267 while (pid == GET_LWP (lp->ptid));
2268
2269 gdb_assert (pid == -1 && errno == ECHILD);
2270 }
2271
2272 do
2273 {
2274 pid = waitpid (GET_LWP (lp->ptid), NULL, 0);
2275 if (pid != (pid_t) -1 && debug_linux_nat)
2276 {
2277 fprintf_unfiltered (gdb_stdlog,
2278 "KWC: wait %s received unknown.\n",
2279 target_pid_to_str (lp->ptid));
2280 }
2281 }
2282 while (pid == GET_LWP (lp->ptid));
2283
2284 gdb_assert (pid == -1 && errno == ECHILD);
2285 return 0;
2286 }
2287
2288 static void
2289 linux_nat_kill (void)
2290 {
2291 /* Kill all LWP's ... */
2292 iterate_over_lwps (kill_callback, NULL);
2293
2294 /* ... and wait until we've flushed all events. */
2295 iterate_over_lwps (kill_wait_callback, NULL);
2296
2297 target_mourn_inferior ();
2298 }
2299
2300 static void
2301 linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
2302 int from_tty)
2303 {
2304 deprecated_child_ops.to_create_inferior (exec_file, allargs, env, from_tty);
2305 }
2306
2307 static void
2308 linux_nat_mourn_inferior (void)
2309 {
2310 trap_ptid = null_ptid;
2311
2312 /* Destroy LWP info; it's no longer valid. */
2313 init_lwp_list ();
2314
2315 /* Restore the original signal mask. */
2316 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2317 sigemptyset (&blocked_mask);
2318
2319 deprecated_child_ops.to_mourn_inferior ();
2320 }
2321
2322 static int
2323 linux_nat_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write,
2324 struct mem_attrib *attrib, struct target_ops *target)
2325 {
2326 struct cleanup *old_chain = save_inferior_ptid ();
2327 int xfer;
2328
2329 if (is_lwp (inferior_ptid))
2330 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2331
2332 xfer = linux_proc_xfer_memory (memaddr, myaddr, len, write, attrib, target);
2333 if (xfer == 0)
2334 xfer = child_xfer_memory (memaddr, myaddr, len, write, attrib, target);
2335
2336 do_cleanups (old_chain);
2337 return xfer;
2338 }
2339
2340 static int
2341 linux_nat_thread_alive (ptid_t ptid)
2342 {
2343 gdb_assert (is_lwp (ptid));
2344
2345 errno = 0;
2346 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2347 if (debug_linux_nat)
2348 fprintf_unfiltered (gdb_stdlog,
2349 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2350 target_pid_to_str (ptid),
2351 errno ? safe_strerror (errno) : "OK");
2352 if (errno)
2353 return 0;
2354
2355 return 1;
2356 }
2357
2358 static char *
2359 linux_nat_pid_to_str (ptid_t ptid)
2360 {
2361 static char buf[64];
2362
2363 if (is_lwp (ptid))
2364 {
2365 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2366 return buf;
2367 }
2368
2369 return normal_pid_to_str (ptid);
2370 }
2371
2372 static void
2373 init_linux_nat_ops (void)
2374 {
2375 #if 0
2376 linux_nat_ops.to_open = linux_nat_open;
2377 #endif
2378 linux_nat_ops.to_shortname = "lwp-layer";
2379 linux_nat_ops.to_longname = "lwp-layer";
2380 linux_nat_ops.to_doc = "Low level threads support (LWP layer)";
2381 linux_nat_ops.to_attach = linux_nat_attach;
2382 linux_nat_ops.to_detach = linux_nat_detach;
2383 linux_nat_ops.to_resume = linux_nat_resume;
2384 linux_nat_ops.to_wait = linux_nat_wait;
2385 /* fetch_inferior_registers and store_inferior_registers will
2386 honor the LWP id, so we can use them directly. */
2387 linux_nat_ops.to_fetch_registers = fetch_inferior_registers;
2388 linux_nat_ops.to_store_registers = store_inferior_registers;
2389 linux_nat_ops.deprecated_xfer_memory = linux_nat_xfer_memory;
2390 linux_nat_ops.to_kill = linux_nat_kill;
2391 linux_nat_ops.to_create_inferior = linux_nat_create_inferior;
2392 linux_nat_ops.to_mourn_inferior = linux_nat_mourn_inferior;
2393 linux_nat_ops.to_thread_alive = linux_nat_thread_alive;
2394 linux_nat_ops.to_pid_to_str = linux_nat_pid_to_str;
2395 linux_nat_ops.to_post_startup_inferior = child_post_startup_inferior;
2396 linux_nat_ops.to_post_attach = child_post_attach;
2397 linux_nat_ops.to_insert_fork_catchpoint = child_insert_fork_catchpoint;
2398 linux_nat_ops.to_insert_vfork_catchpoint = child_insert_vfork_catchpoint;
2399 linux_nat_ops.to_insert_exec_catchpoint = child_insert_exec_catchpoint;
2400
2401 linux_nat_ops.to_stratum = thread_stratum;
2402 linux_nat_ops.to_has_thread_control = tc_schedlock;
2403 linux_nat_ops.to_magic = OPS_MAGIC;
2404 }
2405
2406 static void
2407 sigchld_handler (int signo)
2408 {
2409 /* Do nothing. The only reason for this handler is that it allows
2410 us to use sigsuspend in linux_nat_wait above to wait for the
2411 arrival of a SIGCHLD. */
2412 }
2413
2414 /* Accepts an integer PID; returns a string representing a file that
2415 can be opened to get the symbols for the child process. */
2416
2417 char *
2418 child_pid_to_exec_file (int pid)
2419 {
2420 char *name1, *name2;
2421
2422 name1 = xmalloc (MAXPATHLEN);
2423 name2 = xmalloc (MAXPATHLEN);
2424 make_cleanup (xfree, name1);
2425 make_cleanup (xfree, name2);
2426 memset (name2, 0, MAXPATHLEN);
2427
2428 sprintf (name1, "/proc/%d/exe", pid);
2429 if (readlink (name1, name2, MAXPATHLEN) > 0)
2430 return name2;
2431 else
2432 return name1;
2433 }
2434
2435 /* Service function for corefiles and info proc. */
2436
2437 static int
2438 read_mapping (FILE *mapfile,
2439 long long *addr,
2440 long long *endaddr,
2441 char *permissions,
2442 long long *offset,
2443 char *device, long long *inode, char *filename)
2444 {
2445 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2446 addr, endaddr, permissions, offset, device, inode);
2447
2448 if (ret > 0 && ret != EOF && *inode != 0)
2449 {
2450 /* Eat everything up to EOL for the filename. This will prevent
2451 weird filenames (such as one with embedded whitespace) from
2452 confusing this code. It also makes this code more robust in
2453 respect to annotations the kernel may add after the filename.
2454
2455 Note the filename is used for informational purposes
2456 only. */
2457 ret += fscanf (mapfile, "%[^\n]\n", filename);
2458 }
2459 else
2460 {
2461 filename[0] = '\0'; /* no filename */
2462 fscanf (mapfile, "\n");
2463 }
2464 return (ret != 0 && ret != EOF);
2465 }
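/* For reference, a typical /proc/PID/maps line that the fscanf
   pattern above is meant to match looks like

     08048000-0804c000 r-xp 00000000 03:01 12345   /bin/cat

   i.e. start and end addresses, permissions, file offset, device,
   inode, and then the optional filename that the second fscanf call
   slurps up to the end of the line.  (The values shown here are
   only an illustration.)  */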
2466
2467 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2468 regions in the inferior for a corefile. */
2469
2470 static int
2471 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2472 unsigned long,
2473 int, int, int, void *), void *obfd)
2474 {
2475 long long pid = PIDGET (inferior_ptid);
2476 char mapsfilename[MAXPATHLEN];
2477 FILE *mapsfile;
2478 long long addr, endaddr, size, offset, inode;
2479 char permissions[8], device[8], filename[MAXPATHLEN];
2480 int read, write, exec;
2481 int ret;
2482
2483 /* Compose the filename for the /proc memory map, and open it. */
2484 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2485 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2486 error (_("Could not open %s."), mapsfilename);
2487
2488 if (info_verbose)
2489 fprintf_filtered (gdb_stdout,
2490 "Reading memory regions from %s\n", mapsfilename);
2491
2492 /* Now iterate until end-of-file. */
2493 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2494 &offset, &device[0], &inode, &filename[0]))
2495 {
2496 size = endaddr - addr;
2497
2498 /* Get the segment's permissions. */
2499 read = (strchr (permissions, 'r') != 0);
2500 write = (strchr (permissions, 'w') != 0);
2501 exec = (strchr (permissions, 'x') != 0);
2502
2503 if (info_verbose)
2504 {
2505 fprintf_filtered (gdb_stdout,
2506 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2507 size, paddr_nz (addr),
2508 read ? 'r' : ' ',
2509 write ? 'w' : ' ', exec ? 'x' : ' ');
2510 if (filename && filename[0])
2511 fprintf_filtered (gdb_stdout, " for %s", filename);
2512 fprintf_filtered (gdb_stdout, "\n");
2513 }
2514
2515 /* Invoke the callback function to create the corefile
2516 segment. */
2517 func (addr, size, read, write, exec, obfd);
2518 }
2519 fclose (mapsfile);
2520 return 0;
2521 }
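/* A callback passed to linux_nat_find_memory_regions must match the
   (CORE_ADDR, unsigned long, int, int, int, void *) signature used
   above.  The disabled sketch below is an editor-added illustration
   of such a callback, not the one GDB actually installs; the name
   example_count_writable_bytes and its use of the opaque data slot
   as a plain counter are hypothetical.  */
#if 0
static int
example_count_writable_bytes (CORE_ADDR vaddr, unsigned long size,
			      int read, int write, int exec, void *data)
{
  unsigned long *total = data;

  /* Accumulate the sizes of writable segments only.  */
  if (write)
    *total += size;

  /* The return value is currently ignored by the loop above.  */
  return 0;
}
#endif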
2522
2523 /* Records the thread's register state for the corefile note
2524 section. */
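/* Each elfcore_write_* call below appends one ELF note to the growing
   buffer: it updates *NOTE_SIZE and returns the (possibly reallocated)
   buffer pointer, which is why the result is assigned back to
   NOTE_DATA after every call.  */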
2525
2526 static char *
2527 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2528 char *note_data, int *note_size)
2529 {
2530 gdb_gregset_t gregs;
2531 gdb_fpregset_t fpregs;
2532 #ifdef FILL_FPXREGSET
2533 gdb_fpxregset_t fpxregs;
2534 #endif
2535 unsigned long lwp = ptid_get_lwp (ptid);
2536
2537 fill_gregset (&gregs, -1);
2538 note_data = (char *) elfcore_write_prstatus (obfd,
2539 note_data,
2540 note_size,
2541 lwp,
2542 stop_signal, &gregs);
2543
2544 fill_fpregset (&fpregs, -1);
2545 note_data = (char *) elfcore_write_prfpreg (obfd,
2546 note_data,
2547 note_size,
2548 &fpregs, sizeof (fpregs));
2549 #ifdef FILL_FPXREGSET
2550 fill_fpxregset (&fpxregs, -1);
2551 note_data = (char *) elfcore_write_prxfpreg (obfd,
2552 note_data,
2553 note_size,
2554 &fpxregs, sizeof (fpxregs));
2555 #endif
2556 return note_data;
2557 }
2558
2559 struct linux_nat_corefile_thread_data
2560 {
2561 bfd *obfd;
2562 char *note_data;
2563 int *note_size;
2564 int num_notes;
2565 };
2566
2567 /* Called by gdbthread.c once per thread. Records the thread's
2568 register state for the corefile note section. */
2569
2570 static int
2571 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2572 {
2573 struct linux_nat_corefile_thread_data *args = data;
2574 ptid_t saved_ptid = inferior_ptid;
2575
2576 inferior_ptid = ti->ptid;
2577 registers_changed ();
2578 target_fetch_registers (-1); /* FIXME should not be necessary;
2579 fill_gregset should do it automatically. */
2580 args->note_data = linux_nat_do_thread_registers (args->obfd,
2581 ti->ptid,
2582 args->note_data,
2583 args->note_size);
2584 args->num_notes++;
2585 inferior_ptid = saved_ptid;
2586 registers_changed ();
2587 target_fetch_registers (-1); /* FIXME should not be necessary;
2588 fill_gregset should do it automatically. */
2589 return 0;
2590 }
2591
2592 /* Records the register state for the corefile note section. */
2593
2594 static char *
2595 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2596 char *note_data, int *note_size)
2597 {
2598 registers_changed ();
2599 target_fetch_registers (-1); /* FIXME should not be necessary;
2600 fill_gregset should do it automatically. */
2601 return linux_nat_do_thread_registers (obfd,
2602 ptid_build (ptid_get_pid (inferior_ptid),
2603 ptid_get_pid (inferior_ptid),
2604 0),
2605 note_data, note_size);
2607 }
2608
2609 /* Fills the "to_make_corefile_note" target vector. Builds the note
2610 section for a corefile, and returns it in a malloc buffer. */
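/* The resulting buffer contains, roughly: one prpsinfo note for the
   process, then per-LWP register notes written by
   linux_nat_do_thread_registers, and finally an NT_AUXV note when
   auxiliary vector data could be read from the target.  */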
2611
2612 static char *
2613 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2614 {
2615 struct linux_nat_corefile_thread_data thread_args;
2616 struct cleanup *old_chain;
2617 char fname[16] = { '\0' };
2618 char psargs[80] = { '\0' };
2619 char *note_data = NULL;
2620 ptid_t current_ptid = inferior_ptid;
2621 char *auxv;
2622 int auxv_len;
2623
2624 if (get_exec_file (0))
2625 {
2626 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2627 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2628 if (get_inferior_args ())
2629 {
2630 strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
2631 strncat (psargs, get_inferior_args (),
2632 sizeof (psargs) - strlen (psargs));
2633 }
2634 note_data = (char *) elfcore_write_prpsinfo (obfd,
2635 note_data,
2636 note_size, fname, psargs);
2637 }
2638
2639 /* Dump information for threads. */
2640 thread_args.obfd = obfd;
2641 thread_args.note_data = note_data;
2642 thread_args.note_size = note_size;
2643 thread_args.num_notes = 0;
2644 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2645 if (thread_args.num_notes == 0)
2646 {
2647 /* iterate_over_lwps didn't come up with any LWPs; just
2648 use inferior_ptid. */
2649 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2650 note_data, note_size);
2651 }
2652 else
2653 {
2654 note_data = thread_args.note_data;
2655 }
2656
2657 auxv_len = target_auxv_read (&current_target, &auxv);
2658 if (auxv_len > 0)
2659 {
2660 note_data = elfcore_write_note (obfd, note_data, note_size,
2661 "CORE", NT_AUXV, auxv, auxv_len);
2662 xfree (auxv);
2663 }
2664
2665 make_cleanup (xfree, note_data);
2666 return note_data;
2667 }
2668
2669 /* Implement the "info proc" command. */
2670
2671 static void
2672 linux_nat_info_proc_cmd (char *args, int from_tty)
2673 {
2674 long long pid = PIDGET (inferior_ptid);
2675 FILE *procfile;
2676 char **argv = NULL;
2677 char buffer[MAXPATHLEN];
2678 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2679 int cmdline_f = 1;
2680 int cwd_f = 1;
2681 int exe_f = 1;
2682 int mappings_f = 0;
2683 int environ_f = 0;
2684 int status_f = 0;
2685 int stat_f = 0;
2686 int all = 0;
2687 struct stat dummy;
2688
2689 if (args)
2690 {
2691 /* Break up 'args' into an argv array. */
2692 if ((argv = buildargv (args)) == NULL)
2693 nomem (0);
2694 else
2695 make_cleanup_freeargv (argv);
2696 }
2697 while (argv != NULL && *argv != NULL)
2698 {
2699 if (isdigit (argv[0][0]))
2700 {
2701 pid = strtoul (argv[0], NULL, 10);
2702 }
2703 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2704 {
2705 mappings_f = 1;
2706 }
2707 else if (strcmp (argv[0], "status") == 0)
2708 {
2709 status_f = 1;
2710 }
2711 else if (strcmp (argv[0], "stat") == 0)
2712 {
2713 stat_f = 1;
2714 }
2715 else if (strcmp (argv[0], "cmd") == 0)
2716 {
2717 cmdline_f = 1;
2718 }
2719 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2720 {
2721 exe_f = 1;
2722 }
2723 else if (strcmp (argv[0], "cwd") == 0)
2724 {
2725 cwd_f = 1;
2726 }
2727 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2728 {
2729 all = 1;
2730 }
2731 else
2732 {
2733 /* [...] (future options here) */
2734 }
2735 argv++;
2736 }
2737 if (pid == 0)
2738 error (_("No current process: you must name one."));
2739
2740 sprintf (fname1, "/proc/%lld", pid);
2741 if (stat (fname1, &dummy) != 0)
2742 error (_("No /proc directory: '%s'"), fname1);
2743
2744 printf_filtered (_("process %lld\n"), pid);
2745 if (cmdline_f || all)
2746 {
2747 sprintf (fname1, "/proc/%lld/cmdline", pid);
2748 if ((procfile = fopen (fname1, "r")) != NULL)
2749 {
2750 fgets (buffer, sizeof (buffer), procfile);
2751 printf_filtered ("cmdline = '%s'\n", buffer);
2752 fclose (procfile);
2753 }
2754 else
2755 warning (_("unable to open /proc file '%s'"), fname1);
2756 }
2757 if (cwd_f || all)
2758 {
2759 sprintf (fname1, "/proc/%lld/cwd", pid);
2760 memset (fname2, 0, sizeof (fname2));
2761 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2762 printf_filtered ("cwd = '%s'\n", fname2);
2763 else
2764 warning (_("unable to read link '%s'"), fname1);
2765 }
2766 if (exe_f || all)
2767 {
2768 sprintf (fname1, "/proc/%lld/exe", pid);
2769 memset (fname2, 0, sizeof (fname2));
2770 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2771 printf_filtered ("exe = '%s'\n", fname2);
2772 else
2773 warning (_("unable to read link '%s'"), fname1);
2774 }
2775 if (mappings_f || all)
2776 {
2777 sprintf (fname1, "/proc/%lld/maps", pid);
2778 if ((procfile = fopen (fname1, "r")) != NULL)
2779 {
2780 long long addr, endaddr, size, offset, inode;
2781 char permissions[8], device[8], filename[MAXPATHLEN];
2782
2783 printf_filtered (_("Mapped address spaces:\n\n"));
2784 if (TARGET_ADDR_BIT == 32)
2785 {
2786 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2787 "Start Addr",
2788 " End Addr",
2789 " Size", " Offset", "objfile");
2790 }
2791 else
2792 {
2793 printf_filtered (" %18s %18s %10s %10s %7s\n",
2794 "Start Addr",
2795 " End Addr",
2796 " Size", " Offset", "objfile");
2797 }
2798
2799 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2800 &offset, &device[0], &inode, &filename[0]))
2801 {
2802 size = endaddr - addr;
2803
2804 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2805 calls here (and possibly above) should be abstracted
2806 out into their own functions? Andrew suggests using
2807 a generic local_address_string instead to print out
2808 the addresses; that makes sense to me, too. */
2809
2810 if (TARGET_ADDR_BIT == 32)
2811 {
2812 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2813 (unsigned long) addr, /* FIXME: pr_addr */
2814 (unsigned long) endaddr,
2815 (int) size,
2816 (unsigned int) offset,
2817 filename[0] ? filename : "");
2818 }
2819 else
2820 {
2821 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2822 (unsigned long) addr, /* FIXME: pr_addr */
2823 (unsigned long) endaddr,
2824 (int) size,
2825 (unsigned int) offset,
2826 filename[0] ? filename : "");
2827 }
2828 }
2829
2830 fclose (procfile);
2831 }
2832 else
2833 warning (_("unable to open /proc file '%s'"), fname1);
2834 }
2835 if (status_f || all)
2836 {
2837 sprintf (fname1, "/proc/%lld/status", pid);
2838 if ((procfile = fopen (fname1, "r")) != NULL)
2839 {
2840 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2841 puts_filtered (buffer);
2842 fclose (procfile);
2843 }
2844 else
2845 warning (_("unable to open /proc file '%s'"), fname1);
2846 }
2847 if (stat_f || all)
2848 {
2849 sprintf (fname1, "/proc/%lld/stat", pid);
2850 if ((procfile = fopen (fname1, "r")) != NULL)
2851 {
2852 int itmp;
2853 char ctmp;
2854
2855 if (fscanf (procfile, "%d ", &itmp) > 0)
2856 printf_filtered (_("Process: %d\n"), itmp);
2857 if (fscanf (procfile, "%s ", &buffer[0]) > 0)
2858 printf_filtered (_("Exec file: %s\n"), buffer);
2859 if (fscanf (procfile, "%c ", &ctmp) > 0)
2860 printf_filtered (_("State: %c\n"), ctmp);
2861 if (fscanf (procfile, "%d ", &itmp) > 0)
2862 printf_filtered (_("Parent process: %d\n"), itmp);
2863 if (fscanf (procfile, "%d ", &itmp) > 0)
2864 printf_filtered (_("Process group: %d\n"), itmp);
2865 if (fscanf (procfile, "%d ", &itmp) > 0)
2866 printf_filtered (_("Session id: %d\n"), itmp);
2867 if (fscanf (procfile, "%d ", &itmp) > 0)
2868 printf_filtered (_("TTY: %d\n"), itmp);
2869 if (fscanf (procfile, "%d ", &itmp) > 0)
2870 printf_filtered (_("TTY owner process group: %d\n"), itmp);
2871 if (fscanf (procfile, "%u ", &itmp) > 0)
2872 printf_filtered (_("Flags: 0x%x\n"), itmp);
2873 if (fscanf (procfile, "%u ", &itmp) > 0)
2874 printf_filtered (_("Minor faults (no memory page): %u\n"),
2875 (unsigned int) itmp);
2876 if (fscanf (procfile, "%u ", &itmp) > 0)
2877 printf_filtered (_("Minor faults, children: %u\n"),
2878 (unsigned int) itmp);
2879 if (fscanf (procfile, "%u ", &itmp) > 0)
2880 printf_filtered (_("Major faults (memory page faults): %u\n"),
2881 (unsigned int) itmp);
2882 if (fscanf (procfile, "%u ", &itmp) > 0)
2883 printf_filtered (_("Major faults, children: %u\n"),
2884 (unsigned int) itmp);
2885 if (fscanf (procfile, "%d ", &itmp) > 0)
2886 printf_filtered ("utime: %d\n", itmp);
2887 if (fscanf (procfile, "%d ", &itmp) > 0)
2888 printf_filtered ("stime: %d\n", itmp);
2889 if (fscanf (procfile, "%d ", &itmp) > 0)
2890 printf_filtered ("utime, children: %d\n", itmp);
2891 if (fscanf (procfile, "%d ", &itmp) > 0)
2892 printf_filtered ("stime, children: %d\n", itmp);
2893 if (fscanf (procfile, "%d ", &itmp) > 0)
2894 printf_filtered (_("jiffies remaining in current time slice: %d\n"),
2895 itmp);
2896 if (fscanf (procfile, "%d ", &itmp) > 0)
2897 printf_filtered ("'nice' value: %d\n", itmp);
2898 if (fscanf (procfile, "%u ", &itmp) > 0)
2899 printf_filtered (_("jiffies until next timeout: %u\n"),
2900 (unsigned int) itmp);
2901 if (fscanf (procfile, "%u ", &itmp) > 0)
2902 printf_filtered ("jiffies until next SIGALRM: %u\n",
2903 (unsigned int) itmp);
2904 if (fscanf (procfile, "%d ", &itmp) > 0)
2905 printf_filtered (_("start time (jiffies since system boot): %d\n"),
2906 itmp);
2907 if (fscanf (procfile, "%u ", &itmp) > 0)
2908 printf_filtered (_("Virtual memory size: %u\n"),
2909 (unsigned int) itmp);
2910 if (fscanf (procfile, "%u ", &itmp) > 0)
2911 printf_filtered (_("Resident set size: %u\n"), (unsigned int) itmp);
2912 if (fscanf (procfile, "%u ", &itmp) > 0)
2913 printf_filtered ("rlim: %u\n", (unsigned int) itmp);
2914 if (fscanf (procfile, "%u ", &itmp) > 0)
2915 printf_filtered (_("Start of text: 0x%x\n"), itmp);
2916 if (fscanf (procfile, "%u ", &itmp) > 0)
2917 printf_filtered (_("End of text: 0x%x\n"), itmp);
2918 if (fscanf (procfile, "%u ", &itmp) > 0)
2919 printf_filtered (_("Start of stack: 0x%x\n"), itmp);
2920 #if 0 /* Don't know how architecture-dependent the rest is...
2921 Anyway the signal bitmap info is available from "status". */
2922 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2923 printf_filtered (_("Kernel stack pointer: 0x%x\n"), itmp);
2924 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2925 printf_filtered (_("Kernel instr pointer: 0x%x\n"), itmp);
2926 if (fscanf (procfile, "%d ", &itmp) > 0)
2927 printf_filtered (_("Pending signals bitmap: 0x%x\n"), itmp);
2928 if (fscanf (procfile, "%d ", &itmp) > 0)
2929 printf_filtered (_("Blocked signals bitmap: 0x%x\n"), itmp);
2930 if (fscanf (procfile, "%d ", &itmp) > 0)
2931 printf_filtered (_("Ignored signals bitmap: 0x%x\n"), itmp);
2932 if (fscanf (procfile, "%d ", &itmp) > 0)
2933 printf_filtered (_("Caught signals bitmap: 0x%x\n"), itmp);
2934 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2935 printf_filtered (_("wchan (system call): 0x%x\n"), itmp);
2936 #endif
2937 fclose (procfile);
2938 }
2939 else
2940 warning (_("unable to open /proc file '%s'"), fname1);
2941 }
2942 }
2943
2944 int
2945 linux_proc_xfer_memory (CORE_ADDR addr, char *myaddr, int len, int write,
2946 struct mem_attrib *attrib, struct target_ops *target)
2947 {
2948 int fd, ret;
2949 char filename[64];
2950
2951 if (write)
2952 return 0;
2953
2954 /* Don't bother for transfers of just a word or two. */
2955 if (len < 3 * sizeof (long))
2956 return 0;
2957
2958 /* We could keep this file open and cache it - possibly one per
2959 thread. That requires some juggling, but is even faster. */
2960 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
2961 fd = open (filename, O_RDONLY | O_LARGEFILE);
2962 if (fd == -1)
2963 return 0;
2964
2965 /* If pread64 is available, use it. It's faster if the kernel
2966 supports it (only one syscall), and it's 64-bit safe even on
2967 32-bit platforms (for instance, SPARC debugging a SPARC64
2968 application). */
2969 #ifdef HAVE_PREAD64
2970 if (pread64 (fd, myaddr, len, addr) != len)
2971 #else
2972 if (lseek (fd, addr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2973 #endif
2974 ret = 0;
2975 else
2976 ret = len;
2977
2978 close (fd);
2979 return ret;
2980 }
2981
2982 /* Parse LINE as a signal set and add its set bits to SIGS. */
2983
2984 static void
2985 add_line_to_sigset (const char *line, sigset_t *sigs)
2986 {
2987 int len = strlen (line) - 1;
2988 const char *p;
2989 int signum;
2990
2991 if (line[len] != '\n')
2992 error (_("Could not parse signal set: %s"), line);
2993
2994 p = line;
2995 signum = len * 4;
2996 while (len-- > 0)
2997 {
2998 int digit;
2999
3000 if (*p >= '0' && *p <= '9')
3001 digit = *p - '0';
3002 else if (*p >= 'a' && *p <= 'f')
3003 digit = *p - 'a' + 10;
3004 else
3005 error (_("Could not parse signal set: %s"), line);
3006
3007 signum -= 4;
3008
3009 if (digit & 1)
3010 sigaddset (sigs, signum + 1);
3011 if (digit & 2)
3012 sigaddset (sigs, signum + 2);
3013 if (digit & 4)
3014 sigaddset (sigs, signum + 3);
3015 if (digit & 8)
3016 sigaddset (sigs, signum + 4);
3017
3018 p++;
3019 }
3020 }
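/* For example, given the 16-digit status line "0000000000010002\n",
   the rightmost digit (2) sets signal 2 (SIGINT) and the 1 five
   places from the right sets signal 17 (SIGCHLD on most Linux
   targets): each hex digit covers four consecutive signal numbers,
   least significant digit first.  */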
3021
3022 /* Find process PID's pending, blocked and ignored signals from
3023 /proc/pid/status and set PENDING, BLOCKED and IGNORED to match. */
3024
3025 void
3026 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3027 {
3028 FILE *procfile;
3029 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3030 int signum;
3031
3032 sigemptyset (pending);
3033 sigemptyset (blocked);
3034 sigemptyset (ignored);
3035 sprintf (fname, "/proc/%d/status", pid);
3036 procfile = fopen (fname, "r");
3037 if (procfile == NULL)
3038 error (_("Could not open %s"), fname);
3039
3040 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3041 {
3042 /* Normal queued signals are on the SigPnd line in the status
3043 file. However, 2.6 kernels also have a "shared" pending
3044 queue for delivering signals to a thread group, so check for
3045 a ShdPnd line also.
3046
3047 Unfortunately some Red Hat kernels include the shared pending
3048 queue but not the ShdPnd status field. */
3049
3050 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3051 add_line_to_sigset (buffer + 8, pending);
3052 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3053 add_line_to_sigset (buffer + 8, pending);
3054 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3055 add_line_to_sigset (buffer + 8, blocked);
3056 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3057 add_line_to_sigset (buffer + 8, ignored);
3058 }
3059
3060 fclose (procfile);
3061 }
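/* The sketch below is an editor-added illustration of how a caller
   might consume the three sets filled in above; it is not code GDB
   itself runs, and the function name example_report_pending_sigint
   is hypothetical.  */
#if 0
static void
example_report_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  if (sigismember (&pending, SIGINT))
    printf_filtered (_("SIGINT is pending for process %d.\n"), pid);
}
#endif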
3062
3063 void
3064 _initialize_linux_nat (void)
3065 {
3066 struct sigaction action;
3067 extern void thread_db_init (struct target_ops *);
3068
3069 deprecated_child_ops.to_find_memory_regions = linux_nat_find_memory_regions;
3070 deprecated_child_ops.to_make_corefile_notes = linux_nat_make_corefile_notes;
3071
3072 add_info ("proc", linux_nat_info_proc_cmd, _("\
3073 Show /proc process information about any running process.\n\
3074 Specify any process id, or use the program being debugged by default.\n\
3075 Specify any of the following keywords for detailed info:\n\
3076 mappings -- list of mapped memory regions.\n\
3077 stat -- list a bunch of random process info.\n\
3078 status -- list a different bunch of random process info.\n\
3079 all -- list all available /proc info."));
3080
3081 init_linux_nat_ops ();
3082 add_target (&linux_nat_ops);
3083 thread_db_init (&linux_nat_ops);
3084
3085 /* Save the original signal mask. */
3086 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3087
3088 action.sa_handler = sigchld_handler;
3089 sigemptyset (&action.sa_mask);
3090 action.sa_flags = 0;
3091 sigaction (SIGCHLD, &action, NULL);
3092
3093 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3094 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3095 sigdelset (&suspend_mask, SIGCHLD);
3096
3097 sigemptyset (&blocked_mask);
3098
3099 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3100 Set debugging of GNU/Linux lwp module."), _("\
3101 Show debugging of GNU/Linux lwp module."), _("\
3102 Enables printf debugging output."),
3103 NULL,
3104 show_debug_linux_nat,
3105 &setdebuglist, &showdebuglist);
3106 }
3107 \f
3108
3109 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3110 the GNU/Linux Threads library and therefore doesn't really belong
3111 here. */
3112
3113 /* Read variable NAME in the target and return its value if found.
3114 Otherwise return zero. It is assumed that the type of the variable
3115 is `int'. */
3116
3117 static int
3118 get_signo (const char *name)
3119 {
3120 struct minimal_symbol *ms;
3121 int signo;
3122
3123 ms = lookup_minimal_symbol (name, NULL, NULL);
3124 if (ms == NULL)
3125 return 0;
3126
3127 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (char *) &signo,
3128 sizeof (signo)) != 0)
3129 return 0;
3130
3131 return signo;
3132 }
3133
3134 /* Return the set of signals used by the threads library in *SET. */
3135
3136 void
3137 lin_thread_get_thread_signals (sigset_t *set)
3138 {
3139 struct sigaction action;
3140 int restart, cancel;
3141
3142 sigemptyset (set);
3143
3144 restart = get_signo ("__pthread_sig_restart");
3145 if (restart == 0)
3146 return;
3147
3148 cancel = get_signo ("__pthread_sig_cancel");
3149 if (cancel == 0)
3150 return;
3151
3152 sigaddset (set, restart);
3153 sigaddset (set, cancel);
3154
3155 /* The GNU/Linux Threads library makes terminating threads send a
3156 special "cancel" signal instead of SIGCHLD. Make sure we catch
3157 those (to prevent them from terminating GDB itself, which is
3158 likely to be their default action) and treat them the same way as
3159 SIGCHLD. */
3160
3161 action.sa_handler = sigchld_handler;
3162 sigemptyset (&action.sa_mask);
3163 action.sa_flags = 0;
3164 sigaction (cancel, &action, NULL);
3165
3166 /* We block the "cancel" signal throughout this code ... */
3167 sigaddset (&blocked_mask, cancel);
3168 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3169
3170 /* ... except during a sigsuspend. */
3171 sigdelset (&suspend_mask, cancel);
3172 }