1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23 #include "defs.h"
24 #include "inferior.h"
25 #include "target.h"
26 #include "gdb_string.h"
27 #include "gdb_wait.h"
28 #include "gdb_assert.h"
29 #ifdef HAVE_TKILL_SYSCALL
30 #include <unistd.h>
31 #include <sys/syscall.h>
32 #endif
33 #include <sys/ptrace.h>
34 #include "linux-nat.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
37 #include "gdbcmd.h"
38 #include "regcache.h"
39 #include "regset.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/param.h> /* for MAXPATHLEN */
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include "gdbthread.h" /* for struct thread_info etc. */
49 #include "gdb_stat.h" /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
51
52 #ifndef O_LARGEFILE
53 #define O_LARGEFILE 0
54 #endif
55
56 /* If the system headers did not provide the constants, hard-code the normal
57 values. */
58 #ifndef PTRACE_EVENT_FORK
59
60 #define PTRACE_SETOPTIONS 0x4200
61 #define PTRACE_GETEVENTMSG 0x4201
62
63 /* options set using PTRACE_SETOPTIONS */
64 #define PTRACE_O_TRACESYSGOOD 0x00000001
65 #define PTRACE_O_TRACEFORK 0x00000002
66 #define PTRACE_O_TRACEVFORK 0x00000004
67 #define PTRACE_O_TRACECLONE 0x00000008
68 #define PTRACE_O_TRACEEXEC 0x00000010
69 #define PTRACE_O_TRACEVFORKDONE 0x00000020
70 #define PTRACE_O_TRACEEXIT 0x00000040
71
72 /* Wait extended result codes for the above trace options. */
73 #define PTRACE_EVENT_FORK 1
74 #define PTRACE_EVENT_VFORK 2
75 #define PTRACE_EVENT_CLONE 3
76 #define PTRACE_EVENT_EXEC 4
77 #define PTRACE_EVENT_VFORK_DONE 5
78 #define PTRACE_EVENT_EXIT 6
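/* With the above options enabled, the kernel reports these event codes
   in bits 16-23 of the waitpid status of a SIGTRAP stop, so that
   (status >> 16) identifies the event; see linux_handle_extended_wait.  */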
79
80 #endif /* PTRACE_EVENT_FORK */
81
82 /* We can't always assume that this flag is available, but all systems
83 with the ptrace event handlers also have __WALL, so it's safe to use
84 here. */
85 #ifndef __WALL
86 #define __WALL 0x40000000 /* Wait for any child. */
87 #endif
88
89 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
90 the use of the multi-threaded target. */
91 static struct target_ops *linux_ops;
92 static struct target_ops linux_ops_saved;
93
94 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
95 Called by our to_xfer_partial. */
96 static LONGEST (*super_xfer_partial) (struct target_ops *,
97 enum target_object,
98 const char *, gdb_byte *,
99 const gdb_byte *,
100 ULONGEST, LONGEST);
101
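/* Non-zero enables debugging output from this module.  */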
102 static int debug_linux_nat;
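/* Show callback for the setting controlling debug_linux_nat above.  */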
103 static void
104 show_debug_linux_nat (struct ui_file *file, int from_tty,
105 struct cmd_list_element *c, const char *value)
106 {
107 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
108 value);
109 }
110
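/* When following a vfork child, this records the process id of the
   vforked parent, which is quietly held stopped until the child execs
   (see child_follow_fork and linux_handle_extended_wait); zero
   otherwise.  */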
111 static int linux_parent_pid;
112
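/* A list of the process ids of new children that were seen to stop
   (with their initial SIGSTOP) before the ptrace event announcing them
   was processed; see linux_record_stopped_pid and
   linux_handle_extended_wait.  */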
113 struct simple_pid_list
114 {
115 int pid;
116 struct simple_pid_list *next;
117 };
118 struct simple_pid_list *stopped_pids;
119
120 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
121 cannot be used, 1 if it can. */
122
123 static int linux_supports_tracefork_flag = -1;
124
125 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
126 PTRACE_O_TRACEVFORKDONE. */
127
128 static int linux_supports_tracevforkdone_flag = -1;
129
130 \f
131 /* Trivial list manipulation functions to keep track of a list of
132 new stopped processes. */
133 static void
134 add_to_pid_list (struct simple_pid_list **listp, int pid)
135 {
136 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
137 new_pid->pid = pid;
138 new_pid->next = *listp;
139 *listp = new_pid;
140 }
141
142 static int
143 pull_pid_from_list (struct simple_pid_list **listp, int pid)
144 {
145 struct simple_pid_list **p;
146
147 for (p = listp; *p != NULL; p = &(*p)->next)
148 if ((*p)->pid == pid)
149 {
150 struct simple_pid_list *next = (*p)->next;
151 xfree (*p);
152 *p = next;
153 return 1;
154 }
155 return 0;
156 }
157
158 void
159 linux_record_stopped_pid (int pid)
160 {
161 add_to_pid_list (&stopped_pids, pid);
162 }
163
164 \f
165 /* A helper function for linux_test_for_tracefork, called after fork (). */
166
167 static void
168 linux_tracefork_child (void)
169 {
170 int ret;
171
172 ptrace (PTRACE_TRACEME, 0, 0, 0);
173 kill (getpid (), SIGSTOP);
174 fork ();
175 _exit (0);
176 }
177
178 /* Wrapper function for waitpid which handles EINTR. */
179
180 static int
181 my_waitpid (int pid, int *status, int flags)
182 {
183 int ret;
184 do
185 {
186 ret = waitpid (pid, status, flags);
187 }
188 while (ret == -1 && errno == EINTR);
189
190 return ret;
191 }
192
193 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
194
195 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
196 we know that the feature is not available. This may change the tracing
197 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
198
199 However, if it succeeds, we don't know for sure that the feature is
200 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
201 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
202 fork tracing, and let it fork. If the process exits, we assume that we
203 can't use TRACEFORK; if we get the fork notification, and we can extract
204 the new child's PID, then we assume that we can. */
205
206 static void
207 linux_test_for_tracefork (int original_pid)
208 {
209 int child_pid, ret, status;
210 long second_pid;
211
212 linux_supports_tracefork_flag = 0;
213 linux_supports_tracevforkdone_flag = 0;
214
215 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
216 if (ret != 0)
217 return;
218
219 child_pid = fork ();
220 if (child_pid == -1)
221 perror_with_name (("fork"));
222
223 if (child_pid == 0)
224 linux_tracefork_child ();
225
226 ret = my_waitpid (child_pid, &status, 0);
227 if (ret == -1)
228 perror_with_name (("waitpid"));
229 else if (ret != child_pid)
230 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
231 if (! WIFSTOPPED (status))
232 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
233
234 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
235 if (ret != 0)
236 {
237 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
238 if (ret != 0)
239 {
240 warning (_("linux_test_for_tracefork: failed to kill child"));
241 return;
242 }
243
244 ret = my_waitpid (child_pid, &status, 0);
245 if (ret != child_pid)
246 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
247 else if (!WIFSIGNALED (status))
248 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
249 "killed child"), status);
250
251 return;
252 }
253
254 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
255 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
256 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
257 linux_supports_tracevforkdone_flag = (ret == 0);
258
259 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
260 if (ret != 0)
261 warning (_("linux_test_for_tracefork: failed to resume child"));
262
263 ret = my_waitpid (child_pid, &status, 0);
264
265 if (ret == child_pid && WIFSTOPPED (status)
266 && status >> 16 == PTRACE_EVENT_FORK)
267 {
268 second_pid = 0;
269 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
270 if (ret == 0 && second_pid != 0)
271 {
272 int second_status;
273
274 linux_supports_tracefork_flag = 1;
275 my_waitpid (second_pid, &second_status, 0);
276 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
277 if (ret != 0)
278 warning (_("linux_test_for_tracefork: failed to kill second child"));
279 }
280 }
281 else
282 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
283 "(%d, status 0x%x)"), ret, status);
284
285 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
286 if (ret != 0)
287 warning (_("linux_test_for_tracefork: failed to kill child"));
288 my_waitpid (child_pid, &status, 0);
289 }
290
291 /* Return non-zero iff we have tracefork functionality available.
292 This function also sets linux_supports_tracefork_flag. */
293
294 static int
295 linux_supports_tracefork (int pid)
296 {
297 if (linux_supports_tracefork_flag == -1)
298 linux_test_for_tracefork (pid);
299 return linux_supports_tracefork_flag;
300 }
301
302 static int
303 linux_supports_tracevforkdone (int pid)
304 {
305 if (linux_supports_tracefork_flag == -1)
306 linux_test_for_tracefork (pid);
307 return linux_supports_tracevforkdone_flag;
308 }
309
310 \f
311 void
312 linux_enable_event_reporting (ptid_t ptid)
313 {
314 int pid = ptid_get_lwp (ptid);
315 int options;
316
317 if (pid == 0)
318 pid = ptid_get_pid (ptid);
319
320 if (! linux_supports_tracefork (pid))
321 return;
322
323 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
324 | PTRACE_O_TRACECLONE;
325 if (linux_supports_tracevforkdone (pid))
326 options |= PTRACE_O_TRACEVFORKDONE;
327
328 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
329 read-only process state. */
330
331 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
332 }
333
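/* Target hook run after attaching to a process: enable the ptrace event
   options we rely on and let the thread_db layer take over if a threads
   library is present.  */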
334 void
335 child_post_attach (int pid)
336 {
337 linux_enable_event_reporting (pid_to_ptid (pid));
338 check_for_thread_db ();
339 }
340
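/* Likewise, but run after starting a new inferior.  */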
341 static void
342 linux_child_post_startup_inferior (ptid_t ptid)
343 {
344 linux_enable_event_reporting (ptid);
345 check_for_thread_db ();
346 }
347
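/* Target hook for following a fork or vfork reported through the ptrace
   event machinery.  If FOLLOW_CHILD is nonzero, switch to debugging the
   child; otherwise stay with the parent and either detach the child or
   retain it in ptrace-stopped state, depending on the detach_fork
   setting.  */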
348 int
349 child_follow_fork (struct target_ops *ops, int follow_child)
350 {
351 ptid_t last_ptid;
352 struct target_waitstatus last_status;
353 int has_vforked;
354 int parent_pid, child_pid;
355
356 get_last_target_status (&last_ptid, &last_status);
357 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
358 parent_pid = ptid_get_lwp (last_ptid);
359 if (parent_pid == 0)
360 parent_pid = ptid_get_pid (last_ptid);
361 child_pid = last_status.value.related_pid;
362
363 if (! follow_child)
364 {
365 /* We're already attached to the parent, by default. */
366
367 /* Before detaching from the child, remove all breakpoints from
368 it. (This won't actually modify the breakpoint list, but will
369 physically remove the breakpoints from the child.) */
370 /* If we vforked this will remove the breakpoints from the parent
371 also, but they'll be reinserted below. */
372 detach_breakpoints (child_pid);
373
374 /* Detach new forked process? */
375 if (detach_fork)
376 {
377 if (debug_linux_nat)
378 {
379 target_terminal_ours ();
380 fprintf_filtered (gdb_stdlog,
381 "Detaching after fork from child process %d.\n",
382 child_pid);
383 }
384
385 ptrace (PTRACE_DETACH, child_pid, 0, 0);
386 }
387 else
388 {
389 struct fork_info *fp;
390 /* Retain child fork in ptrace (stopped) state. */
391 fp = find_fork_pid (child_pid);
392 if (!fp)
393 fp = add_fork (child_pid);
394 fork_save_infrun_state (fp, 0);
395 }
396
397 if (has_vforked)
398 {
399 gdb_assert (linux_supports_tracefork_flag >= 0);
400 if (linux_supports_tracevforkdone (0))
401 {
402 int status;
403
404 ptrace (PTRACE_CONT, parent_pid, 0, 0);
405 my_waitpid (parent_pid, &status, __WALL);
406 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
407 warning (_("Unexpected waitpid result %06x when waiting for "
408 "vfork-done"), status);
409 }
410 else
411 {
412 /* We can't insert breakpoints until the child has
413 finished with the shared memory region. We need to
414 wait until that happens. Ideal would be to just
415 call:
416 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
417 - waitpid (parent_pid, &status, __WALL);
418 However, most architectures can't handle a syscall
419 being traced on the way out if it wasn't traced on
420 the way in.
421
422 We might also think to loop, continuing the child
423 until it exits or gets a SIGTRAP. One problem is
424 that the child might call ptrace with PTRACE_TRACEME.
425
426 There's no simple and reliable way to figure out when
427 the vforked child will be done with its copy of the
428 shared memory. We could step it out of the syscall,
429 two instructions, let it go, and then single-step the
430 parent once. When we have hardware single-step, this
431 would work; with software single-step it could still
432 be made to work but we'd have to be able to insert
433 single-step breakpoints in the child, and we'd have
434 to insert -just- the single-step breakpoint in the
435 parent. Very awkward.
436
437 In the end, the best we can do is to make sure it
438 runs for a little while. Hopefully it will be out of
439 range of any breakpoints we reinsert. Usually this
440 is only the single-step breakpoint at vfork's return
441 point. */
442
443 usleep (10000);
444 }
445
446 /* Since we vforked, breakpoints were removed in the parent
447 too. Put them back. */
448 reattach_breakpoints (parent_pid);
449 }
450 }
451 else
452 {
453 char child_pid_spelling[40];
454
455 /* Needed to keep the breakpoint lists in sync. */
456 if (! has_vforked)
457 detach_breakpoints (child_pid);
458
459 /* Before detaching from the parent, remove all breakpoints from it. */
460 remove_breakpoints ();
461
462 if (debug_linux_nat)
463 {
464 target_terminal_ours ();
465 fprintf_filtered (gdb_stdlog,
466 "Attaching after fork to child process %d.\n",
467 child_pid);
468 }
469
470 /* If we're vforking, we may want to hold on to the parent until
471 the child exits or execs. At exec time we can remove the old
472 breakpoints from the parent and detach it; at exit time we
473 could do the same (or even, sneakily, resume debugging it - the
474 child's exec has failed, or something similar).
475
476 This doesn't clean up "properly", because we can't call
477 target_detach, but that's OK; if the current target is "child",
478 then it doesn't need any further cleanups, and lin_lwp will
479 generally not encounter vfork (vfork is defined to fork
480 in libpthread.so).
481
482 The holding part is very easy if we have VFORKDONE events;
483 but keeping track of both processes is beyond GDB at the
484 moment. So we don't expose the parent to the rest of GDB.
485 Instead we quietly hold onto it until such time as we can
486 safely resume it. */
487
488 if (has_vforked)
489 linux_parent_pid = parent_pid;
490 else if (!detach_fork)
491 {
492 struct fork_info *fp;
493 /* Retain parent fork in ptrace (stopped) state. */
494 fp = find_fork_pid (parent_pid);
495 if (!fp)
496 fp = add_fork (parent_pid);
497 fork_save_infrun_state (fp, 0);
498 }
499 else
500 {
501 target_detach (NULL, 0);
502 }
503
504 inferior_ptid = pid_to_ptid (child_pid);
505
506 /* Reinstall ourselves, since we might have been removed in
507 target_detach (which does other necessary cleanup). */
508
509 push_target (ops);
510
511 /* Reset breakpoints in the child as appropriate. */
512 follow_inferior_reset_breakpoints ();
513 }
514
515 return 0;
516 }
517
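/* Decode an extended (PTRACE_EVENT_*) wait STATUS just reported for
   PID, fill in OURSTATUS accordingly, and return the ptid the event
   should be reported for.  For fork, vfork and clone events this also
   waits for the new child's initial stop if it has not been seen
   yet.  */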
518 ptid_t
519 linux_handle_extended_wait (int pid, int status,
520 struct target_waitstatus *ourstatus)
521 {
522 int event = status >> 16;
523
524 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
525 || event == PTRACE_EVENT_CLONE)
526 {
527 unsigned long new_pid;
528 int ret;
529
530 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
531
532 /* If we haven't already seen the new PID stop, wait for it now. */
533 if (! pull_pid_from_list (&stopped_pids, new_pid))
534 {
535 /* The new child has a pending SIGSTOP. We can't affect it until it
536 hits the SIGSTOP, but we're already attached. */
537 ret = my_waitpid (new_pid, &status,
538 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
539 if (ret == -1)
540 perror_with_name (_("waiting for new child"));
541 else if (ret != new_pid)
542 internal_error (__FILE__, __LINE__,
543 _("wait returned unexpected PID %d"), ret);
544 else if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
545 internal_error (__FILE__, __LINE__,
546 _("wait returned unexpected status 0x%x"), status);
547 }
548
549 if (event == PTRACE_EVENT_FORK)
550 ourstatus->kind = TARGET_WAITKIND_FORKED;
551 else if (event == PTRACE_EVENT_VFORK)
552 ourstatus->kind = TARGET_WAITKIND_VFORKED;
553 else
554 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
555
556 ourstatus->value.related_pid = new_pid;
557 return inferior_ptid;
558 }
559
560 if (event == PTRACE_EVENT_EXEC)
561 {
562 ourstatus->kind = TARGET_WAITKIND_EXECD;
563 ourstatus->value.execd_pathname
564 = xstrdup (child_pid_to_exec_file (pid));
565
566 if (linux_parent_pid)
567 {
568 detach_breakpoints (linux_parent_pid);
569 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
570
571 linux_parent_pid = 0;
572 }
573
574 return inferior_ptid;
575 }
576
577 internal_error (__FILE__, __LINE__,
578 _("unknown ptrace event %d"), event);
579 }
580
581 \f
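/* The fork, vfork and exec catchpoint hooks below only need to check
   that the kernel supports the corresponding ptrace events; the events
   themselves are enabled by linux_enable_event_reporting.  */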
582 void
583 child_insert_fork_catchpoint (int pid)
584 {
585 if (! linux_supports_tracefork (pid))
586 error (_("Your system does not support fork catchpoints."));
587 }
588
589 void
590 child_insert_vfork_catchpoint (int pid)
591 {
592 if (!linux_supports_tracefork (pid))
593 error (_("Your system does not support vfork catchpoints."));
594 }
595
596 void
597 child_insert_exec_catchpoint (int pid)
598 {
599 if (!linux_supports_tracefork (pid))
600 error (_("Your system does not support exec catchpoints."));
601 }
602
603 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
604 are processes sharing the same VM space. A multi-threaded process
605 is basically a group of such processes. However, such a grouping
606 is almost entirely a user-space issue; the kernel doesn't enforce
607 such a grouping at all (this might change in the future). In
608 general, we'll rely on the threads library (i.e. the GNU/Linux
609 Threads library) to provide such a grouping.
610
611 It is perfectly possible to write a multi-threaded application
612 without the assistance of a threads library, by using the clone
613 system call directly. This module should be able to give some
614 rudimentary support for debugging such applications if developers
615 specify the CLONE_PTRACE flag in the clone system call, and are
616 using the Linux kernel 2.4 or above.
617
618 Note that there are some peculiarities in GNU/Linux that affect
619 this code:
620
621 - In general one should specify the __WCLONE flag to waitpid in
622 order to make it report events for any of the cloned processes
623 (and leave it out for the initial process). However, if a cloned
624 process has exited the exit status is only reported if the
625 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
626 we cannot use it since GDB must work on older systems too.
627
628 - When a traced, cloned process exits and is waited for by the
629 debugger, the kernel reassigns it to the original parent and
630 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
631 library doesn't notice this, which leads to the "zombie problem":
632 When debugged, a multi-threaded process that spawns a lot of
633 threads will run out of processes, even if the threads exit,
634 because the "zombies" stay around. */
635
636 /* List of known LWPs. */
637 static struct lwp_info *lwp_list;
638
639 /* Number of LWPs in the list. */
640 static int num_lwps;
641 \f
642
643 #define GET_LWP(ptid) ptid_get_lwp (ptid)
644 #define GET_PID(ptid) ptid_get_pid (ptid)
645 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
646 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
647
648 /* If the last reported event was a SIGTRAP, this variable is set to
649 the process id of the LWP/thread that got it. */
650 ptid_t trap_ptid;
651 \f
652
653 /* Since we cannot wait (in linux_nat_wait) for the initial process and
654 any cloned processes with a single call to waitpid, we have to use
655 the WNOHANG flag and call waitpid in a loop. To optimize
656 things a bit we use `sigsuspend' to wake us up when a process has
657 something to report (it will send us a SIGCHLD if it has). To make
658 this work we have to juggle with the signal mask. We save the
659 original signal mask such that we can restore it before creating a
660 new process in order to avoid blocking certain signals in the
661 inferior. We then block SIGCHLD during the waitpid/sigsuspend
662 loop. */
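/* For illustration only -- a minimal sketch of that pattern, assuming a
   single child PID and a SIGCHLD handler already installed (the names
   here are hypothetical, not part of this file):

       sigset_t mask, prev;
       sigemptyset (&mask);
       sigaddset (&mask, SIGCHLD);
       sigprocmask (SIG_BLOCK, &mask, &prev);      -- block SIGCHLD
       while (waitpid (pid, &status, WNOHANG) == 0)
         sigsuspend (&prev);                       -- sleep, SIGCHLD unblocked

   sigsuspend atomically restores the previous mask and sleeps, so a
   SIGCHLD delivered between the waitpid and the sigsuspend cannot be
   lost.  */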
663
664 /* Original signal mask. */
665 static sigset_t normal_mask;
666
667 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
668 _initialize_linux_nat. */
669 static sigset_t suspend_mask;
670
672 /* Signals to block so that sigsuspend works. */
672 static sigset_t blocked_mask;
673 \f
674
675 /* Prototypes for local functions. */
676 static int stop_wait_callback (struct lwp_info *lp, void *data);
677 static int linux_nat_thread_alive (ptid_t ptid);
678 \f
679 /* Convert wait status STATUS to a string. Used for printing debug
680 messages only. */
681
682 static char *
683 status_to_str (int status)
684 {
685 static char buf[64];
686
687 if (WIFSTOPPED (status))
688 snprintf (buf, sizeof (buf), "%s (stopped)",
689 strsignal (WSTOPSIG (status)));
690 else if (WIFSIGNALED (status))
691 snprintf (buf, sizeof (buf), "%s (terminated)",
692 strsignal (WSTOPSIG (status)));
693 else
694 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
695
696 return buf;
697 }
698
699 /* Initialize the list of LWPs. Note that this module, contrary to
700 what GDB's generic threads layer does for its thread list,
701 re-initializes the LWP list whenever we mourn the inferior or
702 detach from it (detaching does not involve mourning). */
703
704 static void
705 init_lwp_list (void)
706 {
707 struct lwp_info *lp, *lpnext;
708
709 for (lp = lwp_list; lp; lp = lpnext)
710 {
711 lpnext = lp->next;
712 xfree (lp);
713 }
714
715 lwp_list = NULL;
716 num_lwps = 0;
717 }
718
719 /* Add the LWP specified by PID to the list. Return a pointer to the
720 structure describing the new LWP. */
721
722 static struct lwp_info *
723 add_lwp (ptid_t ptid)
724 {
725 struct lwp_info *lp;
726
727 gdb_assert (is_lwp (ptid));
728
729 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
730
731 memset (lp, 0, sizeof (struct lwp_info));
732
733 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
734
735 lp->ptid = ptid;
736
737 lp->next = lwp_list;
738 lwp_list = lp;
739 ++num_lwps;
740
741 return lp;
742 }
743
744 /* Remove the LWP specified by PID from the list. */
745
746 static void
747 delete_lwp (ptid_t ptid)
748 {
749 struct lwp_info *lp, *lpprev;
750
751 lpprev = NULL;
752
753 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
754 if (ptid_equal (lp->ptid, ptid))
755 break;
756
757 if (!lp)
758 return;
759
760 num_lwps--;
761
762 if (lpprev)
763 lpprev->next = lp->next;
764 else
765 lwp_list = lp->next;
766
767 xfree (lp);
768 }
769
770 /* Return a pointer to the structure describing the LWP corresponding
771 to PID. If no corresponding LWP could be found, return NULL. */
772
773 static struct lwp_info *
774 find_lwp_pid (ptid_t ptid)
775 {
776 struct lwp_info *lp;
777 int lwp;
778
779 if (is_lwp (ptid))
780 lwp = GET_LWP (ptid);
781 else
782 lwp = GET_PID (ptid);
783
784 for (lp = lwp_list; lp; lp = lp->next)
785 if (lwp == GET_LWP (lp->ptid))
786 return lp;
787
788 return NULL;
789 }
790
791 /* Call CALLBACK with its second argument set to DATA for every LWP in
792 the list. If CALLBACK returns 1 for a particular LWP, return a
793 pointer to the structure describing that LWP immediately.
794 Otherwise return NULL. */
795
796 struct lwp_info *
797 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
798 {
799 struct lwp_info *lp, *lpnext;
800
801 for (lp = lwp_list; lp; lp = lpnext)
802 {
803 lpnext = lp->next;
804 if ((*callback) (lp, data))
805 return lp;
806 }
807
808 return NULL;
809 }
810
811 /* Update our internal state when changing from one fork (checkpoint,
812 et cetera) to another indicated by NEW_PTID. We can only switch
813 single-threaded applications, so we only create one new LWP, and
814 the previous list is discarded. */
815
816 void
817 linux_nat_switch_fork (ptid_t new_ptid)
818 {
819 struct lwp_info *lp;
820
821 init_lwp_list ();
822 lp = add_lwp (new_ptid);
823 lp->stopped = 1;
824 }
825
826 /* Record a PTID for later deletion. */
827
828 struct saved_ptids
829 {
830 ptid_t ptid;
831 struct saved_ptids *next;
832 };
833 static struct saved_ptids *threads_to_delete;
834
835 static void
836 record_dead_thread (ptid_t ptid)
837 {
838 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
839 p->ptid = ptid;
840 p->next = threads_to_delete;
841 threads_to_delete = p;
842 }
843
844 /* Delete any dead threads which are not the current thread. */
845
846 static void
847 prune_lwps (void)
848 {
849 struct saved_ptids **p = &threads_to_delete;
850
851 while (*p)
852 if (! ptid_equal ((*p)->ptid, inferior_ptid))
853 {
854 struct saved_ptids *tmp = *p;
855 delete_thread (tmp->ptid);
856 *p = tmp->next;
857 xfree (tmp);
858 }
859 else
860 p = &(*p)->next;
861 }
862
863 /* Callback for iterate_over_threads that finds a thread corresponding
864 to the given LWP. */
865
866 static int
867 find_thread_from_lwp (struct thread_info *thr, void *dummy)
868 {
869 ptid_t *ptid_p = dummy;
870
871 if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p))
872 return 1;
873 else
874 return 0;
875 }
876
877 /* Handle the exit of a single thread LP. */
878
879 static void
880 exit_lwp (struct lwp_info *lp)
881 {
882 if (in_thread_list (lp->ptid))
883 {
884 /* Core GDB cannot deal with us deleting the current thread. */
885 if (!ptid_equal (lp->ptid, inferior_ptid))
886 delete_thread (lp->ptid);
887 else
888 record_dead_thread (lp->ptid);
889 printf_unfiltered (_("[%s exited]\n"),
890 target_pid_to_str (lp->ptid));
891 }
892 else
893 {
894 /* Even if LP->PTID is not in the global GDB thread list, the
895 LWP may be - with an additional thread ID. We don't need
896 to print anything in this case; thread_db is in use and
897 already took care of that. But it didn't delete the thread
898 in order to handle zombies correctly. */
899
900 struct thread_info *thr;
901
902 thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid);
903 if (thr)
904 {
905 if (!ptid_equal (thr->ptid, inferior_ptid))
906 delete_thread (thr->ptid);
907 else
908 record_dead_thread (thr->ptid);
909 }
910 }
911
912 delete_lwp (lp->ptid);
913 }
914
915 /* Attach to the LWP specified by PID. If VERBOSE is non-zero, print
916 a message telling the user that a new LWP has been added to the
917 process. */
918
919 void
920 lin_lwp_attach_lwp (ptid_t ptid, int verbose)
921 {
922 struct lwp_info *lp, *found_lp;
923
924 gdb_assert (is_lwp (ptid));
925
926 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
927 to interrupt either the ptrace() or waitpid() calls below. */
928 if (!sigismember (&blocked_mask, SIGCHLD))
929 {
930 sigaddset (&blocked_mask, SIGCHLD);
931 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
932 }
933
934 if (verbose)
935 printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));
936
937 found_lp = lp = find_lwp_pid (ptid);
938 if (lp == NULL)
939 lp = add_lwp (ptid);
940
941 /* We assume that we're already attached to any LWP that has an id
942 equal to the overall process id, and to any LWP that is already
943 in our list of LWPs. If we're not seeing exit events from threads
944 and we've had PID wraparound since we last tried to stop all threads,
945 this assumption might be wrong; fortunately, this is very unlikely
946 to happen. */
947 if (GET_LWP (ptid) != GET_PID (ptid) && found_lp == NULL)
948 {
949 pid_t pid;
950 int status;
951
952 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
953 error (_("Can't attach %s: %s"), target_pid_to_str (ptid),
954 safe_strerror (errno));
955
956 if (debug_linux_nat)
957 fprintf_unfiltered (gdb_stdlog,
958 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
959 target_pid_to_str (ptid));
960
961 pid = my_waitpid (GET_LWP (ptid), &status, 0);
962 if (pid == -1 && errno == ECHILD)
963 {
964 /* Try again with __WCLONE to check cloned processes. */
965 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
966 lp->cloned = 1;
967 }
968
969 gdb_assert (pid == GET_LWP (ptid)
970 && WIFSTOPPED (status) && WSTOPSIG (status));
971
972 target_post_attach (pid);
973
974 lp->stopped = 1;
975
976 if (debug_linux_nat)
977 {
978 fprintf_unfiltered (gdb_stdlog,
979 "LLAL: waitpid %s received %s\n",
980 target_pid_to_str (ptid),
981 status_to_str (status));
982 }
983 }
984 else
985 {
986 /* We assume that the LWP representing the original process is
987 already stopped. Mark it as stopped in the data structure
988 that the linux ptrace layer uses to keep track of threads.
989 Note that this won't have already been done since the main
990 thread will have, we assume, been stopped by an attach from a
991 different layer. */
992 lp->stopped = 1;
993 }
994 }
995
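/* Implementation of the "attach" target method: attach via the layer
   beneath us (inf-ptrace), then record the initial process as the first
   LWP on our list and make sure it has stopped.  */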
996 static void
997 linux_nat_attach (char *args, int from_tty)
998 {
999 struct lwp_info *lp;
1000 pid_t pid;
1001 int status;
1002
1003 /* FIXME: We should probably accept a list of process id's, and
1004 attach all of them. */
1005 linux_ops->to_attach (args, from_tty);
1006
1007 /* Add the initial process as the first LWP to the list. */
1008 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1009 lp = add_lwp (inferior_ptid);
1010
1011 /* Make sure the initial process is stopped. The user-level threads
1012 layer might want to poke around in the inferior, and that won't
1013 work if things haven't stabilized yet. */
1014 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
1015 if (pid == -1 && errno == ECHILD)
1016 {
1017 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
1018
1019 /* Try again with __WCLONE to check cloned processes. */
1020 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
1021 lp->cloned = 1;
1022 }
1023
1024 gdb_assert (pid == GET_PID (inferior_ptid)
1025 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
1026
1027 lp->stopped = 1;
1028
1029 /* Fake the SIGSTOP that core GDB expects. */
1030 lp->status = W_STOPCODE (SIGSTOP);
1031 lp->resumed = 1;
1032 if (debug_linux_nat)
1033 {
1034 fprintf_unfiltered (gdb_stdlog,
1035 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
1036 }
1037 }
1038
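/* iterate_over_lwps callback for linux_nat_detach: flush any SIGSTOP we
   sent LP ourselves, then detach from LP unless its lwp id equals the
   overall process id -- that LWP is detached last, by linux_nat_detach,
   via the layer beneath us.  */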
1039 static int
1040 detach_callback (struct lwp_info *lp, void *data)
1041 {
1042 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1043
1044 if (debug_linux_nat && lp->status)
1045 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1046 strsignal (WSTOPSIG (lp->status)),
1047 target_pid_to_str (lp->ptid));
1048
1049 while (lp->signalled && lp->stopped)
1050 {
1051 errno = 0;
1052 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1053 WSTOPSIG (lp->status)) < 0)
1054 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
1055 safe_strerror (errno));
1056
1057 if (debug_linux_nat)
1058 fprintf_unfiltered (gdb_stdlog,
1059 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1060 target_pid_to_str (lp->ptid),
1061 status_to_str (lp->status));
1062
1063 lp->stopped = 0;
1064 lp->signalled = 0;
1065 lp->status = 0;
1066 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1067 here. But since lp->signalled was cleared above,
1068 stop_wait_callback didn't do anything; the process was left
1069 running. Shouldn't we be waiting for it to stop?
1070 I've removed the call, since stop_wait_callback now does do
1071 something when called with lp->signalled == 0. */
1072
1073 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1074 }
1075
1076 /* We don't actually detach from the LWP that has an id equal to the
1077 overall process id just yet. */
1078 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1079 {
1080 errno = 0;
1081 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1082 WSTOPSIG (lp->status)) < 0)
1083 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1084 safe_strerror (errno));
1085
1086 if (debug_linux_nat)
1087 fprintf_unfiltered (gdb_stdlog,
1088 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1089 target_pid_to_str (lp->ptid),
1090 strsignal (WSTOPSIG (lp->status)));
1091
1092 delete_lwp (lp->ptid);
1093 }
1094
1095 return 0;
1096 }
1097
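/* Implementation of the "detach" target method: detach from every LWP,
   reset our bookkeeping and signal mask, and let the layer beneath us
   detach from the main process.  */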
1098 static void
1099 linux_nat_detach (char *args, int from_tty)
1100 {
1101 iterate_over_lwps (detach_callback, NULL);
1102
1103 /* Only the initial process should be left right now. */
1104 gdb_assert (num_lwps == 1);
1105
1106 trap_ptid = null_ptid;
1107
1108 /* Destroy LWP info; it's no longer valid. */
1109 init_lwp_list ();
1110
1111 /* Restore the original signal mask. */
1112 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1113 sigemptyset (&blocked_mask);
1114
1115 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1116 linux_ops->to_detach (args, from_tty);
1117 }
1118
1119 /* Resume LP. */
1120
1121 static int
1122 resume_callback (struct lwp_info *lp, void *data)
1123 {
1124 if (lp->stopped && lp->status == 0)
1125 {
1126 struct thread_info *tp;
1127
1128 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1129 0, TARGET_SIGNAL_0);
1130 if (debug_linux_nat)
1131 fprintf_unfiltered (gdb_stdlog,
1132 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1133 target_pid_to_str (lp->ptid));
1134 lp->stopped = 0;
1135 lp->step = 0;
1136 }
1137
1138 return 0;
1139 }
1140
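/* iterate_over_lwps callbacks that clear or set the "resumed" flag of
   an LWP.  */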
1141 static int
1142 resume_clear_callback (struct lwp_info *lp, void *data)
1143 {
1144 lp->resumed = 0;
1145 return 0;
1146 }
1147
1148 static int
1149 resume_set_callback (struct lwp_info *lp, void *data)
1150 {
1151 lp->resumed = 1;
1152 return 0;
1153 }
1154
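/* Implementation of the "resume" target method.  PTID selects the
   thread to resume (minus one means all threads); STEP and SIGNO apply
   only to the event thread, any sibling threads are continued without a
   signal.  */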
1155 static void
1156 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1157 {
1158 struct lwp_info *lp;
1159 int resume_all;
1160
1161 if (debug_linux_nat)
1162 fprintf_unfiltered (gdb_stdlog,
1163 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1164 step ? "step" : "resume",
1165 target_pid_to_str (ptid),
1166 signo ? strsignal (signo) : "0",
1167 target_pid_to_str (inferior_ptid));
1168
1169 prune_lwps ();
1170
1171 /* A specific PTID means `step only this process id'. */
1172 resume_all = (PIDGET (ptid) == -1);
1173
1174 if (resume_all)
1175 iterate_over_lwps (resume_set_callback, NULL);
1176 else
1177 iterate_over_lwps (resume_clear_callback, NULL);
1178
1179 /* If PID is -1, it's the current inferior that should be
1180 handled specially. */
1181 if (PIDGET (ptid) == -1)
1182 ptid = inferior_ptid;
1183
1184 lp = find_lwp_pid (ptid);
1185 if (lp)
1186 {
1187 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1188
1189 /* Remember if we're stepping. */
1190 lp->step = step;
1191
1192 /* Mark this LWP as resumed. */
1193 lp->resumed = 1;
1194
1195 /* If we have a pending wait status for this thread, there is no
1196 point in resuming the process. But first make sure that
1197 linux_nat_wait won't preemptively handle the event - we
1198 should never take this short-circuit if we are going to
1199 leave LP running, since we have skipped resuming all the
1200 other threads. This bit of code needs to be synchronized
1201 with linux_nat_wait. */
1202
1203 if (lp->status && WIFSTOPPED (lp->status))
1204 {
1205 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1206
1207 if (signal_stop_state (saved_signo) == 0
1208 && signal_print_state (saved_signo) == 0
1209 && signal_pass_state (saved_signo) == 1)
1210 {
1211 if (debug_linux_nat)
1212 fprintf_unfiltered (gdb_stdlog,
1213 "LLR: Not short circuiting for ignored "
1214 "status 0x%x\n", lp->status);
1215
1216 /* FIXME: What should we do if we are supposed to continue
1217 this thread with a signal? */
1218 gdb_assert (signo == TARGET_SIGNAL_0);
1219 signo = saved_signo;
1220 lp->status = 0;
1221 }
1222 }
1223
1224 if (lp->status)
1225 {
1226 /* FIXME: What should we do if we are supposed to continue
1227 this thread with a signal? */
1228 gdb_assert (signo == TARGET_SIGNAL_0);
1229
1230 if (debug_linux_nat)
1231 fprintf_unfiltered (gdb_stdlog,
1232 "LLR: Short circuiting for status 0x%x\n",
1233 lp->status);
1234
1235 return;
1236 }
1237
1238 /* Mark LWP as not stopped to prevent it from being continued by
1239 resume_callback. */
1240 lp->stopped = 0;
1241 }
1242
1243 if (resume_all)
1244 iterate_over_lwps (resume_callback, NULL);
1245
1246 linux_ops->to_resume (ptid, step, signo);
1247 if (debug_linux_nat)
1248 fprintf_unfiltered (gdb_stdlog,
1249 "LLR: %s %s, %s (resume event thread)\n",
1250 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1251 target_pid_to_str (ptid),
1252 signo ? strsignal (signo) : "0");
1253 }
1254
1255 /* Issue kill to specified lwp. */
1256
1257 static int tkill_failed;
1258
1259 static int
1260 kill_lwp (int lwpid, int signo)
1261 {
1262 errno = 0;
1263
1264 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1265 fails, then we are not using nptl threads and we should be using kill. */
1266
1267 #ifdef HAVE_TKILL_SYSCALL
1268 if (!tkill_failed)
1269 {
1270 int ret = syscall (__NR_tkill, lwpid, signo);
1271 if (errno != ENOSYS)
1272 return ret;
1273 errno = 0;
1274 tkill_failed = 1;
1275 }
1276 #endif
1277
1278 return kill (lwpid, signo);
1279 }
1280
1281 /* Handle a GNU/Linux extended wait response. Most of the work we
1282 just pass off to linux_handle_extended_wait, but if it reports a
1283 clone event we need to add the new LWP to our list (and not report
1284 the trap to higher layers). This function returns non-zero if
1285 the event should be ignored and we should wait again. If STOPPING
1286 is true, the new LWP remains stopped, otherwise it is continued. */
1287
1288 static int
1289 linux_nat_handle_extended (struct lwp_info *lp, int status, int stopping)
1290 {
1291 linux_handle_extended_wait (GET_LWP (lp->ptid), status,
1292 &lp->waitstatus);
1293
1294 /* TARGET_WAITKIND_SPURIOUS is used to indicate clone events. */
1295 if (lp->waitstatus.kind == TARGET_WAITKIND_SPURIOUS)
1296 {
1297 struct lwp_info *new_lp;
1298 new_lp = add_lwp (BUILD_LWP (lp->waitstatus.value.related_pid,
1299 GET_PID (inferior_ptid)));
1300 new_lp->cloned = 1;
1301
1302 if (stopping)
1303 new_lp->stopped = 1;
1304 else
1305 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0, 0);
1306
1307 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1308
1309 if (debug_linux_nat)
1310 fprintf_unfiltered (gdb_stdlog,
1311 "LLHE: Got clone event from LWP %ld, resuming\n",
1312 GET_LWP (lp->ptid));
1313 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1314
1315 return 1;
1316 }
1317
1318 return 0;
1319 }
1320
1321 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1322 exited. */
1323
1324 static int
1325 wait_lwp (struct lwp_info *lp)
1326 {
1327 pid_t pid;
1328 int status;
1329 int thread_dead = 0;
1330
1331 gdb_assert (!lp->stopped);
1332 gdb_assert (lp->status == 0);
1333
1334 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1335 if (pid == -1 && errno == ECHILD)
1336 {
1337 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1338 if (pid == -1 && errno == ECHILD)
1339 {
1340 /* The thread has previously exited. We need to delete it
1341 now because, for some vendor 2.4 kernels with NPTL
1342 support backported, there won't be an exit event unless
1343 it is the main thread. 2.6 kernels will report an exit
1344 event for each thread that exits, as expected. */
1345 thread_dead = 1;
1346 if (debug_linux_nat)
1347 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1348 target_pid_to_str (lp->ptid));
1349 }
1350 }
1351
1352 if (!thread_dead)
1353 {
1354 gdb_assert (pid == GET_LWP (lp->ptid));
1355
1356 if (debug_linux_nat)
1357 {
1358 fprintf_unfiltered (gdb_stdlog,
1359 "WL: waitpid %s received %s\n",
1360 target_pid_to_str (lp->ptid),
1361 status_to_str (status));
1362 }
1363 }
1364
1365 /* Check if the thread has exited. */
1366 if (WIFEXITED (status) || WIFSIGNALED (status))
1367 {
1368 thread_dead = 1;
1369 if (debug_linux_nat)
1370 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1371 target_pid_to_str (lp->ptid));
1372 }
1373
1374 if (thread_dead)
1375 {
1376 exit_lwp (lp);
1377 return 0;
1378 }
1379
1380 gdb_assert (WIFSTOPPED (status));
1381
1382 /* Handle GNU/Linux's extended waitstatus for trace events. */
1383 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1384 {
1385 if (debug_linux_nat)
1386 fprintf_unfiltered (gdb_stdlog,
1387 "WL: Handling extended status 0x%06x\n",
1388 status);
1389 if (linux_nat_handle_extended (lp, status, 1))
1390 return wait_lwp (lp);
1391 }
1392
1393 return status;
1394 }
1395
1396 /* Send a SIGSTOP to LP. */
1397
1398 static int
1399 stop_callback (struct lwp_info *lp, void *data)
1400 {
1401 if (!lp->stopped && !lp->signalled)
1402 {
1403 int ret;
1404
1405 if (debug_linux_nat)
1406 {
1407 fprintf_unfiltered (gdb_stdlog,
1408 "SC: kill %s **<SIGSTOP>**\n",
1409 target_pid_to_str (lp->ptid));
1410 }
1411 errno = 0;
1412 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1413 if (debug_linux_nat)
1414 {
1415 fprintf_unfiltered (gdb_stdlog,
1416 "SC: lwp kill %d %s\n",
1417 ret,
1418 errno ? safe_strerror (errno) : "ERRNO-OK");
1419 }
1420
1421 lp->signalled = 1;
1422 gdb_assert (lp->status == 0);
1423 }
1424
1425 return 0;
1426 }
1427
1428 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1429 a pointer to a set of signals to be flushed immediately. */
1430
1431 static int
1432 stop_wait_callback (struct lwp_info *lp, void *data)
1433 {
1434 sigset_t *flush_mask = data;
1435
1436 if (!lp->stopped)
1437 {
1438 int status;
1439
1440 status = wait_lwp (lp);
1441 if (status == 0)
1442 return 0;
1443
1444 /* Ignore any signals in FLUSH_MASK. */
1445 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1446 {
1447 if (!lp->signalled)
1448 {
1449 lp->stopped = 1;
1450 return 0;
1451 }
1452
1453 errno = 0;
1454 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1455 if (debug_linux_nat)
1456 fprintf_unfiltered (gdb_stdlog,
1457 "PTRACE_CONT %s, 0, 0 (%s)\n",
1458 target_pid_to_str (lp->ptid),
1459 errno ? safe_strerror (errno) : "OK");
1460
1461 return stop_wait_callback (lp, flush_mask);
1462 }
1463
1464 if (WSTOPSIG (status) != SIGSTOP)
1465 {
1466 if (WSTOPSIG (status) == SIGTRAP)
1467 {
1468 /* If a LWP other than the LWP that we're reporting an
1469 event for has hit a GDB breakpoint (as opposed to
1470 some random trap signal), then just arrange for it to
1471 hit it again later. We don't keep the SIGTRAP status
1472 and don't forward the SIGTRAP signal to the LWP. We
1473 will handle the current event, eventually we will
1474 resume all LWPs, and this one will get its breakpoint
1475 trap again.
1476
1477 If we do not do this, then we run the risk that the
1478 user will delete or disable the breakpoint, but the
1479 thread will have already tripped on it. */
1480
1481 /* Now resume this LWP and get the SIGSTOP event. */
1482 errno = 0;
1483 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1484 if (debug_linux_nat)
1485 {
1486 fprintf_unfiltered (gdb_stdlog,
1487 "PTRACE_CONT %s, 0, 0 (%s)\n",
1488 target_pid_to_str (lp->ptid),
1489 errno ? safe_strerror (errno) : "OK");
1490
1491 fprintf_unfiltered (gdb_stdlog,
1492 "SWC: Candidate SIGTRAP event in %s\n",
1493 target_pid_to_str (lp->ptid));
1494 }
1495 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1496 stop_wait_callback (lp, data);
1497 /* If there's another event, throw it back into the queue. */
1498 if (lp->status)
1499 {
1500 if (debug_linux_nat)
1501 {
1502 fprintf_unfiltered (gdb_stdlog,
1503 "SWC: kill %s, %s\n",
1504 target_pid_to_str (lp->ptid),
1505 status_to_str ((int) status));
1506 }
1507 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1508 }
1509 /* Save the sigtrap event. */
1510 lp->status = status;
1511 return 0;
1512 }
1513 else
1514 {
1515 /* The thread was stopped with a signal other than
1516 SIGSTOP, and didn't accidentally trip a breakpoint. */
1517
1518 if (debug_linux_nat)
1519 {
1520 fprintf_unfiltered (gdb_stdlog,
1521 "SWC: Pending event %s in %s\n",
1522 status_to_str ((int) status),
1523 target_pid_to_str (lp->ptid));
1524 }
1525 /* Now resume this LWP and get the SIGSTOP event. */
1526 errno = 0;
1527 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1528 if (debug_linux_nat)
1529 fprintf_unfiltered (gdb_stdlog,
1530 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1531 target_pid_to_str (lp->ptid),
1532 errno ? safe_strerror (errno) : "OK");
1533
1534 /* Hold this event/waitstatus while we check to see if
1535 there are any more (we still want to get that SIGSTOP). */
1536 stop_wait_callback (lp, data);
1537 /* If the lp->status field is still empty, use it to hold
1538 this event. If not, then this event must be returned
1539 to the event queue of the LWP. */
1540 if (lp->status == 0)
1541 lp->status = status;
1542 else
1543 {
1544 if (debug_linux_nat)
1545 {
1546 fprintf_unfiltered (gdb_stdlog,
1547 "SWC: kill %s, %s\n",
1548 target_pid_to_str (lp->ptid),
1549 status_to_str ((int) status));
1550 }
1551 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1552 }
1553 return 0;
1554 }
1555 }
1556 else
1557 {
1558 /* We caught the SIGSTOP that we intended to catch, so
1559 there's no SIGSTOP pending. */
1560 lp->stopped = 1;
1561 lp->signalled = 0;
1562 }
1563 }
1564
1565 return 0;
1566 }
1567
1568 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1569 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1570
1571 static int
1572 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1573 {
1574 sigset_t blocked, ignored;
1575 int i;
1576
1577 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1578
1579 if (!flush_mask)
1580 return 0;
1581
1582 for (i = 1; i < NSIG; i++)
1583 if (sigismember (pending, i))
1584 if (!sigismember (flush_mask, i)
1585 || sigismember (&blocked, i)
1586 || sigismember (&ignored, i))
1587 sigdelset (pending, i);
1588
1589 if (sigisemptyset (pending))
1590 return 0;
1591
1592 return 1;
1593 }
1594
1595 /* DATA is interpreted as a mask of signals to flush. If LP has
1596 signals pending, and they are all in the flush mask, then arrange
1597 to flush them. LP should be stopped, as should all other threads
1598 it might share a signal queue with. */
1599
1600 static int
1601 flush_callback (struct lwp_info *lp, void *data)
1602 {
1603 sigset_t *flush_mask = data;
1604 sigset_t pending, intersection, blocked, ignored;
1605 int pid, status;
1606
1607 /* Normally, when an LWP exits, it is removed from the LWP list. The
1608 last LWP isn't removed till later, however. So if there is only
1609 one LWP on the list, make sure it's alive. */
1610 if (lwp_list == lp && lp->next == NULL)
1611 if (!linux_nat_thread_alive (lp->ptid))
1612 return 0;
1613
1614 /* Just because the LWP is stopped doesn't mean that new signals
1615 can't arrive from outside, so this function must be careful of
1616 race conditions. However, because all threads are stopped, we
1617 can assume that the pending mask will not shrink unless we resume
1618 the LWP, and that it will then get another signal. We can't
1619 control which one, however. */
1620
1621 if (lp->status)
1622 {
1623 if (debug_linux_nat)
1624 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1625 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1626 lp->status = 0;
1627 }
1628
1629 while (linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1630 {
1631 int ret;
1632
1633 errno = 0;
1634 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1635 if (debug_linux_nat)
1636 fprintf_unfiltered (gdb_stderr,
1637 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1638
1639 lp->stopped = 0;
1640 stop_wait_callback (lp, flush_mask);
1641 if (debug_linux_nat)
1642 fprintf_unfiltered (gdb_stderr,
1643 "FC: Wait finished; saved status is %d\n",
1644 lp->status);
1645 }
1646
1647 return 0;
1648 }
1649
1650 /* Return non-zero if LP has a wait status pending. */
1651
1652 static int
1653 status_callback (struct lwp_info *lp, void *data)
1654 {
1655 /* Only report a pending wait status if we pretend that this has
1656 indeed been resumed. */
1657 return (lp->status != 0 && lp->resumed);
1658 }
1659
1660 /* Return non-zero if LP isn't stopped. */
1661
1662 static int
1663 running_callback (struct lwp_info *lp, void *data)
1664 {
1665 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1666 }
1667
1668 /* Count the LWP's that have had events. */
1669
1670 static int
1671 count_events_callback (struct lwp_info *lp, void *data)
1672 {
1673 int *count = data;
1674
1675 gdb_assert (count != NULL);
1676
1677 /* Count only LWPs that have a SIGTRAP event pending. */
1678 if (lp->status != 0
1679 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1680 (*count)++;
1681
1682 return 0;
1683 }
1684
1685 /* Select the LWP (if any) that is currently being single-stepped. */
1686
1687 static int
1688 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1689 {
1690 if (lp->step && lp->status != 0)
1691 return 1;
1692 else
1693 return 0;
1694 }
1695
1696 /* Select the Nth LWP that has had a SIGTRAP event. */
1697
1698 static int
1699 select_event_lwp_callback (struct lwp_info *lp, void *data)
1700 {
1701 int *selector = data;
1702
1703 gdb_assert (selector != NULL);
1704
1705 /* Select only LWPs that have a SIGTRAP event pending. */
1706 if (lp->status != 0
1707 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1708 if ((*selector)-- == 0)
1709 return 1;
1710
1711 return 0;
1712 }
1713
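/* iterate_over_lwps callback: if LP (other than the event LWP passed in
   DATA) stopped at one of our breakpoints, back up its PC if necessary
   and discard the SIGTRAP so that it simply re-traps when resumed.  */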
1714 static int
1715 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1716 {
1717 struct lwp_info *event_lp = data;
1718
1719 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1720 if (lp == event_lp)
1721 return 0;
1722
1723 /* If a LWP other than the LWP that we're reporting an event for has
1724 hit a GDB breakpoint (as opposed to some random trap signal),
1725 then just arrange for it to hit it again later. We don't keep
1726 the SIGTRAP status and don't forward the SIGTRAP signal to the
1727 LWP. We will handle the current event, eventually we will resume
1728 all LWPs, and this one will get its breakpoint trap again.
1729
1730 If we do not do this, then we run the risk that the user will
1731 delete or disable the breakpoint, but the LWP will have already
1732 tripped on it. */
1733
1734 if (lp->status != 0
1735 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1736 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1737 DECR_PC_AFTER_BREAK))
1738 {
1739 if (debug_linux_nat)
1740 fprintf_unfiltered (gdb_stdlog,
1741 "CBC: Push back breakpoint for %s\n",
1742 target_pid_to_str (lp->ptid));
1743
1744 /* Back up the PC if necessary. */
1745 if (DECR_PC_AFTER_BREAK)
1746 write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid);
1747
1748 /* Throw away the SIGTRAP. */
1749 lp->status = 0;
1750 }
1751
1752 return 0;
1753 }
1754
1755 /* Select one LWP out of those that have events pending. */
1756
1757 static void
1758 select_event_lwp (struct lwp_info **orig_lp, int *status)
1759 {
1760 int num_events = 0;
1761 int random_selector;
1762 struct lwp_info *event_lp;
1763
1764 /* Record the wait status for the original LWP. */
1765 (*orig_lp)->status = *status;
1766
1767 /* Give preference to any LWP that is being single-stepped. */
1768 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1769 if (event_lp != NULL)
1770 {
1771 if (debug_linux_nat)
1772 fprintf_unfiltered (gdb_stdlog,
1773 "SEL: Select single-step %s\n",
1774 target_pid_to_str (event_lp->ptid));
1775 }
1776 else
1777 {
1778 /* No single-stepping LWP. Select one at random, out of those
1779 which have had SIGTRAP events. */
1780
1781 /* First see how many SIGTRAP events we have. */
1782 iterate_over_lwps (count_events_callback, &num_events);
1783
1784 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1785 random_selector = (int)
1786 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1787
1788 if (debug_linux_nat && num_events > 1)
1789 fprintf_unfiltered (gdb_stdlog,
1790 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1791 num_events, random_selector);
1792
1793 event_lp = iterate_over_lwps (select_event_lwp_callback,
1794 &random_selector);
1795 }
1796
1797 if (event_lp != NULL)
1798 {
1799 /* Switch the event LWP. */
1800 *orig_lp = event_lp;
1801 *status = event_lp->status;
1802 }
1803
1804 /* Flush the wait status for the event LWP. */
1805 (*orig_lp)->status = 0;
1806 }
1807
1808 /* Return non-zero if LP has been resumed. */
1809
1810 static int
1811 resumed_callback (struct lwp_info *lp, void *data)
1812 {
1813 return lp->resumed;
1814 }
1815
1816 /* Stop an active thread, verify it still exists, then resume it. */
1817
1818 static int
1819 stop_and_resume_callback (struct lwp_info *lp, void *data)
1820 {
1821 struct lwp_info *ptr;
1822
1823 if (!lp->stopped && !lp->signalled)
1824 {
1825 stop_callback (lp, NULL);
1826 stop_wait_callback (lp, NULL);
1827 /* Resume if the lwp still exists. */
1828 for (ptr = lwp_list; ptr; ptr = ptr->next)
1829 if (lp == ptr)
1830 {
1831 resume_callback (lp, NULL);
1832 resume_set_callback (lp, NULL);
1833 }
1834 }
1835 return 0;
1836 }
1837
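/* Implementation of the "wait" target method: wait for an event in the
   LWP selected by PTID (or in any resumed LWP if PTID is minus one) and
   fill in OURSTATUS with the result.  */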
1838 static ptid_t
1839 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1840 {
1841 struct lwp_info *lp = NULL;
1842 int options = 0;
1843 int status = 0;
1844 pid_t pid = PIDGET (ptid);
1845 sigset_t flush_mask;
1846
1847 /* The first time we get here after starting a new inferior, we may
1848 not have added it to the LWP list yet - this is the earliest
1849 moment at which we know its PID. */
1850 if (num_lwps == 0)
1851 {
1852 gdb_assert (!is_lwp (inferior_ptid));
1853
1854 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1855 GET_PID (inferior_ptid));
1856 lp = add_lwp (inferior_ptid);
1857 lp->resumed = 1;
1858 }
1859
1860 sigemptyset (&flush_mask);
1861
1862 /* Make sure SIGCHLD is blocked. */
1863 if (!sigismember (&blocked_mask, SIGCHLD))
1864 {
1865 sigaddset (&blocked_mask, SIGCHLD);
1866 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1867 }
1868
1869 retry:
1870
1871 /* Make sure there is at least one LWP that has been resumed. */
1872 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
1873
1874 /* First check if there is a LWP with a wait status pending. */
1875 if (pid == -1)
1876 {
1877 /* Any LWP that's been resumed will do. */
1878 lp = iterate_over_lwps (status_callback, NULL);
1879 if (lp)
1880 {
1881 status = lp->status;
1882 lp->status = 0;
1883
1884 if (debug_linux_nat && status)
1885 fprintf_unfiltered (gdb_stdlog,
1886 "LLW: Using pending wait status %s for %s.\n",
1887 status_to_str (status),
1888 target_pid_to_str (lp->ptid));
1889 }
1890
1891 /* But if we don't find one, we'll have to wait, and check both
1892 cloned and uncloned processes. We start with the cloned
1893 processes. */
1894 options = __WCLONE | WNOHANG;
1895 }
1896 else if (is_lwp (ptid))
1897 {
1898 if (debug_linux_nat)
1899 fprintf_unfiltered (gdb_stdlog,
1900 "LLW: Waiting for specific LWP %s.\n",
1901 target_pid_to_str (ptid));
1902
1903 /* We have a specific LWP to check. */
1904 lp = find_lwp_pid (ptid);
1905 gdb_assert (lp);
1906 status = lp->status;
1907 lp->status = 0;
1908
1909 if (debug_linux_nat && status)
1910 fprintf_unfiltered (gdb_stdlog,
1911 "LLW: Using pending wait status %s for %s.\n",
1912 status_to_str (status),
1913 target_pid_to_str (lp->ptid));
1914
1915 /* If we have to wait, take into account whether PID is a cloned
1916 process or not. And we have to convert it to something that
1917 the layer beneath us can understand. */
1918 options = lp->cloned ? __WCLONE : 0;
1919 pid = GET_LWP (ptid);
1920 }
1921
1922 if (status && lp->signalled)
1923 {
1924 /* A pending SIGSTOP may interfere with the normal stream of
1925 events. In a typical case where interference is a problem,
1926 we have a SIGSTOP signal pending for LWP A while
1927 single-stepping it, encounter an event in LWP B, and take the
1928 pending SIGSTOP while trying to stop LWP A. After processing
1929 the event in LWP B, LWP A is continued, and we'll never see
1930 the SIGTRAP associated with the last time we were
1931 single-stepping LWP A. */
1932
1933 /* Resume the thread. It should halt immediately returning the
1934 pending SIGSTOP. */
1935 registers_changed ();
1936 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1937 lp->step, TARGET_SIGNAL_0);
1938 if (debug_linux_nat)
1939 fprintf_unfiltered (gdb_stdlog,
1940 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
1941 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1942 target_pid_to_str (lp->ptid));
1943 lp->stopped = 0;
1944 gdb_assert (lp->resumed);
1945
1946 /* This should catch the pending SIGSTOP. */
1947 stop_wait_callback (lp, NULL);
1948 }
1949
1950 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1951 attached process. */
1952 set_sigio_trap ();
1953
1954 while (status == 0)
1955 {
1956 pid_t lwpid;
1957
1958 lwpid = my_waitpid (pid, &status, options);
1959 if (lwpid > 0)
1960 {
1961 gdb_assert (pid == -1 || lwpid == pid);
1962
1963 if (debug_linux_nat)
1964 {
1965 fprintf_unfiltered (gdb_stdlog,
1966 "LLW: waitpid %ld received %s\n",
1967 (long) lwpid, status_to_str (status));
1968 }
1969
1970 lp = find_lwp_pid (pid_to_ptid (lwpid));
1971
1972 /* Check for stop events reported by a process we didn't
1973 already know about - anything not already in our LWP
1974 list.
1975
1976 If we're expecting to receive stopped processes after
1977 fork, vfork, and clone events, then we'll just add the
1978 new one to our list and go back to waiting for the event
1979 to be reported - the stopped process might be returned
1980 from waitpid before or after the event is. */
1981 if (WIFSTOPPED (status) && !lp)
1982 {
1983 linux_record_stopped_pid (lwpid);
1984 status = 0;
1985 continue;
1986 }
1987
1988 /* Make sure we don't report an event for the exit of an LWP not in
1989 our list, i.e. not part of the current process. This can happen
1990 if we detach from a program we originally forked and then it
1991 exits. */
1992 if (!WIFSTOPPED (status) && !lp)
1993 {
1994 status = 0;
1995 continue;
1996 }
1997
1998 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
1999 CLONE_PTRACE processes which do not use the thread library -
2000 otherwise we wouldn't find the new LWP this way. That doesn't
2001 currently work, and the following code is currently unreachable
2002 due to the two blocks above. If it's fixed some day, this code
2003 should be broken out into a function so that we can also pick up
2004 LWPs from the new interface. */
2005 if (!lp)
2006 {
2007 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2008 if (options & __WCLONE)
2009 lp->cloned = 1;
2010
2011 gdb_assert (WIFSTOPPED (status)
2012 && WSTOPSIG (status) == SIGSTOP);
2013 lp->signalled = 1;
2014
2015 if (!in_thread_list (inferior_ptid))
2016 {
2017 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2018 GET_PID (inferior_ptid));
2019 add_thread (inferior_ptid);
2020 }
2021
2022 add_thread (lp->ptid);
2023 printf_unfiltered (_("[New %s]\n"),
2024 target_pid_to_str (lp->ptid));
2025 }
2026
2027 /* Handle GNU/Linux's extended waitstatus for trace events. */
2028 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2029 {
2030 if (debug_linux_nat)
2031 fprintf_unfiltered (gdb_stdlog,
2032 "LLW: Handling extended status 0x%06x\n",
2033 status);
2034 if (linux_nat_handle_extended (lp, status, 0))
2035 {
2036 status = 0;
2037 continue;
2038 }
2039 }
2040
2041 /* Check if the thread has exited. */
2042 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2043 {
2044 /* If this is the main thread, we must stop all threads and
2045 verify if they are still alive. This is because in the nptl
2046 thread model, there is no signal issued for exiting LWPs
2047 other than the main thread. We only get the main thread
2048 exit signal once all child threads have already exited.
2049 If we stop all the threads and use the stop_wait_callback
2050 to check if they have exited we can determine whether this
2051 signal should be ignored or whether it means the end of the
2052 debugged application, regardless of which threading model
2053 is being used. */
2054 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2055 {
2056 lp->stopped = 1;
2057 iterate_over_lwps (stop_and_resume_callback, NULL);
2058 }
2059
2060 if (debug_linux_nat)
2061 fprintf_unfiltered (gdb_stdlog,
2062 "LLW: %s exited.\n",
2063 target_pid_to_str (lp->ptid));
2064
2065 exit_lwp (lp);
2066
2067 /* If there is at least one more LWP, then the exit signal
2068 was not the end of the debugged application and should be
2069 ignored. */
2070 if (num_lwps > 0)
2071 {
2072 /* Make sure there is at least one thread running. */
2073 gdb_assert (iterate_over_lwps (running_callback, NULL));
2074
2075 /* Discard the event. */
2076 status = 0;
2077 continue;
2078 }
2079 }
2080
2081 /* Check if the current LWP has previously exited. In the nptl
2082 thread model, LWPs other than the main thread do not issue
2083 signals when they exit so we must check whenever the thread
2084 has stopped. A similar check is made in stop_wait_callback(). */
2085 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2086 {
2087 if (debug_linux_nat)
2088 fprintf_unfiltered (gdb_stdlog,
2089 "LLW: %s exited.\n",
2090 target_pid_to_str (lp->ptid));
2091
2092 exit_lwp (lp);
2093
2094 /* Make sure there is at least one thread running. */
2095 gdb_assert (iterate_over_lwps (running_callback, NULL));
2096
2097 /* Discard the event. */
2098 status = 0;
2099 continue;
2100 }
2101
2102 /* Make sure we don't report a SIGSTOP that we sent
2103 ourselves in an attempt to stop an LWP. */
2104 if (lp->signalled
2105 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2106 {
2107 if (debug_linux_nat)
2108 fprintf_unfiltered (gdb_stdlog,
2109 "LLW: Delayed SIGSTOP caught for %s.\n",
2110 target_pid_to_str (lp->ptid));
2111
2112 /* This is a delayed SIGSTOP. */
2113 lp->signalled = 0;
2114
2115 registers_changed ();
2116 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2117 lp->step, TARGET_SIGNAL_0);
2118 if (debug_linux_nat)
2119 fprintf_unfiltered (gdb_stdlog,
2120 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2121 lp->step ?
2122 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2123 target_pid_to_str (lp->ptid));
2124
2125 lp->stopped = 0;
2126 gdb_assert (lp->resumed);
2127
2128 /* Discard the event. */
2129 status = 0;
2130 continue;
2131 }
2132
2133 break;
2134 }
2135
2136 if (pid == -1)
2137 {
2138 /* Alternate between checking cloned and uncloned processes. */
2139 options ^= __WCLONE;
2140
2141 /* And suspend every time we have checked both. */
2142 if (options & __WCLONE)
2143 sigsuspend (&suspend_mask);
2144 }
2145
2146 /* We shouldn't end up here unless we want to try again. */
2147 gdb_assert (status == 0);
2148 }
2149
2150 clear_sigio_trap ();
2151 clear_sigint_trap ();
2152
2153 gdb_assert (lp);
2154
2155 /* Don't report signals that GDB isn't interested in, such as
2156 signals that are neither printed nor stopped upon. Stopping all
2157 threads can be a bit time-consuming so if we want decent
2158 performance with heavily multi-threaded programs, especially when
2159 they're using a high frequency timer, we'd better avoid it if we
2160 can. */
2161
2162 if (WIFSTOPPED (status))
2163 {
2164 int signo = target_signal_from_host (WSTOPSIG (status));
2165
2166 /* If we get a signal while single-stepping, we may need special
2167 care, e.g. to skip the signal handler. Defer to common code. */
2168 if (!lp->step
2169 && signal_stop_state (signo) == 0
2170 && signal_print_state (signo) == 0
2171 && signal_pass_state (signo) == 1)
2172 {
2173 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2174 here? It is not clear we should. GDB may not expect
2175 other threads to run. On the other hand, not resuming
2176 newly attached threads may cause an unwanted delay in
2177 getting them running. */
2178 registers_changed ();
2179 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2180 lp->step, signo);
2181 if (debug_linux_nat)
2182 fprintf_unfiltered (gdb_stdlog,
2183 "LLW: %s %s, %s (preempt 'handle')\n",
2184 lp->step ?
2185 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2186 target_pid_to_str (lp->ptid),
2187 signo ? strsignal (signo) : "0");
2188 lp->stopped = 0;
2189 status = 0;
2190 goto retry;
2191 }
2192
2193 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2194 {
2195 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
1996 forwarded to the entire process group, that is, all LWPs
2197 will receive it. Since we only want to report it once,
2198 we try to flush it from all LWPs except this one. */
2199 sigaddset (&flush_mask, SIGINT);
2200 }
2201 }
2202
2203 /* This LWP is stopped now. */
2204 lp->stopped = 1;
2205
2206 if (debug_linux_nat)
2207 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2208 status_to_str (status), target_pid_to_str (lp->ptid));
2209
2210 /* Now stop all other LWPs ... */
2211 iterate_over_lwps (stop_callback, NULL);
2212
2213 /* ... and wait until all of them have reported back that they're no
2214 longer running. */
2215 iterate_over_lwps (stop_wait_callback, &flush_mask);
2216 iterate_over_lwps (flush_callback, &flush_mask);
2217
2218 /* If we're not waiting for a specific LWP, choose an event LWP from
2219 among those that have had events. Giving equal priority to all
2220 LWPs that have had events helps prevent starvation. */
2221 if (pid == -1)
2222 select_event_lwp (&lp, &status);
2223
2224 /* Now that we've selected our final event LWP, cancel any
2225 breakpoints in other LWPs that have hit a GDB breakpoint. See
2226 the comment in cancel_breakpoints_callback to find out why. */
2227 iterate_over_lwps (cancel_breakpoints_callback, lp);
2228
2229 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2230 {
2231 trap_ptid = lp->ptid;
2232 if (debug_linux_nat)
2233 fprintf_unfiltered (gdb_stdlog,
2234 "LLW: trap_ptid is %s.\n",
2235 target_pid_to_str (trap_ptid));
2236 }
2237 else
2238 trap_ptid = null_ptid;
2239
2240 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2241 {
2242 *ourstatus = lp->waitstatus;
2243 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2244 }
2245 else
2246 store_waitstatus (ourstatus, status);
2247
2248 return lp->ptid;
2249 }
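
/* An illustrative sketch only (nothing in this file calls it): the
   extended-status check in linux_nat_wait relies on the kernel packing
   a PTRACE_EVENT_* code into bits 16-23 of the waitpid status whenever
   one of the PTRACE_O_TRACE* options is in effect.  This helper simply
   spells out the "status >> 16" convention used above.  */

static const char *
illustrate_extended_event_name (int status)
{
  /* Extended events are reported as a SIGTRAP stop with a non-zero
     event code in the upper bits of the status.  */
  if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGTRAP
      || (status >> 16) == 0)
    return "not an extended event";

  switch (status >> 16)
    {
    case PTRACE_EVENT_FORK:       return "fork";
    case PTRACE_EVENT_VFORK:      return "vfork";
    case PTRACE_EVENT_CLONE:      return "clone";
    case PTRACE_EVENT_EXEC:       return "exec";
    case PTRACE_EVENT_VFORK_DONE: return "vfork-done";
    case PTRACE_EVENT_EXIT:       return "exit";
    default:                      return "unknown extended event";
    }
}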
2250
2251 static int
2252 kill_callback (struct lwp_info *lp, void *data)
2253 {
2254 errno = 0;
2255 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2256 if (debug_linux_nat)
2257 fprintf_unfiltered (gdb_stdlog,
2258 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2259 target_pid_to_str (lp->ptid),
2260 errno ? safe_strerror (errno) : "OK");
2261
2262 return 0;
2263 }
2264
2265 static int
2266 kill_wait_callback (struct lwp_info *lp, void *data)
2267 {
2268 pid_t pid;
2269
2270 /* We must make sure that there are no pending events (delayed
2271 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
2272 program doesn't interfere with any following debugging session. */
2273
2274 /* For cloned processes we must check both with __WCLONE and
2275 without, since the exit status of a cloned process isn't reported
2276 with __WCLONE. */
2277 if (lp->cloned)
2278 {
2279 do
2280 {
2281 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2282 if (pid != (pid_t) -1 && debug_linux_nat)
2283 {
2284 fprintf_unfiltered (gdb_stdlog,
2285 "KWC: wait %s received unknown.\n",
2286 target_pid_to_str (lp->ptid));
2287 }
2288 }
2289 while (pid == GET_LWP (lp->ptid));
2290
2291 gdb_assert (pid == -1 && errno == ECHILD);
2292 }
2293
2294 do
2295 {
2296 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2297 if (pid != (pid_t) -1 && debug_linux_nat)
2298 {
2299 fprintf_unfiltered (gdb_stdlog,
2300 "KWC: wait %s received unknown.\n",
2301 target_pid_to_str (lp->ptid));
2302 }
2303 }
2304 while (pid == GET_LWP (lp->ptid));
2305
2306 gdb_assert (pid == -1 && errno == ECHILD);
2307 return 0;
2308 }
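
/* An illustrative condensation of the drain loops above (hypothetical
   helper; nothing in this file calls it).  It reaps every queued event
   for a single LWP, with or without __WCLONE, until waitpid no longer
   reports that id.  */

static void
illustrate_drain_lwp_events (pid_t lwpid, int cloned)
{
  pid_t ret;

  do
    {
      errno = 0;
      ret = my_waitpid (lwpid, NULL, cloned ? __WCLONE : 0);
    }
  while (ret == lwpid);
}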
2309
2310 static void
2311 linux_nat_kill (void)
2312 {
2313 struct target_waitstatus last;
2314 ptid_t last_ptid;
2315 int status;
2316
2317 /* If we're stopped while forking and we haven't followed yet,
2318 kill the other task. We need to do this first because the
2319 parent will be sleeping if this is a vfork. */
2320
2321 get_last_target_status (&last_ptid, &last);
2322
2323 if (last.kind == TARGET_WAITKIND_FORKED
2324 || last.kind == TARGET_WAITKIND_VFORKED)
2325 {
2326 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2327 wait (&status);
2328 }
2329
2330 if (forks_exist_p ())
2331 linux_fork_killall ();
2332 else
2333 {
2334 /* Kill all LWPs ... */
2335 iterate_over_lwps (kill_callback, NULL);
2336
2337 /* ... and wait until we've flushed all events. */
2338 iterate_over_lwps (kill_wait_callback, NULL);
2339 }
2340
2341 target_mourn_inferior ();
2342 }
2343
2344 static void
2345 linux_nat_mourn_inferior (void)
2346 {
2347 trap_ptid = null_ptid;
2348
2349 /* Destroy LWP info; it's no longer valid. */
2350 init_lwp_list ();
2351
2352 /* Restore the original signal mask. */
2353 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2354 sigemptyset (&blocked_mask);
2355
2356 if (! forks_exist_p ())
2357 /* Normal case, no other forks available. */
2358 linux_ops->to_mourn_inferior ();
2359 else
2360 /* Multi-fork case. The current inferior_ptid has exited, but
2361 there are other viable forks to debug. Delete the exiting
2362 one and context-switch to the first available. */
2363 linux_fork_mourn_inferior ();
2364 }
2365
2366 static LONGEST
2367 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2368 const char *annex, gdb_byte *readbuf,
2369 const gdb_byte *writebuf,
2370 ULONGEST offset, LONGEST len)
2371 {
2372 struct cleanup *old_chain = save_inferior_ptid ();
2373 LONGEST xfer;
2374
2375 if (is_lwp (inferior_ptid))
2376 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2377
2378 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2379 offset, len);
2380
2381 do_cleanups (old_chain);
2382 return xfer;
2383 }
2384
2385 static int
2386 linux_nat_thread_alive (ptid_t ptid)
2387 {
2388 gdb_assert (is_lwp (ptid));
2389
2390 errno = 0;
2391 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2392 if (debug_linux_nat)
2393 fprintf_unfiltered (gdb_stdlog,
2394 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2395 target_pid_to_str (ptid),
2396 errno ? safe_strerror (errno) : "OK");
2397
2398 /* Not every Linux target implements PTRACE_PEEKUSER.
2399 But we can handle that case gracefully since ptrace
2400 will first do a lookup for the process based upon the
2401 passed-in pid.  If that fails, errno will be set to ESRCH
2402 or EPERM; otherwise the child exists and is alive.  */
2403 if (errno == ESRCH || errno == EPERM)
2404 return 0;
2405
2406 return 1;
2407 }
2408
2409 static char *
2410 linux_nat_pid_to_str (ptid_t ptid)
2411 {
2412 static char buf[64];
2413
2414 if (lwp_list && lwp_list->next && is_lwp (ptid))
2415 {
2416 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2417 return buf;
2418 }
2419
2420 return normal_pid_to_str (ptid);
2421 }
2422
2423 static void
2424 sigchld_handler (int signo)
2425 {
2426 /* Do nothing. The only reason for this handler is that it allows
2427 us to use sigsuspend in linux_nat_wait above to wait for the
2428 arrival of a SIGCHLD. */
2429 }
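
/* The empty handler above exists only so that SIGCHLD interrupts
   sigsuspend in linux_nat_wait.  The race-free idiom, shown here as a
   standalone sketch (nothing in this file calls it), is to keep SIGCHLD
   blocked except during the atomic sigsuspend call, so a child that
   changes state just before we go to sleep cannot be missed.  */

static void
illustrate_sigchld_wait_idiom (void)
{
  sigset_t block_mask, wait_mask;

  /* Block SIGCHLD and remember the previous mask.  */
  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, &wait_mask);

  /* ... poll for already-pending children with waitpid (..., WNOHANG)
     here ...  */

  /* Atomically unblock SIGCHLD and sleep.  sigsuspend restores the
     blocking mask when it returns, after the handler has run.  */
  sigdelset (&wait_mask, SIGCHLD);
  sigsuspend (&wait_mask);
}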
2430
2431 /* Accepts an integer PID; returns a string representing a file that
2432 can be opened to get the symbols for the child process. */
2433
2434 char *
2435 child_pid_to_exec_file (int pid)
2436 {
2437 char *name1, *name2;
2438
2439 name1 = xmalloc (MAXPATHLEN);
2440 name2 = xmalloc (MAXPATHLEN);
2441 make_cleanup (xfree, name1);
2442 make_cleanup (xfree, name2);
2443 memset (name2, 0, MAXPATHLEN);
2444
2445 sprintf (name1, "/proc/%d/exe", pid);
2446 if (readlink (name1, name2, MAXPATHLEN) > 0)
2447 return name2;
2448 else
2449 return name1;
2450 }
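
/* readlink does not NUL-terminate its result, which is why the routine
   above clears name2 before filling it.  A minimal standalone sketch of
   the same lookup with the termination made explicit (hypothetical
   helper; nothing in this file calls it):  */

static int
illustrate_read_exe_link (int pid, char *buf, size_t bufsize)
{
  char link[64];
  ssize_t len;

  snprintf (link, sizeof (link), "/proc/%d/exe", pid);
  len = readlink (link, buf, bufsize - 1);
  if (len < 0)
    return -1;

  /* readlink leaves the buffer unterminated.  */
  buf[len] = '\0';
  return 0;
}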
2451
2452 /* Service function for corefiles and info proc. */
2453
2454 static int
2455 read_mapping (FILE *mapfile,
2456 long long *addr,
2457 long long *endaddr,
2458 char *permissions,
2459 long long *offset,
2460 char *device, long long *inode, char *filename)
2461 {
2462 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2463 addr, endaddr, permissions, offset, device, inode);
2464
2465 filename[0] = '\0';
2466 if (ret > 0 && ret != EOF)
2467 {
2468 /* Eat everything up to EOL for the filename. This will prevent
2469 weird filenames (such as one with embedded whitespace) from
2470 confusing this code.  It also makes this code more robust with
2471 respect to annotations the kernel may add after the filename.
2472
2473 Note the filename is used for informational purposes
2474 only. */
2475 ret += fscanf (mapfile, "%[^\n]\n", filename);
2476 }
2477
2478 return (ret != 0 && ret != EOF);
2479 }
2480
2481 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2482 regions in the inferior for a corefile. */
2483
2484 static int
2485 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2486 unsigned long,
2487 int, int, int, void *), void *obfd)
2488 {
2489 long long pid = PIDGET (inferior_ptid);
2490 char mapsfilename[MAXPATHLEN];
2491 FILE *mapsfile;
2492 long long addr, endaddr, size, offset, inode;
2493 char permissions[8], device[8], filename[MAXPATHLEN];
2494 int read, write, exec;
2495 int ret;
2496
2497 /* Compose the filename for the /proc memory map, and open it. */
2498 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2499 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2500 error (_("Could not open %s."), mapsfilename);
2501
2502 if (info_verbose)
2503 fprintf_filtered (gdb_stdout,
2504 "Reading memory regions from %s\n", mapsfilename);
2505
2506 /* Now iterate until end-of-file. */
2507 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2508 &offset, &device[0], &inode, &filename[0]))
2509 {
2510 size = endaddr - addr;
2511
2512 /* Get the segment's permissions. */
2513 read = (strchr (permissions, 'r') != 0);
2514 write = (strchr (permissions, 'w') != 0);
2515 exec = (strchr (permissions, 'x') != 0);
2516
2517 if (info_verbose)
2518 {
2519 fprintf_filtered (gdb_stdout,
2520 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2521 size, paddr_nz (addr),
2522 read ? 'r' : ' ',
2523 write ? 'w' : ' ', exec ? 'x' : ' ');
2524 if (filename && filename[0])
2525 fprintf_filtered (gdb_stdout, " for %s", filename);
2526 fprintf_filtered (gdb_stdout, "\n");
2527 }
2528
2529 /* Invoke the callback function to create the corefile
2530 segment. */
2531 func (addr, size, read, write, exec, obfd);
2532 }
2533 fclose (mapsfile);
2534 return 0;
2535 }
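
/* A minimal sketch of a callback matching the function pointer that
   linux_nat_find_memory_regions expects.  It only prints each region;
   the real caller passes a callback that writes corefile segments into
   OBFD.  (Illustrative only; nothing in this file calls it, and the
   loop above does not examine the callback's return value.)  */

static int
illustrate_region_callback (CORE_ADDR vaddr, unsigned long size,
			    int read, int write, int exec, void *data)
{
  fprintf_unfiltered (gdb_stdlog, "region at 0x%s, %lu bytes (%c%c%c)\n",
		      paddr_nz (vaddr), size,
		      read ? 'r' : '-', write ? 'w' : '-',
		      exec ? 'x' : '-');
  return 0;
}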
2536
2537 /* Records the thread's register state for the corefile note
2538 section. */
2539
2540 static char *
2541 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2542 char *note_data, int *note_size)
2543 {
2544 gdb_gregset_t gregs;
2545 gdb_fpregset_t fpregs;
2546 #ifdef FILL_FPXREGSET
2547 gdb_fpxregset_t fpxregs;
2548 #endif
2549 unsigned long lwp = ptid_get_lwp (ptid);
2550 struct gdbarch *gdbarch = current_gdbarch;
2551 const struct regset *regset;
2552 int core_regset_p;
2553
2554 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
2555 if (core_regset_p
2556 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
2557 sizeof (gregs))) != NULL
2558 && regset->collect_regset != NULL)
2559 regset->collect_regset (regset, current_regcache, -1,
2560 &gregs, sizeof (gregs));
2561 else
2562 fill_gregset (&gregs, -1);
2563
2564 note_data = (char *) elfcore_write_prstatus (obfd,
2565 note_data,
2566 note_size,
2567 lwp,
2568 stop_signal, &gregs);
2569
2570 if (core_regset_p
2571 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
2572 sizeof (fpregs))) != NULL
2573 && regset->collect_regset != NULL)
2574 regset->collect_regset (regset, current_regcache, -1,
2575 &fpregs, sizeof (fpregs));
2576 else
2577 fill_fpregset (&fpregs, -1);
2578
2579 note_data = (char *) elfcore_write_prfpreg (obfd,
2580 note_data,
2581 note_size,
2582 &fpregs, sizeof (fpregs));
2583
2584 #ifdef FILL_FPXREGSET
2585 if (core_regset_p
2586 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
2587 sizeof (fpxregs))) != NULL
2588 && regset->collect_regset != NULL)
2589 regset->collect_regset (regset, current_regcache, -1,
2590 &fpxregs, sizeof (fpxregs));
2591 else
2592 fill_fpxregset (&fpxregs, -1);
2593
2594 note_data = (char *) elfcore_write_prxfpreg (obfd,
2595 note_data,
2596 note_size,
2597 &fpxregs, sizeof (fpxregs));
2598 #endif
2599 return note_data;
2600 }
2601
2602 struct linux_nat_corefile_thread_data
2603 {
2604 bfd *obfd;
2605 char *note_data;
2606 int *note_size;
2607 int num_notes;
2608 };
2609
2610 /* Called once per LWP via iterate_over_lwps.  Records the thread's
2611 register state for the corefile note section. */
2612
2613 static int
2614 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2615 {
2616 struct linux_nat_corefile_thread_data *args = data;
2617 ptid_t saved_ptid = inferior_ptid;
2618
2619 inferior_ptid = ti->ptid;
2620 registers_changed ();
2621 target_fetch_registers (-1); /* FIXME should not be necessary;
2622 fill_gregset should do it automatically. */
2623 args->note_data = linux_nat_do_thread_registers (args->obfd,
2624 ti->ptid,
2625 args->note_data,
2626 args->note_size);
2627 args->num_notes++;
2628 inferior_ptid = saved_ptid;
2629 registers_changed ();
2630 target_fetch_registers (-1); /* FIXME should not be necessary;
2631 fill_gregset should do it automatically. */
2632 return 0;
2633 }
2634
2635 /* Records the register state for the corefile note section. */
2636
2637 static char *
2638 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2639 char *note_data, int *note_size)
2640 {
2641 registers_changed ();
2642 target_fetch_registers (-1); /* FIXME should not be necessary;
2643 fill_gregset should do it automatically. */
2644 return linux_nat_do_thread_registers (obfd,
2645 ptid_build (ptid_get_pid (inferior_ptid),
2646 ptid_get_pid (inferior_ptid),
2647 0),
2648 note_data, note_size);
2650 }
2651
2652 /* Fills the "to_make_corefile_note" target vector. Builds the note
2653 section for a corefile, and returns it in a malloc buffer. */
2654
2655 static char *
2656 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2657 {
2658 struct linux_nat_corefile_thread_data thread_args;
2659 struct cleanup *old_chain;
2660 char fname[16] = { '\0' };
2661 char psargs[80] = { '\0' };
2662 char *note_data = NULL;
2663 ptid_t current_ptid = inferior_ptid;
2664 gdb_byte *auxv;
2665 int auxv_len;
2666
2667 if (get_exec_file (0))
2668 {
2669 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2670 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2671 if (get_inferior_args ())
2672 {
2673 strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
2674 strncat (psargs, get_inferior_args (),
2675 sizeof (psargs) - strlen (psargs));
2676 }
2677 note_data = (char *) elfcore_write_prpsinfo (obfd,
2678 note_data,
2679 note_size, fname, psargs);
2680 }
2681
2682 /* Dump information for threads. */
2683 thread_args.obfd = obfd;
2684 thread_args.note_data = note_data;
2685 thread_args.note_size = note_size;
2686 thread_args.num_notes = 0;
2687 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2688 if (thread_args.num_notes == 0)
2689 {
2690 /* iterate_over_lwps didn't come up with any LWPs; just
2691 use inferior_ptid. */
2692 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2693 note_data, note_size);
2694 }
2695 else
2696 {
2697 note_data = thread_args.note_data;
2698 }
2699
2700 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
2701 NULL, &auxv);
2702 if (auxv_len > 0)
2703 {
2704 note_data = elfcore_write_note (obfd, note_data, note_size,
2705 "CORE", NT_AUXV, auxv, auxv_len);
2706 xfree (auxv);
2707 }
2708
2709 make_cleanup (xfree, note_data);
2710 return note_data;
2711 }
2712
2713 /* Implement the "info proc" command. */
2714
2715 static void
2716 linux_nat_info_proc_cmd (char *args, int from_tty)
2717 {
2718 long long pid = PIDGET (inferior_ptid);
2719 FILE *procfile;
2720 char **argv = NULL;
2721 char buffer[MAXPATHLEN];
2722 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2723 int cmdline_f = 1;
2724 int cwd_f = 1;
2725 int exe_f = 1;
2726 int mappings_f = 0;
2727 int environ_f = 0;
2728 int status_f = 0;
2729 int stat_f = 0;
2730 int all = 0;
2731 struct stat dummy;
2732
2733 if (args)
2734 {
2735 /* Break up 'args' into an argv array. */
2736 if ((argv = buildargv (args)) == NULL)
2737 nomem (0);
2738 else
2739 make_cleanup_freeargv (argv);
2740 }
2741 while (argv != NULL && *argv != NULL)
2742 {
2743 if (isdigit (argv[0][0]))
2744 {
2745 pid = strtoul (argv[0], NULL, 10);
2746 }
2747 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2748 {
2749 mappings_f = 1;
2750 }
2751 else if (strcmp (argv[0], "status") == 0)
2752 {
2753 status_f = 1;
2754 }
2755 else if (strcmp (argv[0], "stat") == 0)
2756 {
2757 stat_f = 1;
2758 }
2759 else if (strcmp (argv[0], "cmd") == 0)
2760 {
2761 cmdline_f = 1;
2762 }
2763 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2764 {
2765 exe_f = 1;
2766 }
2767 else if (strcmp (argv[0], "cwd") == 0)
2768 {
2769 cwd_f = 1;
2770 }
2771 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2772 {
2773 all = 1;
2774 }
2775 else
2776 {
2777 /* [...] (future options here) */
2778 }
2779 argv++;
2780 }
2781 if (pid == 0)
2782 error (_("No current process: you must name one."));
2783
2784 sprintf (fname1, "/proc/%lld", pid);
2785 if (stat (fname1, &dummy) != 0)
2786 error (_("No /proc directory: '%s'"), fname1);
2787
2788 printf_filtered (_("process %lld\n"), pid);
2789 if (cmdline_f || all)
2790 {
2791 sprintf (fname1, "/proc/%lld/cmdline", pid);
2792 if ((procfile = fopen (fname1, "r")) != NULL)
2793 {
2794 fgets (buffer, sizeof (buffer), procfile);
2795 printf_filtered ("cmdline = '%s'\n", buffer);
2796 fclose (procfile);
2797 }
2798 else
2799 warning (_("unable to open /proc file '%s'"), fname1);
2800 }
2801 if (cwd_f || all)
2802 {
2803 sprintf (fname1, "/proc/%lld/cwd", pid);
2804 memset (fname2, 0, sizeof (fname2));
2805 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2806 printf_filtered ("cwd = '%s'\n", fname2);
2807 else
2808 warning (_("unable to read link '%s'"), fname1);
2809 }
2810 if (exe_f || all)
2811 {
2812 sprintf (fname1, "/proc/%lld/exe", pid);
2813 memset (fname2, 0, sizeof (fname2));
2814 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2815 printf_filtered ("exe = '%s'\n", fname2);
2816 else
2817 warning (_("unable to read link '%s'"), fname1);
2818 }
2819 if (mappings_f || all)
2820 {
2821 sprintf (fname1, "/proc/%lld/maps", pid);
2822 if ((procfile = fopen (fname1, "r")) != NULL)
2823 {
2824 long long addr, endaddr, size, offset, inode;
2825 char permissions[8], device[8], filename[MAXPATHLEN];
2826
2827 printf_filtered (_("Mapped address spaces:\n\n"));
2828 if (TARGET_ADDR_BIT == 32)
2829 {
2830 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2831 "Start Addr",
2832 " End Addr",
2833 " Size", " Offset", "objfile");
2834 }
2835 else
2836 {
2837 printf_filtered (" %18s %18s %10s %10s %7s\n",
2838 "Start Addr",
2839 " End Addr",
2840 " Size", " Offset", "objfile");
2841 }
2842
2843 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2844 &offset, &device[0], &inode, &filename[0]))
2845 {
2846 size = endaddr - addr;
2847
2848 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2849 calls here (and possibly above) should be abstracted
2850 out into their own functions? Andrew suggests using
2851 a generic local_address_string instead to print out
2852 the addresses; that makes sense to me, too. */
2853
2854 if (TARGET_ADDR_BIT == 32)
2855 {
2856 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2857 (unsigned long) addr, /* FIXME: pr_addr */
2858 (unsigned long) endaddr,
2859 (int) size,
2860 (unsigned int) offset,
2861 filename[0] ? filename : "");
2862 }
2863 else
2864 {
2865 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2866 (unsigned long) addr, /* FIXME: pr_addr */
2867 (unsigned long) endaddr,
2868 (int) size,
2869 (unsigned int) offset,
2870 filename[0] ? filename : "");
2871 }
2872 }
2873
2874 fclose (procfile);
2875 }
2876 else
2877 warning (_("unable to open /proc file '%s'"), fname1);
2878 }
2879 if (status_f || all)
2880 {
2881 sprintf (fname1, "/proc/%lld/status", pid);
2882 if ((procfile = fopen (fname1, "r")) != NULL)
2883 {
2884 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2885 puts_filtered (buffer);
2886 fclose (procfile);
2887 }
2888 else
2889 warning (_("unable to open /proc file '%s'"), fname1);
2890 }
2891 if (stat_f || all)
2892 {
2893 sprintf (fname1, "/proc/%lld/stat", pid);
2894 if ((procfile = fopen (fname1, "r")) != NULL)
2895 {
2896 int itmp;
2897 char ctmp;
2898
2899 if (fscanf (procfile, "%d ", &itmp) > 0)
2900 printf_filtered (_("Process: %d\n"), itmp);
2901 if (fscanf (procfile, "%s ", &buffer[0]) > 0)
2902 printf_filtered (_("Exec file: %s\n"), buffer);
2903 if (fscanf (procfile, "%c ", &ctmp) > 0)
2904 printf_filtered (_("State: %c\n"), ctmp);
2905 if (fscanf (procfile, "%d ", &itmp) > 0)
2906 printf_filtered (_("Parent process: %d\n"), itmp);
2907 if (fscanf (procfile, "%d ", &itmp) > 0)
2908 printf_filtered (_("Process group: %d\n"), itmp);
2909 if (fscanf (procfile, "%d ", &itmp) > 0)
2910 printf_filtered (_("Session id: %d\n"), itmp);
2911 if (fscanf (procfile, "%d ", &itmp) > 0)
2912 printf_filtered (_("TTY: %d\n"), itmp);
2913 if (fscanf (procfile, "%d ", &itmp) > 0)
2914 printf_filtered (_("TTY owner process group: %d\n"), itmp);
2915 if (fscanf (procfile, "%u ", &itmp) > 0)
2916 printf_filtered (_("Flags: 0x%x\n"), itmp);
2917 if (fscanf (procfile, "%u ", &itmp) > 0)
2918 printf_filtered (_("Minor faults (no memory page): %u\n"),
2919 (unsigned int) itmp);
2920 if (fscanf (procfile, "%u ", &itmp) > 0)
2921 printf_filtered (_("Minor faults, children: %u\n"),
2922 (unsigned int) itmp);
2923 if (fscanf (procfile, "%u ", &itmp) > 0)
2924 printf_filtered (_("Major faults (memory page faults): %u\n"),
2925 (unsigned int) itmp);
2926 if (fscanf (procfile, "%u ", &itmp) > 0)
2927 printf_filtered (_("Major faults, children: %u\n"),
2928 (unsigned int) itmp);
2929 if (fscanf (procfile, "%d ", &itmp) > 0)
2930 printf_filtered ("utime: %d\n", itmp);
2931 if (fscanf (procfile, "%d ", &itmp) > 0)
2932 printf_filtered ("stime: %d\n", itmp);
2933 if (fscanf (procfile, "%d ", &itmp) > 0)
2934 printf_filtered ("utime, children: %d\n", itmp);
2935 if (fscanf (procfile, "%d ", &itmp) > 0)
2936 printf_filtered ("stime, children: %d\n", itmp);
2937 if (fscanf (procfile, "%d ", &itmp) > 0)
2938 printf_filtered (_("jiffies remaining in current time slice: %d\n"),
2939 itmp);
2940 if (fscanf (procfile, "%d ", &itmp) > 0)
2941 printf_filtered ("'nice' value: %d\n", itmp);
2942 if (fscanf (procfile, "%u ", &itmp) > 0)
2943 printf_filtered (_("jiffies until next timeout: %u\n"),
2944 (unsigned int) itmp);
2945 if (fscanf (procfile, "%u ", &itmp) > 0)
2946 printf_filtered ("jiffies until next SIGALRM: %u\n",
2947 (unsigned int) itmp);
2948 if (fscanf (procfile, "%d ", &itmp) > 0)
2949 printf_filtered (_("start time (jiffies since system boot): %d\n"),
2950 itmp);
2951 if (fscanf (procfile, "%u ", &itmp) > 0)
2952 printf_filtered (_("Virtual memory size: %u\n"),
2953 (unsigned int) itmp);
2954 if (fscanf (procfile, "%u ", &itmp) > 0)
2955 printf_filtered (_("Resident set size: %u\n"), (unsigned int) itmp);
2956 if (fscanf (procfile, "%u ", &itmp) > 0)
2957 printf_filtered ("rlim: %u\n", (unsigned int) itmp);
2958 if (fscanf (procfile, "%u ", &itmp) > 0)
2959 printf_filtered (_("Start of text: 0x%x\n"), itmp);
2960 if (fscanf (procfile, "%u ", &itmp) > 0)
2961 printf_filtered (_("End of text: 0x%x\n"), itmp);
2962 if (fscanf (procfile, "%u ", &itmp) > 0)
2963 printf_filtered (_("Start of stack: 0x%x\n"), itmp);
2964 #if 0 /* Don't know how architecture-dependent the rest is...
2965 Anyway the signal bitmap info is available from "status". */
2966 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2967 printf_filtered (_("Kernel stack pointer: 0x%x\n"), itmp);
2968 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2969 printf_filtered (_("Kernel instr pointer: 0x%x\n"), itmp);
2970 if (fscanf (procfile, "%d ", &itmp) > 0)
2971 printf_filtered (_("Pending signals bitmap: 0x%x\n"), itmp);
2972 if (fscanf (procfile, "%d ", &itmp) > 0)
2973 printf_filtered (_("Blocked signals bitmap: 0x%x\n"), itmp);
2974 if (fscanf (procfile, "%d ", &itmp) > 0)
2975 printf_filtered (_("Ignored signals bitmap: 0x%x\n"), itmp);
2976 if (fscanf (procfile, "%d ", &itmp) > 0)
2977 printf_filtered (_("Caught signals bitmap: 0x%x\n"), itmp);
2978 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2979 printf_filtered (_("wchan (system call): 0x%x\n"), itmp);
2980 #endif
2981 fclose (procfile);
2982 }
2983 else
2984 warning (_("unable to open /proc file '%s'"), fname1);
2985 }
2986 }
2987
2988 /* Implement the to_xfer_partial interface for memory reads using the /proc
2989 filesystem. Because we can use a single read() call for /proc, this
2990 can be much more efficient than banging away at PTRACE_PEEKTEXT,
2991 but it doesn't support writes. */
2992
2993 static LONGEST
2994 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
2995 const char *annex, gdb_byte *readbuf,
2996 const gdb_byte *writebuf,
2997 ULONGEST offset, LONGEST len)
2998 {
2999 LONGEST ret;
3000 int fd;
3001 char filename[64];
3002
3003 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3004 return 0;
3005
3006 /* Don't bother for reads of less than three words; the ptrace fallback is fine for those. */
3007 if (len < 3 * sizeof (long))
3008 return 0;
3009
3010 /* We could keep this file open and cache it - possibly one per
3011 thread. That requires some juggling, but is even faster. */
3012 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3013 fd = open (filename, O_RDONLY | O_LARGEFILE);
3014 if (fd == -1)
3015 return 0;
3016
3017 /* If pread64 is available, use it. It's faster if the kernel
3018 supports it (only one syscall), and it's 64-bit safe even on
3019 32-bit platforms (for instance, SPARC debugging a SPARC64
3020 application). */
3021 #ifdef HAVE_PREAD64
3022 if (pread64 (fd, readbuf, len, offset) != len)
3023 #else
3024 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3025 #endif
3026 ret = 0;
3027 else
3028 ret = len;
3029
3030 close (fd);
3031 return ret;
3032 }
3033
3034 /* Parse LINE as a signal set and add its set bits to SIGS. */
3035
3036 static void
3037 add_line_to_sigset (const char *line, sigset_t *sigs)
3038 {
3039 int len = strlen (line) - 1;
3040 const char *p;
3041 int signum;
3042
3043 if (line[len] != '\n')
3044 error (_("Could not parse signal set: %s"), line);
3045
3046 p = line;
3047 signum = len * 4;
3048 while (len-- > 0)
3049 {
3050 int digit;
3051
3052 if (*p >= '0' && *p <= '9')
3053 digit = *p - '0';
3054 else if (*p >= 'a' && *p <= 'f')
3055 digit = *p - 'a' + 10;
3056 else
3057 error (_("Could not parse signal set: %s"), line);
3058
3059 signum -= 4;
3060
3061 if (digit & 1)
3062 sigaddset (sigs, signum + 1);
3063 if (digit & 2)
3064 sigaddset (sigs, signum + 2);
3065 if (digit & 4)
3066 sigaddset (sigs, signum + 3);
3067 if (digit & 8)
3068 sigaddset (sigs, signum + 4);
3069
3070 p++;
3071 }
3072 }
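
/* A worked example of the decoding above (illustrative only; nothing in
   this file calls it).  The kernel prints the mask as hex with the
   lowest-numbered signals in the rightmost digit, so in the 64-bit mask
   "0000000000000100\n" the set bit is bit 8, i.e. signal 9 (SIGKILL on
   GNU/Linux).  */

static void
illustrate_sigset_decode (void)
{
  sigset_t sigs;

  sigemptyset (&sigs);
  add_line_to_sigset ("0000000000000100\n", &sigs);

  /* Signal 9 is now a member; every other signal is clear.  */
  gdb_assert (sigismember (&sigs, 9));
}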
3073
3074 /* Find process PID's pending, blocked, and ignored signals from
3075 /proc/pid/status and set PENDING, BLOCKED, and IGNORED to match. */
3076
3077 void
3078 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3079 {
3080 FILE *procfile;
3081 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3082 int signum;
3083
3084 sigemptyset (pending);
3085 sigemptyset (blocked);
3086 sigemptyset (ignored);
3087 sprintf (fname, "/proc/%d/status", pid);
3088 procfile = fopen (fname, "r");
3089 if (procfile == NULL)
3090 error (_("Could not open %s"), fname);
3091
3092 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3093 {
3094 /* Normal queued signals are on the SigPnd line in the status
3095 file. However, 2.6 kernels also have a "shared" pending
3096 queue for delivering signals to a thread group, so check for
3097 a ShdPnd line also.
3098
3099 Unfortunately some Red Hat kernels include the shared pending
3100 queue but not the ShdPnd status field. */
3101
3102 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3103 add_line_to_sigset (buffer + 8, pending);
3104 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3105 add_line_to_sigset (buffer + 8, pending);
3106 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3107 add_line_to_sigset (buffer + 8, blocked);
3108 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3109 add_line_to_sigset (buffer + 8, ignored);
3110 }
3111
3112 fclose (procfile);
3113 }
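
/* A minimal usage sketch for the routine above (illustrative only;
   nothing in this file calls it).  It asks which signals a process has
   pending, blocked, or ignored, and then tests one of them with
   sigismember.  */

static void
illustrate_pending_signals_query (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  if (sigismember (&pending, SIGINT))
    fprintf_unfiltered (gdb_stdlog,
			"process %d has a SIGINT queued but not delivered\n",
			pid);
}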
3114
3115 static LONGEST
3116 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3117 const char *annex, gdb_byte *readbuf,
3118 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3119 {
3120 LONGEST xfer;
3121
3122 if (object == TARGET_OBJECT_AUXV)
3123 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3124 offset, len);
3125
3126 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3127 offset, len);
3128 if (xfer != 0)
3129 return xfer;
3130
3131 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3132 offset, len);
3133 }
3134
3135 #ifndef FETCH_INFERIOR_REGISTERS
3136
3137 /* Return the address in the core dump or inferior of register
3138 REGNO. */
3139
3140 static CORE_ADDR
3141 linux_register_u_offset (int regno)
3142 {
3143 /* FIXME drow/2005-09-04: The hardcoded use of register_addr should go
3144 away. This requires disentangling the various definitions of it
3145 (particularly alpha-nat.c's). */
3146 return register_addr (regno, 0);
3147 }
3148
3149 #endif
3150
3151 /* Create a prototype generic Linux target. The client can override
3152 it with local methods. */
3153
3154 struct target_ops *
3155 linux_target (void)
3156 {
3157 struct target_ops *t;
3158
3159 #ifdef FETCH_INFERIOR_REGISTERS
3160 t = inf_ptrace_target ();
3161 #else
3162 t = inf_ptrace_trad_target (linux_register_u_offset);
3163 #endif
3164 t->to_insert_fork_catchpoint = child_insert_fork_catchpoint;
3165 t->to_insert_vfork_catchpoint = child_insert_vfork_catchpoint;
3166 t->to_insert_exec_catchpoint = child_insert_exec_catchpoint;
3167 t->to_pid_to_exec_file = child_pid_to_exec_file;
3168 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3169 t->to_post_attach = child_post_attach;
3170 t->to_follow_fork = child_follow_fork;
3171 t->to_find_memory_regions = linux_nat_find_memory_regions;
3172 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3173
3174 super_xfer_partial = t->to_xfer_partial;
3175 t->to_xfer_partial = linux_xfer_partial;
3176
3177 return t;
3178 }
3179
3180 void
3181 linux_nat_add_target (struct target_ops *t)
3182 {
3183 /* Save the provided single-threaded target. We save this in a separate
3184 variable because another target we've inherited from (e.g. inf-ptrace)
3185 may have saved a pointer to T; we want to use it for the final
3186 process stratum target. */
3187 linux_ops_saved = *t;
3188 linux_ops = &linux_ops_saved;
3189
3190 /* Override some methods for multithreading. */
3191 t->to_attach = linux_nat_attach;
3192 t->to_detach = linux_nat_detach;
3193 t->to_resume = linux_nat_resume;
3194 t->to_wait = linux_nat_wait;
3195 t->to_xfer_partial = linux_nat_xfer_partial;
3196 t->to_kill = linux_nat_kill;
3197 t->to_mourn_inferior = linux_nat_mourn_inferior;
3198 t->to_thread_alive = linux_nat_thread_alive;
3199 t->to_pid_to_str = linux_nat_pid_to_str;
3200 t->to_has_thread_control = tc_schedlock;
3201
3202 /* We don't change the stratum; this target will sit at
3203 process_stratum and thread_db will set at thread_stratum. This
3204 is a little strange, since this is a multi-threaded-capable
3205 target, but we want to be on the stack below thread_db, and we
3206 also want to be used for single-threaded processes. */
3207
3208 add_target (t);
3209
3210 /* TODO: Eliminate this and have libthread_db use
3211 find_target_beneath. */
3212 thread_db_init (t);
3213 }
3214
3215 void
3216 _initialize_linux_nat (void)
3217 {
3218 struct sigaction action;
3219
3220 add_info ("proc", linux_nat_info_proc_cmd, _("\
3221 Show /proc process information about any running process.\n\
3222 Specify any process id, or use the program being debugged by default.\n\
3223 Specify any of the following keywords for detailed info:\n\
3224 mappings -- list of mapped memory regions.\n\
3225 stat -- list a bunch of random process info.\n\
3226 status -- list a different bunch of random process info.\n\
3227 all -- list all available /proc info."));
3228
3229 /* Save the original signal mask. */
3230 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3231
3232 action.sa_handler = sigchld_handler;
3233 sigemptyset (&action.sa_mask);
3234 action.sa_flags = SA_RESTART;
3235 sigaction (SIGCHLD, &action, NULL);
3236
3237 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3238 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3239 sigdelset (&suspend_mask, SIGCHLD);
3240
3241 sigemptyset (&blocked_mask);
3242
3243 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3244 Set debugging of GNU/Linux lwp module."), _("\
3245 Show debugging of GNU/Linux lwp module."), _("\
3246 Enables printf debugging output."),
3247 NULL,
3248 show_debug_linux_nat,
3249 &setdebuglist, &showdebuglist);
3250 }
3251 \f
3252
3253 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3254 the GNU/Linux Threads library and therefore doesn't really belong
3255 here. */
3256
3257 /* Read variable NAME in the target and return its value if found.
3258 Otherwise return zero. It is assumed that the type of the variable
3259 is `int'. */
3260
3261 static int
3262 get_signo (const char *name)
3263 {
3264 struct minimal_symbol *ms;
3265 int signo;
3266
3267 ms = lookup_minimal_symbol (name, NULL, NULL);
3268 if (ms == NULL)
3269 return 0;
3270
3271 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3272 sizeof (signo)) != 0)
3273 return 0;
3274
3275 return signo;
3276 }
3277
3278 /* Return the set of signals used by the threads library in *SET. */
3279
3280 void
3281 lin_thread_get_thread_signals (sigset_t *set)
3282 {
3283 struct sigaction action;
3284 int restart, cancel;
3285
3286 sigemptyset (set);
3287
3288 restart = get_signo ("__pthread_sig_restart");
3289 cancel = get_signo ("__pthread_sig_cancel");
3290
3291 /* LinuxThreads normally uses the first two RT signals, but in some legacy
3292 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
3293 not provide any way for the debugger to query the signal numbers -
3294 fortunately they don't change! */
3295
3296 if (restart == 0)
3297 restart = __SIGRTMIN;
3298
3299 if (cancel == 0)
3300 cancel = __SIGRTMIN + 1;
3301
3302 sigaddset (set, restart);
3303 sigaddset (set, cancel);
3304
3305 /* The GNU/Linux Threads library makes terminating threads send a
3306 special "cancel" signal instead of SIGCHLD. Make sure we catch
3307 those (to prevent them from terminating GDB itself, which is
3308 likely to be their default action) and treat them the same way as
3309 SIGCHLD. */
3310
3311 action.sa_handler = sigchld_handler;
3312 sigemptyset (&action.sa_mask);
3313 action.sa_flags = SA_RESTART;
3314 sigaction (cancel, &action, NULL);
3315
3316 /* We block the "cancel" signal throughout this code ... */
3317 sigaddset (&blocked_mask, cancel);
3318 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3319
3320 /* ... except during a sigsuspend. */
3321 sigdelset (&suspend_mask, cancel);
3322 }
3323