1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdb_string.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
29 #include <sys/syscall.h>
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
38 #include "inf-ptrace.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
50 #include "event-loop.h"
51 #include "event-top.h"
53 /* Note on this file's use of signals:
55 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead
56 of another signal is not entirely significant; we just need for a
57 signal to be delivered, so that we can intercept it. SIGSTOP's
58 advantage is that it can not be blocked. A disadvantage is that it
59 is not a real-time signal, so it can only be queued once; we do not
60 keep track of other sources of SIGSTOP.
62 Two other signals that can't be blocked are SIGCONT and SIGKILL.
63 But we can't use them, because they have special behavior when the
64 signal is generated - not when it is delivered. SIGCONT resumes
65 the entire thread group and SIGKILL kills the entire thread group.
67 A delivered SIGSTOP would stop the entire thread group, not just the
68 thread we tkill'd. But we never let the SIGSTOP deliver; we always
69 intercept and cancel it (by PTRACE_CONT without passing SIGSTOP).
71 We could use a real-time signal instead. This would solve those
72 problems; we could use PTRACE_GETSIGINFO to locate the specific
73 stop signals sent by GDB. But we would still have to have some
74 support for SIGSTOP, since PTRACE_ATTACH generates it, and there
75 are races with trying to find a signal that is not blocked. */
/* If the system headers did not provide the constants, hard-code the normal
   values.  Note that older kernels may not have the PTRACE_EVENT_*
   extended-event support at all; these definitions only supply the numeric
   values, availability is probed at runtime (see linux_test_for_tracefork).  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif

#ifndef PTRACE_GETSIGINFO
#define PTRACE_GETSIGINFO    0x4202
#endif
118 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
119 the use of the multi-threaded target. */
120 static struct target_ops
*linux_ops
;
121 static struct target_ops linux_ops_saved
;
123 /* The method to call, if any, when a new thread is attached. */
124 static void (*linux_nat_new_thread
) (ptid_t
);
126 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
127 Called by our to_xfer_partial. */
128 static LONGEST (*super_xfer_partial
) (struct target_ops
*,
130 const char *, gdb_byte
*,
134 static int debug_linux_nat
;
136 show_debug_linux_nat (struct ui_file
*file
, int from_tty
,
137 struct cmd_list_element
*c
, const char *value
)
139 fprintf_filtered (file
, _("Debugging of GNU/Linux lwp module is %s.\n"),
143 static int debug_linux_nat_async
= 0;
145 show_debug_linux_nat_async (struct ui_file
*file
, int from_tty
,
146 struct cmd_list_element
*c
, const char *value
)
148 fprintf_filtered (file
, _("Debugging of GNU/Linux async lwp module is %s.\n"),
152 static int linux_parent_pid
;
/* A singly-linked list node recording a stopped process and the wait
   status it stopped with, for events noticed before GDB asked for them.  */
struct simple_pid_list
{
  int pid;			/* Process/LWP id.  */
  int status;			/* waitpid status for that pid.  */
  struct simple_pid_list *next;
};
/* Head of the list of new stopped processes.  */
struct simple_pid_list *stopped_pids;
/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */
static int linux_supports_tracefork_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */
static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support.

   To listen to target events asynchronously, we install a SIGCHLD
   handler whose duty is to call waitpid (-1, ..., WNOHANG) to get all
   the pending events into a pipe.  Whenever we're ready to handle
   events asynchronously, this pipe is registered as the waitable file
   handle in the event loop.  When we get to entry target points
   coming out of the common code (target_wait, target_resume, ...),
   that are going to call waitpid, we block SIGCHLD signals, and
   remove all the events placed in the pipe into a local queue.  All
   the subsequent calls to my_waitpid (a waitpid wrapper) check this
   local queue first.  */

/* True if async mode is currently on.  */
static int linux_nat_async_enabled;

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Number of queued events in the pipe.  */
static volatile int linux_nat_num_queued_events;

/* If async mode is on, true if we're listening for events; false if
   target events are blocked.  */
static int linux_nat_async_events_enabled;

static int linux_nat_async_events (int enable);
static void pipe_to_local_event_queue (void);
static void local_event_queue_to_pipe (void);
static void linux_nat_event_pipe_push (int pid, int status, int options);
static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
static void linux_nat_set_async_mode (int on);
static void linux_nat_async (void (*callback)
			     (enum inferior_event_type event_type,
			      void *context),
			     void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);
215 /* Captures the result of a successful waitpid call, along with the
216 options used in that call. */
217 struct waitpid_result
222 struct waitpid_result
*next
;
225 /* A singly-linked list of the results of the waitpid calls performed
226 in the async SIGCHLD handler. */
227 static struct waitpid_result
*waitpid_queue
= NULL
;
230 queued_waitpid (int pid
, int *status
, int flags
)
232 struct waitpid_result
*msg
= waitpid_queue
, *prev
= NULL
;
234 if (debug_linux_nat_async
)
235 fprintf_unfiltered (gdb_stdlog
,
237 QWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n",
238 linux_nat_async_events_enabled
,
239 linux_nat_num_queued_events
);
243 for (; msg
; prev
= msg
, msg
= msg
->next
)
244 if (pid
== -1 || pid
== msg
->pid
)
247 else if (flags
& __WCLONE
)
249 for (; msg
; prev
= msg
, msg
= msg
->next
)
250 if (msg
->options
& __WCLONE
251 && (pid
== -1 || pid
== msg
->pid
))
256 for (; msg
; prev
= msg
, msg
= msg
->next
)
257 if ((msg
->options
& __WCLONE
) == 0
258 && (pid
== -1 || pid
== msg
->pid
))
267 prev
->next
= msg
->next
;
269 waitpid_queue
= msg
->next
;
273 *status
= msg
->status
;
276 if (debug_linux_nat_async
)
277 fprintf_unfiltered (gdb_stdlog
, "QWPID: pid(%d), status(%x)\n",
284 if (debug_linux_nat_async
)
285 fprintf_unfiltered (gdb_stdlog
, "QWPID: miss\n");
293 push_waitpid (int pid
, int status
, int options
)
295 struct waitpid_result
*event
, *new_event
;
297 new_event
= xmalloc (sizeof (*new_event
));
298 new_event
->pid
= pid
;
299 new_event
->status
= status
;
300 new_event
->options
= options
;
301 new_event
->next
= NULL
;
305 for (event
= waitpid_queue
;
306 event
&& event
->next
;
310 event
->next
= new_event
;
313 waitpid_queue
= new_event
;
316 /* Drain all queued events of PID. If PID is -1, the effect is of
317 draining all events. */
319 drain_queued_events (int pid
)
321 while (queued_waitpid (pid
, NULL
, __WALL
) != -1)
326 /* Trivial list manipulation functions to keep track of a list of
327 new stopped processes. */
329 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
331 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
333 new_pid
->status
= status
;
334 new_pid
->next
= *listp
;
339 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *status
)
341 struct simple_pid_list
**p
;
343 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
344 if ((*p
)->pid
== pid
)
346 struct simple_pid_list
*next
= (*p
)->next
;
347 *status
= (*p
)->status
;
356 linux_record_stopped_pid (int pid
, int status
)
358 add_to_pid_list (&stopped_pids
, pid
, status
);
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the child: request tracing, stop for the parent to set
   ptrace options, then fork once more and exit so the parent can
   observe (or fail to observe) the fork event.  */
static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
375 /* Wrapper function for waitpid which handles EINTR, and checks for
376 locally queued events. */
379 my_waitpid (int pid
, int *status
, int flags
)
383 /* There should be no concurrent calls to waitpid. */
384 gdb_assert (!linux_nat_async_events_enabled
);
386 ret
= queued_waitpid (pid
, status
, flags
);
392 ret
= waitpid (pid
, status
, flags
);
394 while (ret
== -1 && errno
== EINTR
);
399 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
401 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
402 we know that the feature is not available. This may change the tracing
403 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
405 However, if it succeeds, we don't know for sure that the feature is
406 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
407 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
408 fork tracing, and let it fork. If the process exits, we assume that we
409 can't use TRACEFORK; if we get the fork notification, and we can extract
410 the new child's PID, then we assume that we can. */
413 linux_test_for_tracefork (int original_pid
)
415 int child_pid
, ret
, status
;
418 linux_supports_tracefork_flag
= 0;
419 linux_supports_tracevforkdone_flag
= 0;
421 ret
= ptrace (PTRACE_SETOPTIONS
, original_pid
, 0, PTRACE_O_TRACEFORK
);
427 perror_with_name (("fork"));
430 linux_tracefork_child ();
432 ret
= my_waitpid (child_pid
, &status
, 0);
434 perror_with_name (("waitpid"));
435 else if (ret
!= child_pid
)
436 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret
);
437 if (! WIFSTOPPED (status
))
438 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status
);
440 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0, PTRACE_O_TRACEFORK
);
443 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
446 warning (_("linux_test_for_tracefork: failed to kill child"));
450 ret
= my_waitpid (child_pid
, &status
, 0);
451 if (ret
!= child_pid
)
452 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
453 else if (!WIFSIGNALED (status
))
454 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
455 "killed child"), status
);
460 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
461 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
462 PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORKDONE
);
463 linux_supports_tracevforkdone_flag
= (ret
== 0);
465 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
467 warning (_("linux_test_for_tracefork: failed to resume child"));
469 ret
= my_waitpid (child_pid
, &status
, 0);
471 if (ret
== child_pid
&& WIFSTOPPED (status
)
472 && status
>> 16 == PTRACE_EVENT_FORK
)
475 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
476 if (ret
== 0 && second_pid
!= 0)
480 linux_supports_tracefork_flag
= 1;
481 my_waitpid (second_pid
, &second_status
, 0);
482 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
484 warning (_("linux_test_for_tracefork: failed to kill second child"));
485 my_waitpid (second_pid
, &status
, 0);
489 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
490 "(%d, status 0x%x)"), ret
, status
);
492 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
494 warning (_("linux_test_for_tracefork: failed to kill child"));
495 my_waitpid (child_pid
, &status
, 0);
498 /* Return non-zero iff we have tracefork functionality available.
499 This function also sets linux_supports_tracefork_flag. */
502 linux_supports_tracefork (int pid
)
504 if (linux_supports_tracefork_flag
== -1)
505 linux_test_for_tracefork (pid
);
506 return linux_supports_tracefork_flag
;
510 linux_supports_tracevforkdone (int pid
)
512 if (linux_supports_tracefork_flag
== -1)
513 linux_test_for_tracefork (pid
);
514 return linux_supports_tracevforkdone_flag
;
519 linux_enable_event_reporting (ptid_t ptid
)
521 int pid
= ptid_get_lwp (ptid
);
525 pid
= ptid_get_pid (ptid
);
527 if (! linux_supports_tracefork (pid
))
530 options
= PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEEXEC
531 | PTRACE_O_TRACECLONE
;
532 if (linux_supports_tracevforkdone (pid
))
533 options
|= PTRACE_O_TRACEVFORKDONE
;
535 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
536 read-only process state. */
538 ptrace (PTRACE_SETOPTIONS
, pid
, 0, options
);
542 linux_child_post_attach (int pid
)
544 linux_enable_event_reporting (pid_to_ptid (pid
));
545 check_for_thread_db ();
549 linux_child_post_startup_inferior (ptid_t ptid
)
551 linux_enable_event_reporting (ptid
);
552 check_for_thread_db ();
556 linux_child_follow_fork (struct target_ops
*ops
, int follow_child
)
559 struct target_waitstatus last_status
;
561 int parent_pid
, child_pid
;
563 if (target_can_async_p ())
564 target_async (NULL
, 0);
566 get_last_target_status (&last_ptid
, &last_status
);
567 has_vforked
= (last_status
.kind
== TARGET_WAITKIND_VFORKED
);
568 parent_pid
= ptid_get_lwp (last_ptid
);
570 parent_pid
= ptid_get_pid (last_ptid
);
571 child_pid
= last_status
.value
.related_pid
;
575 /* We're already attached to the parent, by default. */
577 /* Before detaching from the child, remove all breakpoints from
578 it. (This won't actually modify the breakpoint list, but will
579 physically remove the breakpoints from the child.) */
580 /* If we vforked this will remove the breakpoints from the parent
581 also, but they'll be reinserted below. */
582 detach_breakpoints (child_pid
);
584 /* Detach new forked process? */
587 if (info_verbose
|| debug_linux_nat
)
589 target_terminal_ours ();
590 fprintf_filtered (gdb_stdlog
,
591 "Detaching after fork from child process %d.\n",
595 ptrace (PTRACE_DETACH
, child_pid
, 0, 0);
599 struct fork_info
*fp
;
600 /* Retain child fork in ptrace (stopped) state. */
601 fp
= find_fork_pid (child_pid
);
603 fp
= add_fork (child_pid
);
604 fork_save_infrun_state (fp
, 0);
609 gdb_assert (linux_supports_tracefork_flag
>= 0);
610 if (linux_supports_tracevforkdone (0))
614 ptrace (PTRACE_CONT
, parent_pid
, 0, 0);
615 my_waitpid (parent_pid
, &status
, __WALL
);
616 if ((status
>> 16) != PTRACE_EVENT_VFORK_DONE
)
617 warning (_("Unexpected waitpid result %06x when waiting for "
618 "vfork-done"), status
);
622 /* We can't insert breakpoints until the child has
623 finished with the shared memory region. We need to
624 wait until that happens. Ideal would be to just
626 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
627 - waitpid (parent_pid, &status, __WALL);
628 However, most architectures can't handle a syscall
629 being traced on the way out if it wasn't traced on
632 We might also think to loop, continuing the child
633 until it exits or gets a SIGTRAP. One problem is
634 that the child might call ptrace with PTRACE_TRACEME.
636 There's no simple and reliable way to figure out when
637 the vforked child will be done with its copy of the
638 shared memory. We could step it out of the syscall,
639 two instructions, let it go, and then single-step the
640 parent once. When we have hardware single-step, this
641 would work; with software single-step it could still
642 be made to work but we'd have to be able to insert
643 single-step breakpoints in the child, and we'd have
644 to insert -just- the single-step breakpoint in the
645 parent. Very awkward.
647 In the end, the best we can do is to make sure it
648 runs for a little while. Hopefully it will be out of
649 range of any breakpoints we reinsert. Usually this
650 is only the single-step breakpoint at vfork's return
656 /* Since we vforked, breakpoints were removed in the parent
657 too. Put them back. */
658 reattach_breakpoints (parent_pid
);
663 char child_pid_spelling
[40];
665 /* Needed to keep the breakpoint lists in sync. */
667 detach_breakpoints (child_pid
);
669 /* Before detaching from the parent, remove all breakpoints from it. */
670 remove_breakpoints ();
672 if (info_verbose
|| debug_linux_nat
)
674 target_terminal_ours ();
675 fprintf_filtered (gdb_stdlog
,
676 "Attaching after fork to child process %d.\n",
680 /* If we're vforking, we may want to hold on to the parent until
681 the child exits or execs. At exec time we can remove the old
682 breakpoints from the parent and detach it; at exit time we
683 could do the same (or even, sneakily, resume debugging it - the
684 child's exec has failed, or something similar).
686 This doesn't clean up "properly", because we can't call
687 target_detach, but that's OK; if the current target is "child",
688 then it doesn't need any further cleanups, and lin_lwp will
689 generally not encounter vfork (vfork is defined to fork
692 The holding part is very easy if we have VFORKDONE events;
693 but keeping track of both processes is beyond GDB at the
694 moment. So we don't expose the parent to the rest of GDB.
695 Instead we quietly hold onto it until such time as we can
699 linux_parent_pid
= parent_pid
;
700 else if (!detach_fork
)
702 struct fork_info
*fp
;
703 /* Retain parent fork in ptrace (stopped) state. */
704 fp
= find_fork_pid (parent_pid
);
706 fp
= add_fork (parent_pid
);
707 fork_save_infrun_state (fp
, 0);
710 target_detach (NULL
, 0);
712 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
714 /* Reinstall ourselves, since we might have been removed in
715 target_detach (which does other necessary cleanup). */
718 linux_nat_switch_fork (inferior_ptid
);
719 check_for_thread_db ();
721 /* Reset breakpoints in the child as appropriate. */
722 follow_inferior_reset_breakpoints ();
725 if (target_can_async_p ())
726 target_async (inferior_event_handler
, 0);
/* target hook: error out unless fork catchpoints are supported.  */
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}

/* target hook: error out unless vfork catchpoints are supported.  */
static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}

/* target hook: error out unless exec catchpoints are supported.  */
static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}
753 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
754 are processes sharing the same VM space. A multi-threaded process
755 is basically a group of such processes. However, such a grouping
756 is almost entirely a user-space issue; the kernel doesn't enforce
757 such a grouping at all (this might change in the future). In
758 general, we'll rely on the threads library (i.e. the GNU/Linux
759 Threads library) to provide such a grouping.
761 It is perfectly well possible to write a multi-threaded application
762 without the assistance of a threads library, by using the clone
763 system call directly. This module should be able to give some
764 rudimentary support for debugging such applications if developers
765 specify the CLONE_PTRACE flag in the clone system call, and are
766 using the Linux kernel 2.4 or above.
768 Note that there are some peculiarities in GNU/Linux that affect
771 - In general one should specify the __WCLONE flag to waitpid in
772 order to make it report events for any of the cloned processes
773 (and leave it out for the initial process). However, if a cloned
774 process has exited the exit status is only reported if the
775 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
776 we cannot use it since GDB must work on older systems too.
778 - When a traced, cloned process exits and is waited for by the
779 debugger, the kernel reassigns it to the original parent and
780 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
781 library doesn't notice this, which leads to the "zombie problem":
782 When debugged a multi-threaded process that spawns a lot of
783 threads will run out of processes, even if the threads exit,
784 because the "zombies" stay around. */
786 /* List of known LWPs. */
787 struct lwp_info
*lwp_list
;
789 /* Number of LWPs in the list. */
793 /* If the last reported event was a SIGTRAP, this variable is set to
794 the process id of the LWP/thread that got it. */
798 /* Since we cannot wait (in linux_nat_wait) for the initial process and
799 any cloned processes with a single call to waitpid, we have to use
800 the WNOHANG flag and call waitpid in a loop. To optimize
801 things a bit we use `sigsuspend' to wake us up when a process has
802 something to report (it will send us a SIGCHLD if it has). To make
803 this work we have to juggle with the signal mask. We save the
804 original signal mask such that we can restore it before creating a
805 new process in order to avoid blocking certain signals in the
806 inferior. We then block SIGCHLD during the waitpid/sigsuspend
809 /* Original signal mask. */
810 static sigset_t normal_mask
;
812 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
813 _initialize_linux_nat. */
814 static sigset_t suspend_mask
;
816 /* SIGCHLD action for synchronous mode. */
817 struct sigaction sync_sigchld_action
;
819 /* SIGCHLD action for asynchronous mode. */
820 static struct sigaction async_sigchld_action
;
823 /* Prototypes for local functions. */
824 static int stop_wait_callback (struct lwp_info
*lp
, void *data
);
825 static int linux_nat_thread_alive (ptid_t ptid
);
826 static char *linux_child_pid_to_exec_file (int pid
);
827 static int cancel_breakpoint (struct lwp_info
*lp
);
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  Returns a pointer to a static buffer, overwritten
   on each call.  */
static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    snprintf (buf, sizeof (buf), "%s (stopped)",
	      strsignal (WSTOPSIG (status)));
  else if (WIFSIGNALED (status))
    /* Bug fix: a terminated process's signal is WTERMSIG, not
       WSTOPSIG (which reads different status bits).  */
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}
850 /* Initialize the list of LWPs. Note that this module, contrary to
851 what GDB's generic threads layer does for its thread list,
852 re-initializes the LWP lists whenever we mourn or detach (which
853 doesn't involve mourning) the inferior. */
858 struct lwp_info
*lp
, *lpnext
;
860 for (lp
= lwp_list
; lp
; lp
= lpnext
)
870 /* Add the LWP specified by PID to the list. Return a pointer to the
871 structure describing the new LWP. The LWP should already be stopped
872 (with an exception for the very first LWP). */
874 static struct lwp_info
*
875 add_lwp (ptid_t ptid
)
879 gdb_assert (is_lwp (ptid
));
881 lp
= (struct lwp_info
*) xmalloc (sizeof (struct lwp_info
));
883 memset (lp
, 0, sizeof (struct lwp_info
));
885 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
893 if (num_lwps
> 1 && linux_nat_new_thread
!= NULL
)
894 linux_nat_new_thread (ptid
);
899 /* Remove the LWP specified by PID from the list. */
902 delete_lwp (ptid_t ptid
)
904 struct lwp_info
*lp
, *lpprev
;
908 for (lp
= lwp_list
; lp
; lpprev
= lp
, lp
= lp
->next
)
909 if (ptid_equal (lp
->ptid
, ptid
))
918 lpprev
->next
= lp
->next
;
925 /* Return a pointer to the structure describing the LWP corresponding
926 to PID. If no corresponding LWP could be found, return NULL. */
928 static struct lwp_info
*
929 find_lwp_pid (ptid_t ptid
)
935 lwp
= GET_LWP (ptid
);
937 lwp
= GET_PID (ptid
);
939 for (lp
= lwp_list
; lp
; lp
= lp
->next
)
940 if (lwp
== GET_LWP (lp
->ptid
))
946 /* Call CALLBACK with its second argument set to DATA for every LWP in
947 the list. If CALLBACK returns 1 for a particular LWP, return a
948 pointer to the structure describing that LWP immediately.
949 Otherwise return NULL. */
952 iterate_over_lwps (int (*callback
) (struct lwp_info
*, void *), void *data
)
954 struct lwp_info
*lp
, *lpnext
;
956 for (lp
= lwp_list
; lp
; lp
= lpnext
)
959 if ((*callback
) (lp
, data
))
966 /* Update our internal state when changing from one fork (checkpoint,
967 et cetera) to another indicated by NEW_PTID. We can only switch
968 single-threaded applications, so we only create one new LWP, and
969 the previous list is discarded. */
972 linux_nat_switch_fork (ptid_t new_ptid
)
977 lp
= add_lwp (new_ptid
);
981 /* Record a PTID for later deletion. */
986 struct saved_ptids
*next
;
988 static struct saved_ptids
*threads_to_delete
;
991 record_dead_thread (ptid_t ptid
)
993 struct saved_ptids
*p
= xmalloc (sizeof (struct saved_ptids
));
995 p
->next
= threads_to_delete
;
996 threads_to_delete
= p
;
999 /* Delete any dead threads which are not the current thread. */
1004 struct saved_ptids
**p
= &threads_to_delete
;
1007 if (! ptid_equal ((*p
)->ptid
, inferior_ptid
))
1009 struct saved_ptids
*tmp
= *p
;
1010 delete_thread (tmp
->ptid
);
1018 /* Handle the exit of a single thread LP. */
1021 exit_lwp (struct lwp_info
*lp
)
1023 if (in_thread_list (lp
->ptid
))
1025 if (print_thread_events
)
1026 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp
->ptid
));
1028 /* Core GDB cannot deal with us deleting the current thread. */
1029 if (!ptid_equal (lp
->ptid
, inferior_ptid
))
1030 delete_thread (lp
->ptid
);
1032 record_dead_thread (lp
->ptid
);
1035 delete_lwp (lp
->ptid
);
/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */
static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      /* Scan for the "State:" line.  */
      while (fgets (buf, sizeof (buf), status_file))
	{
	  if (strncmp (buf, "State:", 6) == 0)
	    {
	      have_state = 1;
	      break;
	    }
	}
      if (have_state && strstr (buf, "T (stopped)") != NULL)
	retval = 1;
      fclose (status_file);
    }
  return retval;
}
1069 /* Wait for the LWP specified by LP, which we have just attached to.
1070 Returns a wait status for that LWP, to cache. */
1073 linux_nat_post_attach_wait (ptid_t ptid
, int first
, int *cloned
,
1076 pid_t new_pid
, pid
= GET_LWP (ptid
);
1079 if (pid_is_stopped (pid
))
1081 if (debug_linux_nat
)
1082 fprintf_unfiltered (gdb_stdlog
,
1083 "LNPAW: Attaching to a stopped process\n");
1085 /* The process is definitely stopped. It is in a job control
1086 stop, unless the kernel predates the TASK_STOPPED /
1087 TASK_TRACED distinction, in which case it might be in a
1088 ptrace stop. Make sure it is in a ptrace stop; from there we
1089 can kill it, signal it, et cetera.
1091 First make sure there is a pending SIGSTOP. Since we are
1092 already attached, the process can not transition from stopped
1093 to running without a PTRACE_CONT; so we know this signal will
1094 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1095 probably already in the queue (unless this kernel is old
1096 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1097 is not an RT signal, it can only be queued once. */
1098 kill_lwp (pid
, SIGSTOP
);
1100 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1101 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1102 ptrace (PTRACE_CONT
, pid
, 0, 0);
1105 /* Make sure the initial process is stopped. The user-level threads
1106 layer might want to poke around in the inferior, and that won't
1107 work if things haven't stabilized yet. */
1108 new_pid
= my_waitpid (pid
, &status
, 0);
1109 if (new_pid
== -1 && errno
== ECHILD
)
1112 warning (_("%s is a cloned process"), target_pid_to_str (ptid
));
1114 /* Try again with __WCLONE to check cloned processes. */
1115 new_pid
= my_waitpid (pid
, &status
, __WCLONE
);
1119 gdb_assert (pid
== new_pid
&& WIFSTOPPED (status
));
1121 if (WSTOPSIG (status
) != SIGSTOP
)
1124 if (debug_linux_nat
)
1125 fprintf_unfiltered (gdb_stdlog
,
1126 "LNPAW: Received %s after attaching\n",
1127 status_to_str (status
));
1133 /* Attach to the LWP specified by PID. Return 0 if successful or -1
1134 if the new LWP could not be attached. */
1137 lin_lwp_attach_lwp (ptid_t ptid
)
1139 struct lwp_info
*lp
;
1140 int async_events_were_enabled
= 0;
1142 gdb_assert (is_lwp (ptid
));
1144 if (target_can_async_p ())
1145 async_events_were_enabled
= linux_nat_async_events (0);
1147 lp
= find_lwp_pid (ptid
);
1149 /* We assume that we're already attached to any LWP that has an id
1150 equal to the overall process id, and to any LWP that is already
1151 in our list of LWPs. If we're not seeing exit events from threads
1152 and we've had PID wraparound since we last tried to stop all threads,
1153 this assumption might be wrong; fortunately, this is very unlikely
1155 if (GET_LWP (ptid
) != GET_PID (ptid
) && lp
== NULL
)
1157 int status
, cloned
= 0, signalled
= 0;
1159 if (ptrace (PTRACE_ATTACH
, GET_LWP (ptid
), 0, 0) < 0)
1161 /* If we fail to attach to the thread, issue a warning,
1162 but continue. One way this can happen is if thread
1163 creation is interrupted; as of Linux kernel 2.6.19, a
1164 bug may place threads in the thread list and then fail
1166 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid
),
1167 safe_strerror (errno
));
1171 if (debug_linux_nat
)
1172 fprintf_unfiltered (gdb_stdlog
,
1173 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1174 target_pid_to_str (ptid
));
1176 status
= linux_nat_post_attach_wait (ptid
, 0, &cloned
, &signalled
);
1177 lp
= add_lwp (ptid
);
1179 lp
->cloned
= cloned
;
1180 lp
->signalled
= signalled
;
1181 if (WSTOPSIG (status
) != SIGSTOP
)
1184 lp
->status
= status
;
1187 target_post_attach (GET_LWP (lp
->ptid
));
1189 if (debug_linux_nat
)
1191 fprintf_unfiltered (gdb_stdlog
,
1192 "LLAL: waitpid %s received %s\n",
1193 target_pid_to_str (ptid
),
1194 status_to_str (status
));
1199 /* We assume that the LWP representing the original process is
1200 already stopped. Mark it as stopped in the data structure
1201 that the GNU/linux ptrace layer uses to keep track of
1202 threads. Note that this won't have already been done since
1203 the main thread will have, we assume, been stopped by an
1204 attach from a different layer. */
1206 lp
= add_lwp (ptid
);
1210 if (async_events_were_enabled
)
1211 linux_nat_async_events (1);
1217 linux_nat_create_inferior (char *exec_file
, char *allargs
, char **env
,
1220 int saved_async
= 0;
1222 /* The fork_child mechanism is synchronous and calls target_wait, so
1223 we have to mask the async mode. */
1225 if (target_can_async_p ())
1226 saved_async
= linux_nat_async_mask (0);
1229 /* Restore the original signal mask. */
1230 sigprocmask (SIG_SETMASK
, &normal_mask
, NULL
);
1231 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1232 suspend_mask
= normal_mask
;
1233 sigdelset (&suspend_mask
, SIGCHLD
);
1236 linux_ops
->to_create_inferior (exec_file
, allargs
, env
, from_tty
);
1239 linux_nat_async_mask (saved_async
);
1243 linux_nat_attach (char *args
, int from_tty
)
1245 struct lwp_info
*lp
;
1248 /* FIXME: We should probably accept a list of process id's, and
1249 attach all of them. */
1250 linux_ops
->to_attach (args
, from_tty
);
1252 if (!target_can_async_p ())
1254 /* Restore the original signal mask. */
1255 sigprocmask (SIG_SETMASK
, &normal_mask
, NULL
);
1256 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1257 suspend_mask
= normal_mask
;
1258 sigdelset (&suspend_mask
, SIGCHLD
);
1261 /* Add the initial process as the first LWP to the list. */
1262 inferior_ptid
= BUILD_LWP (GET_PID (inferior_ptid
), GET_PID (inferior_ptid
));
1263 lp
= add_lwp (inferior_ptid
);
1265 status
= linux_nat_post_attach_wait (lp
->ptid
, 1, &lp
->cloned
,
1269 /* If this process is not using thread_db, then we still don't
1270 detect any other threads, but add at least this one. */
1271 add_thread_silent (lp
->ptid
);
1273 /* Save the wait status to report later. */
1275 if (debug_linux_nat
)
1276 fprintf_unfiltered (gdb_stdlog
,
1277 "LNA: waitpid %ld, saving status %s\n",
1278 (long) GET_PID (lp
->ptid
), status_to_str (status
));
1280 if (!target_can_async_p ())
1281 lp
->status
= status
;
1284 /* We already waited for this LWP, so put the wait result on the
1285 pipe. The event loop will wake up and gets us to handling
1287 linux_nat_event_pipe_push (GET_PID (lp
->ptid
), status
,
1288 lp
->cloned
? __WCLONE
: 0);
1289 /* Register in the event loop. */
1290 target_async (inferior_event_handler
, 0);
1294 /* Get pending status of LP. */
1296 get_pending_status (struct lwp_info
*lp
, int *status
)
1298 struct target_waitstatus last
;
1301 get_last_target_status (&last_ptid
, &last
);
1303 /* If this lwp is the ptid that GDB is processing an event from, the
1304 signal will be in stop_signal. Otherwise, in all-stop + sync
1305 mode, we may cache pending events in lp->status while trying to
1306 stop all threads (see stop_wait_callback). In async mode, the
1307 events are always cached in waitpid_queue. */
1310 if (GET_LWP (lp
->ptid
) == GET_LWP (last_ptid
))
1312 if (stop_signal
!= TARGET_SIGNAL_0
1313 && signal_pass_state (stop_signal
))
1314 *status
= W_STOPCODE (target_signal_to_host (stop_signal
));
1316 else if (target_can_async_p ())
1317 queued_waitpid (GET_LWP (lp
->ptid
), status
, __WALL
);
1319 *status
= lp
->status
;
1325 detach_callback (struct lwp_info
*lp
, void *data
)
1327 gdb_assert (lp
->status
== 0 || WIFSTOPPED (lp
->status
));
1329 if (debug_linux_nat
&& lp
->status
)
1330 fprintf_unfiltered (gdb_stdlog
, "DC: Pending %s for %s on detach.\n",
1331 strsignal (WSTOPSIG (lp
->status
)),
1332 target_pid_to_str (lp
->ptid
));
1334 /* If there is a pending SIGSTOP, get rid of it. */
1337 if (debug_linux_nat
)
1338 fprintf_unfiltered (gdb_stdlog
,
1339 "DC: Sending SIGCONT to %s\n",
1340 target_pid_to_str (lp
->ptid
));
1342 kill_lwp (GET_LWP (lp
->ptid
), SIGCONT
);
1346 /* We don't actually detach from the LWP that has an id equal to the
1347 overall process id just yet. */
1348 if (GET_LWP (lp
->ptid
) != GET_PID (lp
->ptid
))
1352 /* Pass on any pending signal for this LWP. */
1353 get_pending_status (lp
, &status
);
1356 if (ptrace (PTRACE_DETACH
, GET_LWP (lp
->ptid
), 0,
1357 WSTOPSIG (status
)) < 0)
1358 error (_("Can't detach %s: %s"), target_pid_to_str (lp
->ptid
),
1359 safe_strerror (errno
));
1361 if (debug_linux_nat
)
1362 fprintf_unfiltered (gdb_stdlog
,
1363 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1364 target_pid_to_str (lp
->ptid
),
1365 strsignal (WSTOPSIG (lp
->status
)));
1367 delete_lwp (lp
->ptid
);
1374 linux_nat_detach (char *args
, int from_tty
)
1378 enum target_signal sig
;
1380 if (target_can_async_p ())
1381 linux_nat_async (NULL
, 0);
1383 iterate_over_lwps (detach_callback
, NULL
);
1385 /* Only the initial process should be left right now. */
1386 gdb_assert (num_lwps
== 1);
1388 /* Pass on any pending signal for the last LWP. */
1389 if ((args
== NULL
|| *args
== '\0')
1390 && get_pending_status (lwp_list
, &status
) != -1
1391 && WIFSTOPPED (status
))
1393 /* Put the signal number in ARGS so that inf_ptrace_detach will
1394 pass it along with PTRACE_DETACH. */
1396 sprintf (args
, "%d", (int) WSTOPSIG (status
));
1397 fprintf_unfiltered (gdb_stdlog
,
1398 "LND: Sending signal %s to %s\n",
1400 target_pid_to_str (lwp_list
->ptid
));
1403 trap_ptid
= null_ptid
;
1405 /* Destroy LWP info; it's no longer valid. */
1408 pid
= GET_PID (inferior_ptid
);
1409 inferior_ptid
= pid_to_ptid (pid
);
1410 linux_ops
->to_detach (args
, from_tty
);
1412 if (target_can_async_p ())
1413 drain_queued_events (pid
);
1419 resume_callback (struct lwp_info
*lp
, void *data
)
1421 if (lp
->stopped
&& lp
->status
== 0)
1423 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
1424 0, TARGET_SIGNAL_0
);
1425 if (debug_linux_nat
)
1426 fprintf_unfiltered (gdb_stdlog
,
1427 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1428 target_pid_to_str (lp
->ptid
));
1431 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1438 resume_clear_callback (struct lwp_info
*lp
, void *data
)
1445 resume_set_callback (struct lwp_info
*lp
, void *data
)
1452 linux_nat_resume (ptid_t ptid
, int step
, enum target_signal signo
)
1454 struct lwp_info
*lp
;
1457 if (debug_linux_nat
)
1458 fprintf_unfiltered (gdb_stdlog
,
1459 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1460 step
? "step" : "resume",
1461 target_pid_to_str (ptid
),
1462 signo
? strsignal (signo
) : "0",
1463 target_pid_to_str (inferior_ptid
));
1467 if (target_can_async_p ())
1468 /* Block events while we're here. */
1469 linux_nat_async_events (0);
1471 /* A specific PTID means `step only this process id'. */
1472 resume_all
= (PIDGET (ptid
) == -1);
1475 iterate_over_lwps (resume_set_callback
, NULL
);
1477 iterate_over_lwps (resume_clear_callback
, NULL
);
1479 /* If PID is -1, it's the current inferior that should be
1480 handled specially. */
1481 if (PIDGET (ptid
) == -1)
1482 ptid
= inferior_ptid
;
1484 lp
= find_lwp_pid (ptid
);
1485 gdb_assert (lp
!= NULL
);
1487 ptid
= pid_to_ptid (GET_LWP (lp
->ptid
));
1489 /* Remember if we're stepping. */
1492 /* Mark this LWP as resumed. */
1495 /* If we have a pending wait status for this thread, there is no
1496 point in resuming the process. But first make sure that
1497 linux_nat_wait won't preemptively handle the event - we
1498 should never take this short-circuit if we are going to
1499 leave LP running, since we have skipped resuming all the
1500 other threads. This bit of code needs to be synchronized
1501 with linux_nat_wait. */
1503 /* In async mode, we never have pending wait status. */
1504 if (target_can_async_p () && lp
->status
)
1505 internal_error (__FILE__
, __LINE__
, "Pending status in async mode");
1507 if (lp
->status
&& WIFSTOPPED (lp
->status
))
1509 int saved_signo
= target_signal_from_host (WSTOPSIG (lp
->status
));
1511 if (signal_stop_state (saved_signo
) == 0
1512 && signal_print_state (saved_signo
) == 0
1513 && signal_pass_state (saved_signo
) == 1)
1515 if (debug_linux_nat
)
1516 fprintf_unfiltered (gdb_stdlog
,
1517 "LLR: Not short circuiting for ignored "
1518 "status 0x%x\n", lp
->status
);
1520 /* FIXME: What should we do if we are supposed to continue
1521 this thread with a signal? */
1522 gdb_assert (signo
== TARGET_SIGNAL_0
);
1523 signo
= saved_signo
;
1530 /* FIXME: What should we do if we are supposed to continue
1531 this thread with a signal? */
1532 gdb_assert (signo
== TARGET_SIGNAL_0
);
1534 if (debug_linux_nat
)
1535 fprintf_unfiltered (gdb_stdlog
,
1536 "LLR: Short circuiting for status 0x%x\n",
1542 /* Mark LWP as not stopped to prevent it from being continued by
1547 iterate_over_lwps (resume_callback
, NULL
);
1549 linux_ops
->to_resume (ptid
, step
, signo
);
1550 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1552 if (debug_linux_nat
)
1553 fprintf_unfiltered (gdb_stdlog
,
1554 "LLR: %s %s, %s (resume event thread)\n",
1555 step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1556 target_pid_to_str (ptid
),
1557 signo
? strsignal (signo
) : "0");
1559 if (target_can_async_p ())
1561 target_executing
= 1;
1562 target_async (inferior_event_handler
, 0);
1566 /* Issue kill to specified lwp. */
1568 static int tkill_failed
;
1571 kill_lwp (int lwpid
, int signo
)
1575 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1576 fails, then we are not using nptl threads and we should be using kill. */
1578 #ifdef HAVE_TKILL_SYSCALL
1581 int ret
= syscall (__NR_tkill
, lwpid
, signo
);
1582 if (errno
!= ENOSYS
)
1589 return kill (lwpid
, signo
);
1592 /* Handle a GNU/Linux extended wait response. If we see a clone
1593 event, we need to add the new LWP to our list (and not report the
1594 trap to higher layers). This function returns non-zero if the
1595 event should be ignored and we should wait again. If STOPPING is
1596 true, the new LWP remains stopped, otherwise it is continued. */
1599 linux_handle_extended_wait (struct lwp_info
*lp
, int status
,
1602 int pid
= GET_LWP (lp
->ptid
);
1603 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
1604 struct lwp_info
*new_lp
= NULL
;
1605 int event
= status
>> 16;
1607 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
1608 || event
== PTRACE_EVENT_CLONE
)
1610 unsigned long new_pid
;
1613 ptrace (PTRACE_GETEVENTMSG
, pid
, 0, &new_pid
);
1615 /* If we haven't already seen the new PID stop, wait for it now. */
1616 if (! pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
1618 /* The new child has a pending SIGSTOP. We can't affect it until it
1619 hits the SIGSTOP, but we're already attached. */
1620 ret
= my_waitpid (new_pid
, &status
,
1621 (event
== PTRACE_EVENT_CLONE
) ? __WCLONE
: 0);
1623 perror_with_name (_("waiting for new child"));
1624 else if (ret
!= new_pid
)
1625 internal_error (__FILE__
, __LINE__
,
1626 _("wait returned unexpected PID %d"), ret
);
1627 else if (!WIFSTOPPED (status
))
1628 internal_error (__FILE__
, __LINE__
,
1629 _("wait returned unexpected status 0x%x"), status
);
1632 ourstatus
->value
.related_pid
= new_pid
;
1634 if (event
== PTRACE_EVENT_FORK
)
1635 ourstatus
->kind
= TARGET_WAITKIND_FORKED
;
1636 else if (event
== PTRACE_EVENT_VFORK
)
1637 ourstatus
->kind
= TARGET_WAITKIND_VFORKED
;
1640 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
1641 new_lp
= add_lwp (BUILD_LWP (new_pid
, GET_PID (inferior_ptid
)));
1644 if (WSTOPSIG (status
) != SIGSTOP
)
1646 /* This can happen if someone starts sending signals to
1647 the new thread before it gets a chance to run, which
1648 have a lower number than SIGSTOP (e.g. SIGUSR1).
1649 This is an unlikely case, and harder to handle for
1650 fork / vfork than for clone, so we do not try - but
1651 we handle it for clone events here. We'll send
1652 the other signal on to the thread below. */
1654 new_lp
->signalled
= 1;
1660 new_lp
->stopped
= 1;
1663 new_lp
->resumed
= 1;
1664 ptrace (PTRACE_CONT
, lp
->waitstatus
.value
.related_pid
, 0,
1665 status
? WSTOPSIG (status
) : 0);
1668 if (debug_linux_nat
)
1669 fprintf_unfiltered (gdb_stdlog
,
1670 "LHEW: Got clone event from LWP %ld, resuming\n",
1671 GET_LWP (lp
->ptid
));
1672 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
1680 if (event
== PTRACE_EVENT_EXEC
)
1682 ourstatus
->kind
= TARGET_WAITKIND_EXECD
;
1683 ourstatus
->value
.execd_pathname
1684 = xstrdup (linux_child_pid_to_exec_file (pid
));
1686 if (linux_parent_pid
)
1688 detach_breakpoints (linux_parent_pid
);
1689 ptrace (PTRACE_DETACH
, linux_parent_pid
, 0, 0);
1691 linux_parent_pid
= 0;
1697 internal_error (__FILE__
, __LINE__
,
1698 _("unknown ptrace event %d"), event
);
1701 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1705 wait_lwp (struct lwp_info
*lp
)
1709 int thread_dead
= 0;
1711 gdb_assert (!lp
->stopped
);
1712 gdb_assert (lp
->status
== 0);
1714 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, 0);
1715 if (pid
== -1 && errno
== ECHILD
)
1717 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, __WCLONE
);
1718 if (pid
== -1 && errno
== ECHILD
)
1720 /* The thread has previously exited. We need to delete it
1721 now because, for some vendor 2.4 kernels with NPTL
1722 support backported, there won't be an exit event unless
1723 it is the main thread. 2.6 kernels will report an exit
1724 event for each thread that exits, as expected. */
1726 if (debug_linux_nat
)
1727 fprintf_unfiltered (gdb_stdlog
, "WL: %s vanished.\n",
1728 target_pid_to_str (lp
->ptid
));
1734 gdb_assert (pid
== GET_LWP (lp
->ptid
));
1736 if (debug_linux_nat
)
1738 fprintf_unfiltered (gdb_stdlog
,
1739 "WL: waitpid %s received %s\n",
1740 target_pid_to_str (lp
->ptid
),
1741 status_to_str (status
));
1745 /* Check if the thread has exited. */
1746 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1749 if (debug_linux_nat
)
1750 fprintf_unfiltered (gdb_stdlog
, "WL: %s exited.\n",
1751 target_pid_to_str (lp
->ptid
));
1760 gdb_assert (WIFSTOPPED (status
));
1762 /* Handle GNU/Linux's extended waitstatus for trace events. */
1763 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
1765 if (debug_linux_nat
)
1766 fprintf_unfiltered (gdb_stdlog
,
1767 "WL: Handling extended status 0x%06x\n",
1769 if (linux_handle_extended_wait (lp
, status
, 1))
1770 return wait_lwp (lp
);
1776 /* Save the most recent siginfo for LP. This is currently only called
1777 for SIGTRAP; some ports use the si_addr field for
1778 target_stopped_data_address. In the future, it may also be used to
1779 restore the siginfo of requeued signals. */
1782 save_siginfo (struct lwp_info
*lp
)
1785 ptrace (PTRACE_GETSIGINFO
, GET_LWP (lp
->ptid
),
1786 (PTRACE_TYPE_ARG3
) 0, &lp
->siginfo
);
1789 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1792 /* Send a SIGSTOP to LP. */
1795 stop_callback (struct lwp_info
*lp
, void *data
)
1797 if (!lp
->stopped
&& !lp
->signalled
)
1801 if (debug_linux_nat
)
1803 fprintf_unfiltered (gdb_stdlog
,
1804 "SC: kill %s **<SIGSTOP>**\n",
1805 target_pid_to_str (lp
->ptid
));
1808 ret
= kill_lwp (GET_LWP (lp
->ptid
), SIGSTOP
);
1809 if (debug_linux_nat
)
1811 fprintf_unfiltered (gdb_stdlog
,
1812 "SC: lwp kill %d %s\n",
1814 errno
? safe_strerror (errno
) : "ERRNO-OK");
1818 gdb_assert (lp
->status
== 0);
1824 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1825 a pointer to a set of signals to be flushed immediately. */
1828 stop_wait_callback (struct lwp_info
*lp
, void *data
)
1830 sigset_t
*flush_mask
= data
;
1836 status
= wait_lwp (lp
);
1840 /* Ignore any signals in FLUSH_MASK. */
1841 if (flush_mask
&& sigismember (flush_mask
, WSTOPSIG (status
)))
1850 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
1851 if (debug_linux_nat
)
1852 fprintf_unfiltered (gdb_stdlog
,
1853 "PTRACE_CONT %s, 0, 0 (%s)\n",
1854 target_pid_to_str (lp
->ptid
),
1855 errno
? safe_strerror (errno
) : "OK");
1857 return stop_wait_callback (lp
, flush_mask
);
1860 if (WSTOPSIG (status
) != SIGSTOP
)
1862 if (WSTOPSIG (status
) == SIGTRAP
)
1864 /* If a LWP other than the LWP that we're reporting an
1865 event for has hit a GDB breakpoint (as opposed to
1866 some random trap signal), then just arrange for it to
1867 hit it again later. We don't keep the SIGTRAP status
1868 and don't forward the SIGTRAP signal to the LWP. We
1869 will handle the current event, eventually we will
1870 resume all LWPs, and this one will get its breakpoint
1873 If we do not do this, then we run the risk that the
1874 user will delete or disable the breakpoint, but the
1875 thread will have already tripped on it. */
1877 /* Save the trap's siginfo in case we need it later. */
1880 /* Now resume this LWP and get the SIGSTOP event. */
1882 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
1883 if (debug_linux_nat
)
1885 fprintf_unfiltered (gdb_stdlog
,
1886 "PTRACE_CONT %s, 0, 0 (%s)\n",
1887 target_pid_to_str (lp
->ptid
),
1888 errno
? safe_strerror (errno
) : "OK");
1890 fprintf_unfiltered (gdb_stdlog
,
1891 "SWC: Candidate SIGTRAP event in %s\n",
1892 target_pid_to_str (lp
->ptid
));
1894 /* Hold this event/waitstatus while we check to see if
1895 there are any more (we still want to get that SIGSTOP). */
1896 stop_wait_callback (lp
, data
);
1898 if (target_can_async_p ())
1900 /* Don't leave a pending wait status in async mode.
1901 Retrigger the breakpoint. */
1902 if (!cancel_breakpoint (lp
))
1904 /* There was no gdb breakpoint set at pc. Put
1905 the event back in the queue. */
1906 if (debug_linux_nat
)
1907 fprintf_unfiltered (gdb_stdlog
,
1908 "SWC: kill %s, %s\n",
1909 target_pid_to_str (lp
->ptid
),
1910 status_to_str ((int) status
));
1911 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (status
));
1916 /* Hold the SIGTRAP for handling by
1918 /* If there's another event, throw it back into the
1922 if (debug_linux_nat
)
1923 fprintf_unfiltered (gdb_stdlog
,
1924 "SWC: kill %s, %s\n",
1925 target_pid_to_str (lp
->ptid
),
1926 status_to_str ((int) status
));
1927 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (lp
->status
));
1929 /* Save the sigtrap event. */
1930 lp
->status
= status
;
1936 /* The thread was stopped with a signal other than
1937 SIGSTOP, and didn't accidentally trip a breakpoint. */
1939 if (debug_linux_nat
)
1941 fprintf_unfiltered (gdb_stdlog
,
1942 "SWC: Pending event %s in %s\n",
1943 status_to_str ((int) status
),
1944 target_pid_to_str (lp
->ptid
));
1946 /* Now resume this LWP and get the SIGSTOP event. */
1948 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
1949 if (debug_linux_nat
)
1950 fprintf_unfiltered (gdb_stdlog
,
1951 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1952 target_pid_to_str (lp
->ptid
),
1953 errno
? safe_strerror (errno
) : "OK");
1955 /* Hold this event/waitstatus while we check to see if
1956 there are any more (we still want to get that SIGSTOP). */
1957 stop_wait_callback (lp
, data
);
1959 /* If the lp->status field is still empty, use it to
1960 hold this event. If not, then this event must be
1961 returned to the event queue of the LWP. */
1962 if (lp
->status
|| target_can_async_p ())
1964 if (debug_linux_nat
)
1966 fprintf_unfiltered (gdb_stdlog
,
1967 "SWC: kill %s, %s\n",
1968 target_pid_to_str (lp
->ptid
),
1969 status_to_str ((int) status
));
1971 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (status
));
1974 lp
->status
= status
;
1980 /* We caught the SIGSTOP that we intended to catch, so
1981 there's no SIGSTOP pending. */
1990 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1991 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1994 linux_nat_has_pending (int pid
, sigset_t
*pending
, sigset_t
*flush_mask
)
1996 sigset_t blocked
, ignored
;
1999 linux_proc_pending_signals (pid
, pending
, &blocked
, &ignored
);
2004 for (i
= 1; i
< NSIG
; i
++)
2005 if (sigismember (pending
, i
))
2006 if (!sigismember (flush_mask
, i
)
2007 || sigismember (&blocked
, i
)
2008 || sigismember (&ignored
, i
))
2009 sigdelset (pending
, i
);
2011 if (sigisemptyset (pending
))
2017 /* DATA is interpreted as a mask of signals to flush. If LP has
2018 signals pending, and they are all in the flush mask, then arrange
2019 to flush them. LP should be stopped, as should all other threads
2020 it might share a signal queue with. */
2023 flush_callback (struct lwp_info
*lp
, void *data
)
2025 sigset_t
*flush_mask
= data
;
2026 sigset_t pending
, intersection
, blocked
, ignored
;
2029 /* Normally, when an LWP exits, it is removed from the LWP list. The
2030 last LWP isn't removed till later, however. So if there is only
2031 one LWP on the list, make sure it's alive. */
2032 if (lwp_list
== lp
&& lp
->next
== NULL
)
2033 if (!linux_nat_thread_alive (lp
->ptid
))
2036 /* Just because the LWP is stopped doesn't mean that new signals
2037 can't arrive from outside, so this function must be careful of
2038 race conditions. However, because all threads are stopped, we
2039 can assume that the pending mask will not shrink unless we resume
2040 the LWP, and that it will then get another signal. We can't
2041 control which one, however. */
2045 if (debug_linux_nat
)
2046 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp
->status
);
2047 if (WIFSTOPPED (lp
->status
) && sigismember (flush_mask
, WSTOPSIG (lp
->status
)))
2051 /* While there is a pending signal we would like to flush, continue
2052 the inferior and collect another signal. But if there's already
2053 a saved status that we don't want to flush, we can't resume the
2054 inferior - if it stopped for some other reason we wouldn't have
2055 anywhere to save the new status. In that case, we must leave the
2056 signal unflushed (and possibly generate an extra SIGINT stop).
2057 That's much less bad than losing a signal. */
2058 while (lp
->status
== 0
2059 && linux_nat_has_pending (GET_LWP (lp
->ptid
), &pending
, flush_mask
))
2064 ret
= ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2065 if (debug_linux_nat
)
2066 fprintf_unfiltered (gdb_stderr
,
2067 "FC: Sent PTRACE_CONT, ret %d %d\n", ret
, errno
);
2070 stop_wait_callback (lp
, flush_mask
);
2071 if (debug_linux_nat
)
2072 fprintf_unfiltered (gdb_stderr
,
2073 "FC: Wait finished; saved status is %d\n",
2080 /* Return non-zero if LP has a wait status pending. */
2083 status_callback (struct lwp_info
*lp
, void *data
)
2085 /* Only report a pending wait status if we pretend that this has
2086 indeed been resumed. */
2087 return (lp
->status
!= 0 && lp
->resumed
);
2090 /* Return non-zero if LP isn't stopped. */
2093 running_callback (struct lwp_info
*lp
, void *data
)
2095 return (lp
->stopped
== 0 || (lp
->status
!= 0 && lp
->resumed
));
2098 /* Count the LWP's that have had events. */
2101 count_events_callback (struct lwp_info
*lp
, void *data
)
2105 gdb_assert (count
!= NULL
);
2107 /* Count only LWPs that have a SIGTRAP event pending. */
2109 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
)
2115 /* Select the LWP (if any) that is currently being single-stepped. */
2118 select_singlestep_lwp_callback (struct lwp_info
*lp
, void *data
)
2120 if (lp
->step
&& lp
->status
!= 0)
2126 /* Select the Nth LWP that has had a SIGTRAP event. */
2129 select_event_lwp_callback (struct lwp_info
*lp
, void *data
)
2131 int *selector
= data
;
2133 gdb_assert (selector
!= NULL
);
2135 /* Select only LWPs that have a SIGTRAP event pending. */
2137 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
)
2138 if ((*selector
)-- == 0)
2145 cancel_breakpoint (struct lwp_info
*lp
)
2147 /* Arrange for a breakpoint to be hit again later. We don't keep
2148 the SIGTRAP status and don't forward the SIGTRAP signal to the
2149 LWP. We will handle the current event, eventually we will resume
2150 this LWP, and this breakpoint will trap again.
2152 If we do not do this, then we run the risk that the user will
2153 delete or disable the breakpoint, but the LWP will have already
2156 if (breakpoint_inserted_here_p (read_pc_pid (lp
->ptid
) -
2157 gdbarch_decr_pc_after_break
2160 if (debug_linux_nat
)
2161 fprintf_unfiltered (gdb_stdlog
,
2162 "CB: Push back breakpoint for %s\n",
2163 target_pid_to_str (lp
->ptid
));
2165 /* Back up the PC if necessary. */
2166 if (gdbarch_decr_pc_after_break (current_gdbarch
))
2167 write_pc_pid (read_pc_pid (lp
->ptid
) - gdbarch_decr_pc_after_break
2176 cancel_breakpoints_callback (struct lwp_info
*lp
, void *data
)
2178 struct lwp_info
*event_lp
= data
;
2180 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2184 /* If a LWP other than the LWP that we're reporting an event for has
2185 hit a GDB breakpoint (as opposed to some random trap signal),
2186 then just arrange for it to hit it again later. We don't keep
2187 the SIGTRAP status and don't forward the SIGTRAP signal to the
2188 LWP. We will handle the current event, eventually we will resume
2189 all LWPs, and this one will get its breakpoint trap again.
2191 If we do not do this, then we run the risk that the user will
2192 delete or disable the breakpoint, but the LWP will have already
2196 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
2197 && cancel_breakpoint (lp
))
2198 /* Throw away the SIGTRAP. */
2204 /* Select one LWP out of those that have events pending. */
2207 select_event_lwp (struct lwp_info
**orig_lp
, int *status
)
2210 int random_selector
;
2211 struct lwp_info
*event_lp
;
2213 /* Record the wait status for the original LWP. */
2214 (*orig_lp
)->status
= *status
;
2216 /* Give preference to any LWP that is being single-stepped. */
2217 event_lp
= iterate_over_lwps (select_singlestep_lwp_callback
, NULL
);
2218 if (event_lp
!= NULL
)
2220 if (debug_linux_nat
)
2221 fprintf_unfiltered (gdb_stdlog
,
2222 "SEL: Select single-step %s\n",
2223 target_pid_to_str (event_lp
->ptid
));
2227 /* No single-stepping LWP. Select one at random, out of those
2228 which have had SIGTRAP events. */
2230 /* First see how many SIGTRAP events we have. */
2231 iterate_over_lwps (count_events_callback
, &num_events
);
2233 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2234 random_selector
= (int)
2235 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2237 if (debug_linux_nat
&& num_events
> 1)
2238 fprintf_unfiltered (gdb_stdlog
,
2239 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2240 num_events
, random_selector
);
2242 event_lp
= iterate_over_lwps (select_event_lwp_callback
,
2246 if (event_lp
!= NULL
)
2248 /* Switch the event LWP. */
2249 *orig_lp
= event_lp
;
2250 *status
= event_lp
->status
;
2253 /* Flush the wait status for the event LWP. */
2254 (*orig_lp
)->status
= 0;
2257 /* Return non-zero if LP has been resumed. */
2260 resumed_callback (struct lwp_info
*lp
, void *data
)
2265 /* Stop an active thread, verify it still exists, then resume it. */
2268 stop_and_resume_callback (struct lwp_info
*lp
, void *data
)
2270 struct lwp_info
*ptr
;
2272 if (!lp
->stopped
&& !lp
->signalled
)
2274 stop_callback (lp
, NULL
);
2275 stop_wait_callback (lp
, NULL
);
2276 /* Resume if the lwp still exists. */
2277 for (ptr
= lwp_list
; ptr
; ptr
= ptr
->next
)
2280 resume_callback (lp
, NULL
);
2281 resume_set_callback (lp
, NULL
);
2287 /* Check if we should go on and pass this event to common code.
2288 Return the affected lwp if we are, or NULL otherwise. */
2289 static struct lwp_info
*
2290 linux_nat_filter_event (int lwpid
, int status
, int options
)
2292 struct lwp_info
*lp
;
2294 lp
= find_lwp_pid (pid_to_ptid (lwpid
));
2296 /* Check for stop events reported by a process we didn't already
2297 know about - anything not already in our LWP list.
2299 If we're expecting to receive stopped processes after
2300 fork, vfork, and clone events, then we'll just add the
2301 new one to our list and go back to waiting for the event
2302 to be reported - the stopped process might be returned
2303 from waitpid before or after the event is. */
2304 if (WIFSTOPPED (status
) && !lp
)
2306 linux_record_stopped_pid (lwpid
, status
);
2310 /* Make sure we don't report an event for the exit of an LWP not in
2311 our list, i.e. not part of the current process. This can happen
2312 if we detach from a program we original forked and then it
2314 if (!WIFSTOPPED (status
) && !lp
)
2317 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2318 CLONE_PTRACE processes which do not use the thread library -
2319 otherwise we wouldn't find the new LWP this way. That doesn't
2320 currently work, and the following code is currently unreachable
2321 due to the two blocks above. If it's fixed some day, this code
2322 should be broken out into a function so that we can also pick up
2323 LWPs from the new interface. */
2326 lp
= add_lwp (BUILD_LWP (lwpid
, GET_PID (inferior_ptid
)));
2327 if (options
& __WCLONE
)
2330 gdb_assert (WIFSTOPPED (status
)
2331 && WSTOPSIG (status
) == SIGSTOP
);
2334 if (!in_thread_list (inferior_ptid
))
2336 inferior_ptid
= BUILD_LWP (GET_PID (inferior_ptid
),
2337 GET_PID (inferior_ptid
));
2338 add_thread (inferior_ptid
);
2341 add_thread (lp
->ptid
);
2344 /* Save the trap's siginfo in case we need it later. */
2345 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
)
2348 /* Handle GNU/Linux's extended waitstatus for trace events. */
2349 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
2351 if (debug_linux_nat
)
2352 fprintf_unfiltered (gdb_stdlog
,
2353 "LLW: Handling extended status 0x%06x\n",
2355 if (linux_handle_extended_wait (lp
, status
, 0))
2359 /* Check if the thread has exited. */
2360 if ((WIFEXITED (status
) || WIFSIGNALED (status
)) && num_lwps
> 1)
2362 /* If this is the main thread, we must stop all threads and
2363 verify if they are still alive. This is because in the nptl
2364 thread model, there is no signal issued for exiting LWPs
2365 other than the main thread. We only get the main thread exit
2366 signal once all child threads have already exited. If we
2367 stop all the threads and use the stop_wait_callback to check
2368 if they have exited we can determine whether this signal
2369 should be ignored or whether it means the end of the debugged
2370 application, regardless of which threading model is being
2372 if (GET_PID (lp
->ptid
) == GET_LWP (lp
->ptid
))
2375 iterate_over_lwps (stop_and_resume_callback
, NULL
);
2378 if (debug_linux_nat
)
2379 fprintf_unfiltered (gdb_stdlog
,
2380 "LLW: %s exited.\n",
2381 target_pid_to_str (lp
->ptid
));
2385 /* If there is at least one more LWP, then the exit signal was
2386 not the end of the debugged application and should be
2390 /* Make sure there is at least one thread running. */
2391 gdb_assert (iterate_over_lwps (running_callback
, NULL
));
2393 /* Discard the event. */
2398 /* Check if the current LWP has previously exited. In the nptl
2399 thread model, LWPs other than the main thread do not issue
2400 signals when they exit so we must check whenever the thread has
2401 stopped. A similar check is made in stop_wait_callback(). */
2402 if (num_lwps
> 1 && !linux_nat_thread_alive (lp
->ptid
))
2404 if (debug_linux_nat
)
2405 fprintf_unfiltered (gdb_stdlog
,
2406 "LLW: %s exited.\n",
2407 target_pid_to_str (lp
->ptid
));
2411 /* Make sure there is at least one thread running. */
2412 gdb_assert (iterate_over_lwps (running_callback
, NULL
));
2414 /* Discard the event. */
2418 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2419 an attempt to stop an LWP. */
2421 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGSTOP
)
2423 if (debug_linux_nat
)
2424 fprintf_unfiltered (gdb_stdlog
,
2425 "LLW: Delayed SIGSTOP caught for %s.\n",
2426 target_pid_to_str (lp
->ptid
));
2428 /* This is a delayed SIGSTOP. */
2431 registers_changed ();
2433 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2434 lp
->step
, TARGET_SIGNAL_0
);
2435 if (debug_linux_nat
)
2436 fprintf_unfiltered (gdb_stdlog
,
2437 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2439 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2440 target_pid_to_str (lp
->ptid
));
2443 gdb_assert (lp
->resumed
);
2445 /* Discard the event. */
2449 /* An interesting event. */
2454 /* Get the events stored in the pipe into the local queue, so they are
2455 accessible to queued_waitpid. We need to do this, since it is not
2456 always the case that the event at the head of the pipe is the event
2460 pipe_to_local_event_queue (void)
2462 if (debug_linux_nat_async
)
2463 fprintf_unfiltered (gdb_stdlog
,
2464 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2465 linux_nat_num_queued_events
);
2466 while (linux_nat_num_queued_events
)
2468 int lwpid
, status
, options
;
2469 lwpid
= linux_nat_event_pipe_pop (&status
, &options
);
2470 gdb_assert (lwpid
> 0);
2471 push_waitpid (lwpid
, status
, options
);
2475 /* Get the unprocessed events stored in the local queue back into the
2476 pipe, so the event loop realizes there's something else to
2480 local_event_queue_to_pipe (void)
2482 struct waitpid_result
*w
= waitpid_queue
;
2485 struct waitpid_result
*next
= w
->next
;
2486 linux_nat_event_pipe_push (w
->pid
,
2492 waitpid_queue
= NULL
;
2494 if (debug_linux_nat_async
)
2495 fprintf_unfiltered (gdb_stdlog
,
2496 "LEQTP: linux_nat_num_queued_events(%d)\n",
2497 linux_nat_num_queued_events
);
2501 linux_nat_wait (ptid_t ptid
, struct target_waitstatus
*ourstatus
)
2503 struct lwp_info
*lp
= NULL
;
2506 pid_t pid
= PIDGET (ptid
);
2507 sigset_t flush_mask
;
2509 if (debug_linux_nat_async
)
2510 fprintf_unfiltered (gdb_stdlog
, "LLW: enter\n");
2512 /* The first time we get here after starting a new inferior, we may
2513 not have added it to the LWP list yet - this is the earliest
2514 moment at which we know its PID. */
2517 gdb_assert (!is_lwp (inferior_ptid
));
2519 inferior_ptid
= BUILD_LWP (GET_PID (inferior_ptid
),
2520 GET_PID (inferior_ptid
));
2521 lp
= add_lwp (inferior_ptid
);
2523 /* Add the main thread to GDB's thread list. */
2524 add_thread_silent (lp
->ptid
);
2527 sigemptyset (&flush_mask
);
2529 if (target_can_async_p ())
2530 /* Block events while we're here. */
2531 target_async (NULL
, 0);
2535 /* Make sure there is at least one LWP that has been resumed. */
2536 gdb_assert (iterate_over_lwps (resumed_callback
, NULL
));
2538 /* First check if there is a LWP with a wait status pending. */
2541 /* Any LWP that's been resumed will do. */
2542 lp
= iterate_over_lwps (status_callback
, NULL
);
2545 if (target_can_async_p ())
2546 internal_error (__FILE__
, __LINE__
,
2547 "Found an LWP with a pending status in async mode.");
2549 status
= lp
->status
;
2552 if (debug_linux_nat
&& status
)
2553 fprintf_unfiltered (gdb_stdlog
,
2554 "LLW: Using pending wait status %s for %s.\n",
2555 status_to_str (status
),
2556 target_pid_to_str (lp
->ptid
));
2559 /* But if we don't find one, we'll have to wait, and check both
2560 cloned and uncloned processes. We start with the cloned
2562 options
= __WCLONE
| WNOHANG
;
2564 else if (is_lwp (ptid
))
2566 if (debug_linux_nat
)
2567 fprintf_unfiltered (gdb_stdlog
,
2568 "LLW: Waiting for specific LWP %s.\n",
2569 target_pid_to_str (ptid
));
2571 /* We have a specific LWP to check. */
2572 lp
= find_lwp_pid (ptid
);
2574 status
= lp
->status
;
2577 if (debug_linux_nat
&& status
)
2578 fprintf_unfiltered (gdb_stdlog
,
2579 "LLW: Using pending wait status %s for %s.\n",
2580 status_to_str (status
),
2581 target_pid_to_str (lp
->ptid
));
2583 /* If we have to wait, take into account whether PID is a cloned
2584 process or not. And we have to convert it to something that
2585 the layer beneath us can understand. */
2586 options
= lp
->cloned
? __WCLONE
: 0;
2587 pid
= GET_LWP (ptid
);
2590 if (status
&& lp
->signalled
)
2592 /* A pending SIGSTOP may interfere with the normal stream of
2593 events. In a typical case where interference is a problem,
2594 we have a SIGSTOP signal pending for LWP A while
2595 single-stepping it, encounter an event in LWP B, and take the
2596 pending SIGSTOP while trying to stop LWP A. After processing
2597 the event in LWP B, LWP A is continued, and we'll never see
2598 the SIGTRAP associated with the last time we were
2599 single-stepping LWP A. */
2601 /* Resume the thread. It should halt immediately returning the
2603 registers_changed ();
2604 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2605 lp
->step
, TARGET_SIGNAL_0
);
2606 if (debug_linux_nat
)
2607 fprintf_unfiltered (gdb_stdlog
,
2608 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2609 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2610 target_pid_to_str (lp
->ptid
));
2612 gdb_assert (lp
->resumed
);
2614 /* This should catch the pending SIGSTOP. */
2615 stop_wait_callback (lp
, NULL
);
2618 if (!target_can_async_p ())
2620 /* Causes SIGINT to be passed on to the attached process. */
2629 if (target_can_async_p ())
2630 /* In async mode, don't ever block. Only look at the locally
2632 lwpid
= queued_waitpid (pid
, &status
, options
);
2634 lwpid
= my_waitpid (pid
, &status
, options
);
2638 gdb_assert (pid
== -1 || lwpid
== pid
);
2640 if (debug_linux_nat
)
2642 fprintf_unfiltered (gdb_stdlog
,
2643 "LLW: waitpid %ld received %s\n",
2644 (long) lwpid
, status_to_str (status
));
2647 lp
= linux_nat_filter_event (lwpid
, status
, options
);
2650 /* A discarded event. */
2660 /* Alternate between checking cloned and uncloned processes. */
2661 options
^= __WCLONE
;
2663 /* And every time we have checked both:
2664 In async mode, return to event loop;
2665 In sync mode, suspend waiting for a SIGCHLD signal. */
2666 if (options
& __WCLONE
)
2668 if (target_can_async_p ())
2670 /* No interesting event. */
2671 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2673 /* Get ready for the next event. */
2674 target_async (inferior_event_handler
, 0);
2676 if (debug_linux_nat_async
)
2677 fprintf_unfiltered (gdb_stdlog
, "LLW: exit (ignore)\n");
2679 return minus_one_ptid
;
2682 sigsuspend (&suspend_mask
);
2686 /* We shouldn't end up here unless we want to try again. */
2687 gdb_assert (status
== 0);
2690 if (!target_can_async_p ())
2692 clear_sigio_trap ();
2693 clear_sigint_trap ();
2698 /* Don't report signals that GDB isn't interested in, such as
2699 signals that are neither printed nor stopped upon. Stopping all
2700 threads can be a bit time-consuming so if we want decent
2701 performance with heavily multi-threaded programs, especially when
2702 they're using a high frequency timer, we'd better avoid it if we
2705 if (WIFSTOPPED (status
))
2707 int signo
= target_signal_from_host (WSTOPSIG (status
));
2709 /* If we get a signal while single-stepping, we may need special
2710 care, e.g. to skip the signal handler. Defer to common code. */
2712 && signal_stop_state (signo
) == 0
2713 && signal_print_state (signo
) == 0
2714 && signal_pass_state (signo
) == 1)
2716 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
2717 here? It is not clear we should. GDB may not expect
2718 other threads to run. On the other hand, not resuming
2719 newly attached threads may cause an unwanted delay in
2720 getting them running. */
2721 registers_changed ();
2722 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2724 if (debug_linux_nat
)
2725 fprintf_unfiltered (gdb_stdlog
,
2726 "LLW: %s %s, %s (preempt 'handle')\n",
2728 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2729 target_pid_to_str (lp
->ptid
),
2730 signo
? strsignal (signo
) : "0");
2736 if (signo
== TARGET_SIGNAL_INT
&& signal_pass_state (signo
) == 0)
2738 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2739 forwarded to the entire process group, that is, all LWP's
2740 will receive it. Since we only want to report it once,
2741 we try to flush it from all LWPs except this one. */
2742 sigaddset (&flush_mask
, SIGINT
);
2746 /* This LWP is stopped now. */
2749 if (debug_linux_nat
)
2750 fprintf_unfiltered (gdb_stdlog
, "LLW: Candidate event %s in %s.\n",
2751 status_to_str (status
), target_pid_to_str (lp
->ptid
));
2753 /* Now stop all other LWP's ... */
2754 iterate_over_lwps (stop_callback
, NULL
);
2756 /* ... and wait until all of them have reported back that they're no
2758 iterate_over_lwps (stop_wait_callback
, &flush_mask
);
2759 iterate_over_lwps (flush_callback
, &flush_mask
);
2761 /* If we're not waiting for a specific LWP, choose an event LWP from
2762 among those that have had events. Giving equal priority to all
2763 LWPs that have had events helps prevent starvation. */
2765 select_event_lwp (&lp
, &status
);
2767 /* Now that we've selected our final event LWP, cancel any
2768 breakpoints in other LWPs that have hit a GDB breakpoint. See
2769 the comment in cancel_breakpoints_callback to find out why. */
2770 iterate_over_lwps (cancel_breakpoints_callback
, lp
);
2772 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
)
2774 trap_ptid
= lp
->ptid
;
2775 if (debug_linux_nat
)
2776 fprintf_unfiltered (gdb_stdlog
,
2777 "LLW: trap_ptid is %s.\n",
2778 target_pid_to_str (trap_ptid
));
2781 trap_ptid
= null_ptid
;
2783 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
2785 *ourstatus
= lp
->waitstatus
;
2786 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
2789 store_waitstatus (ourstatus
, status
);
2791 /* Get ready for the next event. */
2792 if (target_can_async_p ())
2793 target_async (inferior_event_handler
, 0);
2795 if (debug_linux_nat_async
)
2796 fprintf_unfiltered (gdb_stdlog
, "LLW: exit\n");
2802 kill_callback (struct lwp_info
*lp
, void *data
)
2805 ptrace (PTRACE_KILL
, GET_LWP (lp
->ptid
), 0, 0);
2806 if (debug_linux_nat
)
2807 fprintf_unfiltered (gdb_stdlog
,
2808 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2809 target_pid_to_str (lp
->ptid
),
2810 errno
? safe_strerror (errno
) : "OK");
2816 kill_wait_callback (struct lwp_info
*lp
, void *data
)
2820 /* We must make sure that there are no pending events (delayed
2821 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
2822 program doesn't interfere with any following debugging session. */
2824 /* For cloned processes we must check both with __WCLONE and
2825 without, since the exit status of a cloned process isn't reported
2831 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, __WCLONE
);
2832 if (pid
!= (pid_t
) -1)
2834 if (debug_linux_nat
)
2835 fprintf_unfiltered (gdb_stdlog
,
2836 "KWC: wait %s received unknown.\n",
2837 target_pid_to_str (lp
->ptid
));
2838 /* The Linux kernel sometimes fails to kill a thread
2839 completely after PTRACE_KILL; that goes from the stop
2840 point in do_fork out to the one in
2841 get_signal_to_deliever and waits again. So kill it
2843 kill_callback (lp
, NULL
);
2846 while (pid
== GET_LWP (lp
->ptid
));
2848 gdb_assert (pid
== -1 && errno
== ECHILD
);
2853 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, 0);
2854 if (pid
!= (pid_t
) -1)
2856 if (debug_linux_nat
)
2857 fprintf_unfiltered (gdb_stdlog
,
2858 "KWC: wait %s received unk.\n",
2859 target_pid_to_str (lp
->ptid
));
2860 /* See the call to kill_callback above. */
2861 kill_callback (lp
, NULL
);
2864 while (pid
== GET_LWP (lp
->ptid
));
2866 gdb_assert (pid
== -1 && errno
== ECHILD
);
2871 linux_nat_kill (void)
2873 struct target_waitstatus last
;
2877 if (target_can_async_p ())
2878 target_async (NULL
, 0);
2880 /* If we're stopped while forking and we haven't followed yet,
2881 kill the other task. We need to do this first because the
2882 parent will be sleeping if this is a vfork. */
2884 get_last_target_status (&last_ptid
, &last
);
2886 if (last
.kind
== TARGET_WAITKIND_FORKED
2887 || last
.kind
== TARGET_WAITKIND_VFORKED
)
2889 ptrace (PT_KILL
, last
.value
.related_pid
, 0, 0);
2893 if (forks_exist_p ())
2895 linux_fork_killall ();
2896 drain_queued_events (-1);
2900 /* Kill all LWP's ... */
2901 iterate_over_lwps (kill_callback
, NULL
);
2903 /* ... and wait until we've flushed all events. */
2904 iterate_over_lwps (kill_wait_callback
, NULL
);
2907 target_mourn_inferior ();
2911 linux_nat_mourn_inferior (void)
2913 trap_ptid
= null_ptid
;
2915 /* Destroy LWP info; it's no longer valid. */
2918 if (! forks_exist_p ())
2920 /* Normal case, no other forks available. */
2921 if (target_can_async_p ())
2922 linux_nat_async (NULL
, 0);
2923 linux_ops
->to_mourn_inferior ();
2926 /* Multi-fork case. The current inferior_ptid has exited, but
2927 there are other viable forks to debug. Delete the exiting
2928 one and context-switch to the first available. */
2929 linux_fork_mourn_inferior ();
2933 linux_nat_xfer_partial (struct target_ops
*ops
, enum target_object object
,
2934 const char *annex
, gdb_byte
*readbuf
,
2935 const gdb_byte
*writebuf
,
2936 ULONGEST offset
, LONGEST len
)
2938 struct cleanup
*old_chain
= save_inferior_ptid ();
2941 if (is_lwp (inferior_ptid
))
2942 inferior_ptid
= pid_to_ptid (GET_LWP (inferior_ptid
));
2944 xfer
= linux_ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
2947 do_cleanups (old_chain
);
2952 linux_nat_thread_alive (ptid_t ptid
)
2954 gdb_assert (is_lwp (ptid
));
2957 ptrace (PTRACE_PEEKUSER
, GET_LWP (ptid
), 0, 0);
2958 if (debug_linux_nat
)
2959 fprintf_unfiltered (gdb_stdlog
,
2960 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2961 target_pid_to_str (ptid
),
2962 errno
? safe_strerror (errno
) : "OK");
2964 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2965 handle that case gracefully since ptrace will first do a lookup
2966 for the process based upon the passed-in pid. If that fails we
2967 will get either -ESRCH or -EPERM, otherwise the child exists and
2969 if (errno
== ESRCH
|| errno
== EPERM
)
2976 linux_nat_pid_to_str (ptid_t ptid
)
2978 static char buf
[64];
2981 && ((lwp_list
&& lwp_list
->next
)
2982 || GET_PID (ptid
) != GET_LWP (ptid
)))
2984 snprintf (buf
, sizeof (buf
), "LWP %ld", GET_LWP (ptid
));
2988 return normal_pid_to_str (ptid
);
2992 sigchld_handler (int signo
)
2994 if (linux_nat_async_enabled
2995 && linux_nat_async_events_enabled
2996 && signo
== SIGCHLD
)
2997 /* It is *always* a bug to hit this. */
2998 internal_error (__FILE__
, __LINE__
,
2999 "sigchld_handler called when async events are enabled");
3001 /* Do nothing. The only reason for this handler is that it allows
3002 us to use sigsuspend in linux_nat_wait above to wait for the
3003 arrival of a SIGCHLD. */
3006 /* Accepts an integer PID; Returns a string representing a file that
3007 can be opened to get the symbols for the child process. */
3010 linux_child_pid_to_exec_file (int pid
)
3012 char *name1
, *name2
;
3014 name1
= xmalloc (MAXPATHLEN
);
3015 name2
= xmalloc (MAXPATHLEN
);
3016 make_cleanup (xfree
, name1
);
3017 make_cleanup (xfree
, name2
);
3018 memset (name2
, 0, MAXPATHLEN
);
3020 sprintf (name1
, "/proc/%d/exe", pid
);
3021 if (readlink (name1
, name2
, MAXPATHLEN
) > 0)
3027 /* Service function for corefiles and info proc. */
3030 read_mapping (FILE *mapfile
,
3035 char *device
, long long *inode
, char *filename
)
3037 int ret
= fscanf (mapfile
, "%llx-%llx %s %llx %s %llx",
3038 addr
, endaddr
, permissions
, offset
, device
, inode
);
3041 if (ret
> 0 && ret
!= EOF
)
3043 /* Eat everything up to EOL for the filename. This will prevent
3044 weird filenames (such as one with embedded whitespace) from
3045 confusing this code. It also makes this code more robust in
3046 respect to annotations the kernel may add after the filename.
3048 Note the filename is used for informational purposes
3050 ret
+= fscanf (mapfile
, "%[^\n]\n", filename
);
3053 return (ret
!= 0 && ret
!= EOF
);
3056 /* Fills the "to_find_memory_regions" target vector. Lists the memory
3057 regions in the inferior for a corefile. */
3060 linux_nat_find_memory_regions (int (*func
) (CORE_ADDR
,
3062 int, int, int, void *), void *obfd
)
3064 long long pid
= PIDGET (inferior_ptid
);
3065 char mapsfilename
[MAXPATHLEN
];
3067 long long addr
, endaddr
, size
, offset
, inode
;
3068 char permissions
[8], device
[8], filename
[MAXPATHLEN
];
3069 int read
, write
, exec
;
3072 /* Compose the filename for the /proc memory map, and open it. */
3073 sprintf (mapsfilename
, "/proc/%lld/maps", pid
);
3074 if ((mapsfile
= fopen (mapsfilename
, "r")) == NULL
)
3075 error (_("Could not open %s."), mapsfilename
);
3078 fprintf_filtered (gdb_stdout
,
3079 "Reading memory regions from %s\n", mapsfilename
);
3081 /* Now iterate until end-of-file. */
3082 while (read_mapping (mapsfile
, &addr
, &endaddr
, &permissions
[0],
3083 &offset
, &device
[0], &inode
, &filename
[0]))
3085 size
= endaddr
- addr
;
3087 /* Get the segment's permissions. */
3088 read
= (strchr (permissions
, 'r') != 0);
3089 write
= (strchr (permissions
, 'w') != 0);
3090 exec
= (strchr (permissions
, 'x') != 0);
3094 fprintf_filtered (gdb_stdout
,
3095 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3096 size
, paddr_nz (addr
),
3098 write
? 'w' : ' ', exec
? 'x' : ' ');
3100 fprintf_filtered (gdb_stdout
, " for %s", filename
);
3101 fprintf_filtered (gdb_stdout
, "\n");
3104 /* Invoke the callback function to create the corefile
3106 func (addr
, size
, read
, write
, exec
, obfd
);
3112 /* Records the thread's register state for the corefile note
3116 linux_nat_do_thread_registers (bfd
*obfd
, ptid_t ptid
,
3117 char *note_data
, int *note_size
)
3119 gdb_gregset_t gregs
;
3120 gdb_fpregset_t fpregs
;
3121 #ifdef FILL_FPXREGSET
3122 gdb_fpxregset_t fpxregs
;
3124 unsigned long lwp
= ptid_get_lwp (ptid
);
3125 struct regcache
*regcache
= get_thread_regcache (ptid
);
3126 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3127 const struct regset
*regset
;
3129 struct cleanup
*old_chain
;
3131 old_chain
= save_inferior_ptid ();
3132 inferior_ptid
= ptid
;
3133 target_fetch_registers (regcache
, -1);
3134 do_cleanups (old_chain
);
3136 core_regset_p
= gdbarch_regset_from_core_section_p (gdbarch
);
3138 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg",
3139 sizeof (gregs
))) != NULL
3140 && regset
->collect_regset
!= NULL
)
3141 regset
->collect_regset (regset
, regcache
, -1,
3142 &gregs
, sizeof (gregs
));
3144 fill_gregset (regcache
, &gregs
, -1);
3146 note_data
= (char *) elfcore_write_prstatus (obfd
,
3150 stop_signal
, &gregs
);
3153 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg2",
3154 sizeof (fpregs
))) != NULL
3155 && regset
->collect_regset
!= NULL
)
3156 regset
->collect_regset (regset
, regcache
, -1,
3157 &fpregs
, sizeof (fpregs
));
3159 fill_fpregset (regcache
, &fpregs
, -1);
3161 note_data
= (char *) elfcore_write_prfpreg (obfd
,
3164 &fpregs
, sizeof (fpregs
));
3166 #ifdef FILL_FPXREGSET
3168 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg-xfp",
3169 sizeof (fpxregs
))) != NULL
3170 && regset
->collect_regset
!= NULL
)
3171 regset
->collect_regset (regset
, regcache
, -1,
3172 &fpxregs
, sizeof (fpxregs
));
3174 fill_fpxregset (regcache
, &fpxregs
, -1);
3176 note_data
= (char *) elfcore_write_prxfpreg (obfd
,
3179 &fpxregs
, sizeof (fpxregs
));
3184 struct linux_nat_corefile_thread_data
3192 /* Called by gdbthread.c once per thread. Records the thread's
3193 register state for the corefile note section. */
3196 linux_nat_corefile_thread_callback (struct lwp_info
*ti
, void *data
)
3198 struct linux_nat_corefile_thread_data
*args
= data
;
3200 args
->note_data
= linux_nat_do_thread_registers (args
->obfd
,
3209 /* Records the register state for the corefile note section. */
3212 linux_nat_do_registers (bfd
*obfd
, ptid_t ptid
,
3213 char *note_data
, int *note_size
)
3215 return linux_nat_do_thread_registers (obfd
,
3216 ptid_build (ptid_get_pid (inferior_ptid
),
3217 ptid_get_pid (inferior_ptid
),
3219 note_data
, note_size
);
3222 /* Fills the "to_make_corefile_note" target vector. Builds the note
3223 section for a corefile, and returns it in a malloc buffer. */
3226 linux_nat_make_corefile_notes (bfd
*obfd
, int *note_size
)
3228 struct linux_nat_corefile_thread_data thread_args
;
3229 struct cleanup
*old_chain
;
3230 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
3231 char fname
[16] = { '\0' };
3232 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
3233 char psargs
[80] = { '\0' };
3234 char *note_data
= NULL
;
3235 ptid_t current_ptid
= inferior_ptid
;
3239 if (get_exec_file (0))
3241 strncpy (fname
, strrchr (get_exec_file (0), '/') + 1, sizeof (fname
));
3242 strncpy (psargs
, get_exec_file (0), sizeof (psargs
));
3243 if (get_inferior_args ())
3246 char *psargs_end
= psargs
+ sizeof (psargs
);
3248 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3250 string_end
= memchr (psargs
, 0, sizeof (psargs
));
3251 if (string_end
!= NULL
)
3253 *string_end
++ = ' ';
3254 strncpy (string_end
, get_inferior_args (),
3255 psargs_end
- string_end
);
3258 note_data
= (char *) elfcore_write_prpsinfo (obfd
,
3260 note_size
, fname
, psargs
);
3263 /* Dump information for threads. */
3264 thread_args
.obfd
= obfd
;
3265 thread_args
.note_data
= note_data
;
3266 thread_args
.note_size
= note_size
;
3267 thread_args
.num_notes
= 0;
3268 iterate_over_lwps (linux_nat_corefile_thread_callback
, &thread_args
);
3269 if (thread_args
.num_notes
== 0)
3271 /* iterate_over_threads didn't come up with any threads; just
3272 use inferior_ptid. */
3273 note_data
= linux_nat_do_registers (obfd
, inferior_ptid
,
3274 note_data
, note_size
);
3278 note_data
= thread_args
.note_data
;
3281 auxv_len
= target_read_alloc (¤t_target
, TARGET_OBJECT_AUXV
,
3285 note_data
= elfcore_write_note (obfd
, note_data
, note_size
,
3286 "CORE", NT_AUXV
, auxv
, auxv_len
);
3290 make_cleanup (xfree
, note_data
);
3294 /* Implement the "info proc" command. */
3297 linux_nat_info_proc_cmd (char *args
, int from_tty
)
3299 long long pid
= PIDGET (inferior_ptid
);
3302 char buffer
[MAXPATHLEN
];
3303 char fname1
[MAXPATHLEN
], fname2
[MAXPATHLEN
];
3316 /* Break up 'args' into an argv array. */
3317 if ((argv
= buildargv (args
)) == NULL
)
3320 make_cleanup_freeargv (argv
);
3322 while (argv
!= NULL
&& *argv
!= NULL
)
3324 if (isdigit (argv
[0][0]))
3326 pid
= strtoul (argv
[0], NULL
, 10);
3328 else if (strncmp (argv
[0], "mappings", strlen (argv
[0])) == 0)
3332 else if (strcmp (argv
[0], "status") == 0)
3336 else if (strcmp (argv
[0], "stat") == 0)
3340 else if (strcmp (argv
[0], "cmd") == 0)
3344 else if (strncmp (argv
[0], "exe", strlen (argv
[0])) == 0)
3348 else if (strcmp (argv
[0], "cwd") == 0)
3352 else if (strncmp (argv
[0], "all", strlen (argv
[0])) == 0)
3358 /* [...] (future options here) */
3363 error (_("No current process: you must name one."));
3365 sprintf (fname1
, "/proc/%lld", pid
);
3366 if (stat (fname1
, &dummy
) != 0)
3367 error (_("No /proc directory: '%s'"), fname1
);
3369 printf_filtered (_("process %lld\n"), pid
);
3370 if (cmdline_f
|| all
)
3372 sprintf (fname1
, "/proc/%lld/cmdline", pid
);
3373 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3375 fgets (buffer
, sizeof (buffer
), procfile
);
3376 printf_filtered ("cmdline = '%s'\n", buffer
);
3380 warning (_("unable to open /proc file '%s'"), fname1
);
3384 sprintf (fname1
, "/proc/%lld/cwd", pid
);
3385 memset (fname2
, 0, sizeof (fname2
));
3386 if (readlink (fname1
, fname2
, sizeof (fname2
)) > 0)
3387 printf_filtered ("cwd = '%s'\n", fname2
);
3389 warning (_("unable to read link '%s'"), fname1
);
3393 sprintf (fname1
, "/proc/%lld/exe", pid
);
3394 memset (fname2
, 0, sizeof (fname2
));
3395 if (readlink (fname1
, fname2
, sizeof (fname2
)) > 0)
3396 printf_filtered ("exe = '%s'\n", fname2
);
3398 warning (_("unable to read link '%s'"), fname1
);
3400 if (mappings_f
|| all
)
3402 sprintf (fname1
, "/proc/%lld/maps", pid
);
3403 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3405 long long addr
, endaddr
, size
, offset
, inode
;
3406 char permissions
[8], device
[8], filename
[MAXPATHLEN
];
3408 printf_filtered (_("Mapped address spaces:\n\n"));
3409 if (gdbarch_addr_bit (current_gdbarch
) == 32)
3411 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3414 " Size", " Offset", "objfile");
3418 printf_filtered (" %18s %18s %10s %10s %7s\n",
3421 " Size", " Offset", "objfile");
3424 while (read_mapping (procfile
, &addr
, &endaddr
, &permissions
[0],
3425 &offset
, &device
[0], &inode
, &filename
[0]))
3427 size
= endaddr
- addr
;
3429 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3430 calls here (and possibly above) should be abstracted
3431 out into their own functions? Andrew suggests using
3432 a generic local_address_string instead to print out
3433 the addresses; that makes sense to me, too. */
3435 if (gdbarch_addr_bit (current_gdbarch
) == 32)
3437 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3438 (unsigned long) addr
, /* FIXME: pr_addr */
3439 (unsigned long) endaddr
,
3441 (unsigned int) offset
,
3442 filename
[0] ? filename
: "");
3446 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3447 (unsigned long) addr
, /* FIXME: pr_addr */
3448 (unsigned long) endaddr
,
3450 (unsigned int) offset
,
3451 filename
[0] ? filename
: "");
3458 warning (_("unable to open /proc file '%s'"), fname1
);
3460 if (status_f
|| all
)
3462 sprintf (fname1
, "/proc/%lld/status", pid
);
3463 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3465 while (fgets (buffer
, sizeof (buffer
), procfile
) != NULL
)
3466 puts_filtered (buffer
);
3470 warning (_("unable to open /proc file '%s'"), fname1
);
3474 sprintf (fname1
, "/proc/%lld/stat", pid
);
3475 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3481 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3482 printf_filtered (_("Process: %d\n"), itmp
);
3483 if (fscanf (procfile
, "(%[^)]) ", &buffer
[0]) > 0)
3484 printf_filtered (_("Exec file: %s\n"), buffer
);
3485 if (fscanf (procfile
, "%c ", &ctmp
) > 0)
3486 printf_filtered (_("State: %c\n"), ctmp
);
3487 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3488 printf_filtered (_("Parent process: %d\n"), itmp
);
3489 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3490 printf_filtered (_("Process group: %d\n"), itmp
);
3491 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3492 printf_filtered (_("Session id: %d\n"), itmp
);
3493 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3494 printf_filtered (_("TTY: %d\n"), itmp
);
3495 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3496 printf_filtered (_("TTY owner process group: %d\n"), itmp
);
3497 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3498 printf_filtered (_("Flags: 0x%lx\n"), ltmp
);
3499 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3500 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3501 (unsigned long) ltmp
);
3502 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3503 printf_filtered (_("Minor faults, children: %lu\n"),
3504 (unsigned long) ltmp
);
3505 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3506 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3507 (unsigned long) ltmp
);
3508 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3509 printf_filtered (_("Major faults, children: %lu\n"),
3510 (unsigned long) ltmp
);
3511 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3512 printf_filtered (_("utime: %ld\n"), ltmp
);
3513 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3514 printf_filtered (_("stime: %ld\n"), ltmp
);
3515 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3516 printf_filtered (_("utime, children: %ld\n"), ltmp
);
3517 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3518 printf_filtered (_("stime, children: %ld\n"), ltmp
);
3519 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3520 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3522 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3523 printf_filtered (_("'nice' value: %ld\n"), ltmp
);
3524 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3525 printf_filtered (_("jiffies until next timeout: %lu\n"),
3526 (unsigned long) ltmp
);
3527 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3528 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3529 (unsigned long) ltmp
);
3530 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3531 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3533 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3534 printf_filtered (_("Virtual memory size: %lu\n"),
3535 (unsigned long) ltmp
);
3536 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3537 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp
);
3538 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3539 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp
);
3540 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3541 printf_filtered (_("Start of text: 0x%lx\n"), ltmp
);
3542 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3543 printf_filtered (_("End of text: 0x%lx\n"), ltmp
);
3544 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3545 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp
);
3546 #if 0 /* Don't know how architecture-dependent the rest is...
3547 Anyway the signal bitmap info is available from "status". */
3548 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3549 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp
);
3550 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3551 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp
);
3552 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3553 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp
);
3554 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3555 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp
);
3556 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3557 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp
);
3558 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3559 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp
);
3560 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3561 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp
);
3566 warning (_("unable to open /proc file '%s'"), fname1
);
3570 /* Implement the to_xfer_partial interface for memory reads using the /proc
3571 filesystem. Because we can use a single read() call for /proc, this
3572 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3573 but it doesn't support writes. */
3576 linux_proc_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3577 const char *annex
, gdb_byte
*readbuf
,
3578 const gdb_byte
*writebuf
,
3579 ULONGEST offset
, LONGEST len
)
3585 if (object
!= TARGET_OBJECT_MEMORY
|| !readbuf
)
3588 /* Don't bother for one word. */
3589 if (len
< 3 * sizeof (long))
3592 /* We could keep this file open and cache it - possibly one per
3593 thread. That requires some juggling, but is even faster. */
3594 sprintf (filename
, "/proc/%d/mem", PIDGET (inferior_ptid
));
3595 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
3599 /* If pread64 is available, use it. It's faster if the kernel
3600 supports it (only one syscall), and it's 64-bit safe even on
3601 32-bit platforms (for instance, SPARC debugging a SPARC64
3604 if (pread64 (fd
, readbuf
, len
, offset
) != len
)
3606 if (lseek (fd
, offset
, SEEK_SET
) == -1 || read (fd
, readbuf
, len
) != len
)
3616 /* Parse LINE as a signal set and add its set bits to SIGS. */
3619 add_line_to_sigset (const char *line
, sigset_t
*sigs
)
3621 int len
= strlen (line
) - 1;
3625 if (line
[len
] != '\n')
3626 error (_("Could not parse signal set: %s"), line
);
3634 if (*p
>= '0' && *p
<= '9')
3636 else if (*p
>= 'a' && *p
<= 'f')
3637 digit
= *p
- 'a' + 10;
3639 error (_("Could not parse signal set: %s"), line
);
3644 sigaddset (sigs
, signum
+ 1);
3646 sigaddset (sigs
, signum
+ 2);
3648 sigaddset (sigs
, signum
+ 3);
3650 sigaddset (sigs
, signum
+ 4);
3656 /* Find process PID's pending signals from /proc/pid/status and set
3660 linux_proc_pending_signals (int pid
, sigset_t
*pending
, sigset_t
*blocked
, sigset_t
*ignored
)
3663 char buffer
[MAXPATHLEN
], fname
[MAXPATHLEN
];
3666 sigemptyset (pending
);
3667 sigemptyset (blocked
);
3668 sigemptyset (ignored
);
3669 sprintf (fname
, "/proc/%d/status", pid
);
3670 procfile
= fopen (fname
, "r");
3671 if (procfile
== NULL
)
3672 error (_("Could not open %s"), fname
);
3674 while (fgets (buffer
, MAXPATHLEN
, procfile
) != NULL
)
3676 /* Normal queued signals are on the SigPnd line in the status
3677 file. However, 2.6 kernels also have a "shared" pending
3678 queue for delivering signals to a thread group, so check for
3681 Unfortunately some Red Hat kernels include the shared pending
3682 queue but not the ShdPnd status field. */
3684 if (strncmp (buffer
, "SigPnd:\t", 8) == 0)
3685 add_line_to_sigset (buffer
+ 8, pending
);
3686 else if (strncmp (buffer
, "ShdPnd:\t", 8) == 0)
3687 add_line_to_sigset (buffer
+ 8, pending
);
3688 else if (strncmp (buffer
, "SigBlk:\t", 8) == 0)
3689 add_line_to_sigset (buffer
+ 8, blocked
);
3690 else if (strncmp (buffer
, "SigIgn:\t", 8) == 0)
3691 add_line_to_sigset (buffer
+ 8, ignored
);
3698 linux_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3699 const char *annex
, gdb_byte
*readbuf
,
3700 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
3704 if (object
== TARGET_OBJECT_AUXV
)
3705 return procfs_xfer_auxv (ops
, object
, annex
, readbuf
, writebuf
,
3708 xfer
= linux_proc_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
3713 return super_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
3717 /* Create a prototype generic GNU/Linux target. The client can override
3718 it with local methods. */
3721 linux_target_install_ops (struct target_ops
*t
)
3723 t
->to_insert_fork_catchpoint
= linux_child_insert_fork_catchpoint
;
3724 t
->to_insert_vfork_catchpoint
= linux_child_insert_vfork_catchpoint
;
3725 t
->to_insert_exec_catchpoint
= linux_child_insert_exec_catchpoint
;
3726 t
->to_pid_to_exec_file
= linux_child_pid_to_exec_file
;
3727 t
->to_post_startup_inferior
= linux_child_post_startup_inferior
;
3728 t
->to_post_attach
= linux_child_post_attach
;
3729 t
->to_follow_fork
= linux_child_follow_fork
;
3730 t
->to_find_memory_regions
= linux_nat_find_memory_regions
;
3731 t
->to_make_corefile_notes
= linux_nat_make_corefile_notes
;
3733 super_xfer_partial
= t
->to_xfer_partial
;
3734 t
->to_xfer_partial
= linux_xfer_partial
;
struct target_ops *
linux_target (void)
{
  struct target_ops *t;

  /* Start from the generic ptrace target, then layer on the
     GNU/Linux-specific methods.  */
  t = inf_ptrace_target ();
  linux_target_install_ops (t);

  return t;
}
3749 linux_trad_target (CORE_ADDR (*register_u_offset
)(struct gdbarch
*, int, int))
3751 struct target_ops
*t
;
3753 t
= inf_ptrace_trad_target (register_u_offset
);
3754 linux_target_install_ops (t
);
3759 /* Controls if async mode is permitted. */
3760 static int linux_async_permitted
= 0;
3762 /* The set command writes to this variable. If the inferior is
3763 executing, linux_nat_async_permitted is *not* updated. */
3764 static int linux_async_permitted_1
= 0;
3767 set_maintenance_linux_async_permitted (char *args
, int from_tty
,
3768 struct cmd_list_element
*c
)
3770 if (target_has_execution
)
3772 linux_async_permitted_1
= linux_async_permitted
;
3773 error (_("Cannot change this setting while the inferior is running."));
3776 linux_async_permitted
= linux_async_permitted_1
;
3777 linux_nat_set_async_mode (linux_async_permitted
);
/* "maintenance show linux-async" handler: report the current
   setting.  */

static void
show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty,
					struct cmd_list_element *c,
					const char *value)
{
  fprintf_filtered (file, _("\
Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"),
		    value);
}
3789 /* target_is_async_p implementation. */
3792 linux_nat_is_async_p (void)
3794 /* NOTE: palves 2008-03-21: We're only async when the user requests
3795 it explicitly with the "maintenance set linux-async" command.
3796 Someday, linux will always be async. */
3797 if (!linux_async_permitted
)
3803 /* target_can_async_p implementation. */
3806 linux_nat_can_async_p (void)
3808 /* NOTE: palves 2008-03-21: We're only async when the user requests
3809 it explicitly with the "maintenance set linux-async" command.
3810 Someday, linux will always be async. */
3811 if (!linux_async_permitted
)
3814 /* See target.h/target_async_mask. */
3815 return linux_nat_async_mask_value
;
3818 /* target_async_mask implementation. */
3821 linux_nat_async_mask (int mask
)
3824 current_state
= linux_nat_async_mask_value
;
3826 if (current_state
!= mask
)
3830 linux_nat_async (NULL
, 0);
3831 linux_nat_async_mask_value
= mask
;
3832 /* We're in sync mode. Make sure SIGCHLD isn't handled by
3833 async_sigchld_handler when we come out of sigsuspend in
3835 sigaction (SIGCHLD
, &sync_sigchld_action
, NULL
);
3839 /* Restore the async handler. */
3840 sigaction (SIGCHLD
, &async_sigchld_action
, NULL
);
3841 linux_nat_async_mask_value
= mask
;
3842 linux_nat_async (inferior_event_handler
, 0);
3846 return current_state
;
3849 /* Pop an event from the event pipe. */
3852 linux_nat_event_pipe_pop (int* ptr_status
, int* ptr_options
)
3854 struct waitpid_result event
= {0};
3859 ret
= read (linux_nat_event_pipe
[0], &event
, sizeof (event
));
3861 while (ret
== -1 && errno
== EINTR
);
3863 gdb_assert (ret
== sizeof (event
));
3865 *ptr_status
= event
.status
;
3866 *ptr_options
= event
.options
;
3868 linux_nat_num_queued_events
--;
3873 /* Push an event into the event pipe. */
3876 linux_nat_event_pipe_push (int pid
, int status
, int options
)
3879 struct waitpid_result event
= {0};
3881 event
.status
= status
;
3882 event
.options
= options
;
3886 ret
= write (linux_nat_event_pipe
[1], &event
, sizeof (event
));
3887 gdb_assert ((ret
== -1 && errno
== EINTR
) || ret
== sizeof (event
));
3888 } while (ret
== -1 && errno
== EINTR
);
3890 linux_nat_num_queued_events
++;
3894 get_pending_events (void)
3896 int status
, options
, pid
;
3898 if (!linux_nat_async_enabled
|| !linux_nat_async_events_enabled
)
3899 internal_error (__FILE__
, __LINE__
,
3900 "get_pending_events called with async masked");
3905 options
= __WCLONE
| WNOHANG
;
3909 pid
= waitpid (-1, &status
, options
);
3911 while (pid
== -1 && errno
== EINTR
);
3918 pid
= waitpid (-1, &status
, options
);
3920 while (pid
== -1 && errno
== EINTR
);
3924 /* No more children reporting events. */
3927 if (debug_linux_nat_async
)
3928 fprintf_unfiltered (gdb_stdlog
, "\
3929 get_pending_events: pid(%d), status(%x), options (%x)\n",
3930 pid
, status
, options
);
3932 linux_nat_event_pipe_push (pid
, status
, options
);
3935 if (debug_linux_nat_async
)
3936 fprintf_unfiltered (gdb_stdlog
, "\
3937 get_pending_events: linux_nat_num_queued_events(%d)\n",
3938 linux_nat_num_queued_events
);
3941 /* SIGCHLD handler for async mode. */
3944 async_sigchld_handler (int signo
)
3946 if (debug_linux_nat_async
)
3947 fprintf_unfiltered (gdb_stdlog
, "async_sigchld_handler\n");
3949 get_pending_events ();
3952 /* Enable or disable async SIGCHLD handling. */
3955 linux_nat_async_events (int enable
)
3957 int current_state
= linux_nat_async_events_enabled
;
3959 if (debug_linux_nat_async
)
3960 fprintf_unfiltered (gdb_stdlog
,
3961 "LNAE: enable(%d): linux_nat_async_events_enabled(%d), "
3962 "linux_nat_num_queued_events(%d)\n",
3963 enable
, linux_nat_async_events_enabled
,
3964 linux_nat_num_queued_events
);
3966 if (current_state
!= enable
)
3969 sigemptyset (&mask
);
3970 sigaddset (&mask
, SIGCHLD
);
3973 /* Unblock target events. */
3974 linux_nat_async_events_enabled
= 1;
3976 local_event_queue_to_pipe ();
3977 /* While in masked async, we may have not collected all the
3978 pending events. Get them out now. */
3979 get_pending_events ();
3980 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
3984 /* Block target events. */
3985 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
3986 linux_nat_async_events_enabled
= 0;
3987 /* Get events out of queue, and make them available to
3988 queued_waitpid / my_waitpid. */
3989 pipe_to_local_event_queue ();
3993 return current_state
;
3996 static int async_terminal_is_ours
= 1;
3998 /* target_terminal_inferior implementation. */
4001 linux_nat_terminal_inferior (void)
4003 if (!target_is_async_p ())
4005 /* Async mode is disabled. */
4006 terminal_inferior ();
4010 /* GDB should never give the terminal to the inferior, if the
4011 inferior is running in the background (run&, continue&, etc.).
4012 This check can be removed when the common code is fixed. */
4013 if (!sync_execution
)
4016 terminal_inferior ();
4018 if (!async_terminal_is_ours
)
4021 delete_file_handler (input_fd
);
4022 async_terminal_is_ours
= 0;
4026 /* target_terminal_ours implementation. */
4029 linux_nat_terminal_ours (void)
4031 if (!target_is_async_p ())
4033 /* Async mode is disabled. */
4038 /* GDB should never give the terminal to the inferior if the
4039 inferior is running in the background (run&, continue&, etc.),
4040 but claiming it sure should. */
4043 if (!sync_execution
)
4046 if (async_terminal_is_ours
)
4049 clear_sigint_trap ();
4050 add_file_handler (input_fd
, stdin_event_handler
, 0);
4051 async_terminal_is_ours
= 1;
4054 static void (*async_client_callback
) (enum inferior_event_type event_type
,
4056 static void *async_client_context
;
4059 linux_nat_async_file_handler (int error
, gdb_client_data client_data
)
4061 async_client_callback (INF_REG_EVENT
, async_client_context
);
4064 /* target_async implementation. */
4067 linux_nat_async (void (*callback
) (enum inferior_event_type event_type
,
4068 void *context
), void *context
)
4070 if (linux_nat_async_mask_value
== 0 || !linux_nat_async_enabled
)
4071 internal_error (__FILE__
, __LINE__
,
4072 "Calling target_async when async is masked");
4074 if (callback
!= NULL
)
4076 async_client_callback
= callback
;
4077 async_client_context
= context
;
4078 add_file_handler (linux_nat_event_pipe
[0],
4079 linux_nat_async_file_handler
, NULL
);
4081 linux_nat_async_events (1);
4085 async_client_callback
= callback
;
4086 async_client_context
= context
;
4088 linux_nat_async_events (0);
4089 delete_file_handler (linux_nat_event_pipe
[0]);
4094 /* Enable/Disable async mode. */
4097 linux_nat_set_async_mode (int on
)
4099 if (linux_nat_async_enabled
!= on
)
4103 gdb_assert (waitpid_queue
== NULL
);
4104 sigaction (SIGCHLD
, &async_sigchld_action
, NULL
);
4106 if (pipe (linux_nat_event_pipe
) == -1)
4107 internal_error (__FILE__
, __LINE__
,
4108 "creating event pipe failed.");
4110 fcntl (linux_nat_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4111 fcntl (linux_nat_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4115 sigaction (SIGCHLD
, &sync_sigchld_action
, NULL
);
4117 drain_queued_events (-1);
4119 linux_nat_num_queued_events
= 0;
4120 close (linux_nat_event_pipe
[0]);
4121 close (linux_nat_event_pipe
[1]);
4122 linux_nat_event_pipe
[0] = linux_nat_event_pipe
[1] = -1;
4126 linux_nat_async_enabled
= on
;
4130 linux_nat_add_target (struct target_ops
*t
)
4132 /* Save the provided single-threaded target. We save this in a separate
4133 variable because another target we've inherited from (e.g. inf-ptrace)
4134 may have saved a pointer to T; we want to use it for the final
4135 process stratum target. */
4136 linux_ops_saved
= *t
;
4137 linux_ops
= &linux_ops_saved
;
4139 /* Override some methods for multithreading. */
4140 t
->to_create_inferior
= linux_nat_create_inferior
;
4141 t
->to_attach
= linux_nat_attach
;
4142 t
->to_detach
= linux_nat_detach
;
4143 t
->to_resume
= linux_nat_resume
;
4144 t
->to_wait
= linux_nat_wait
;
4145 t
->to_xfer_partial
= linux_nat_xfer_partial
;
4146 t
->to_kill
= linux_nat_kill
;
4147 t
->to_mourn_inferior
= linux_nat_mourn_inferior
;
4148 t
->to_thread_alive
= linux_nat_thread_alive
;
4149 t
->to_pid_to_str
= linux_nat_pid_to_str
;
4150 t
->to_has_thread_control
= tc_schedlock
;
4152 t
->to_can_async_p
= linux_nat_can_async_p
;
4153 t
->to_is_async_p
= linux_nat_is_async_p
;
4154 t
->to_async
= linux_nat_async
;
4155 t
->to_async_mask
= linux_nat_async_mask
;
4156 t
->to_terminal_inferior
= linux_nat_terminal_inferior
;
4157 t
->to_terminal_ours
= linux_nat_terminal_ours
;
4159 /* We don't change the stratum; this target will sit at
4160 process_stratum and thread_db will set at thread_stratum. This
4161 is a little strange, since this is a multi-threaded-capable
4162 target, but we want to be on the stack below thread_db, and we
4163 also want to be used for single-threaded processes. */
4167 /* TODO: Eliminate this and have libthread_db use
4168 find_target_beneath. */
4172 /* Register a method to call whenever a new thread is attached. */
4174 linux_nat_set_new_thread (struct target_ops
*t
, void (*new_thread
) (ptid_t
))
4176 /* Save the pointer. We only support a single registered instance
4177 of the GNU/Linux native target, so we do not need to map this to
4179 linux_nat_new_thread
= new_thread
;
4182 /* Return the saved siginfo associated with PTID. */
4184 linux_nat_get_siginfo (ptid_t ptid
)
4186 struct lwp_info
*lp
= find_lwp_pid (ptid
);
4188 gdb_assert (lp
!= NULL
);
4190 return &lp
->siginfo
;
4194 _initialize_linux_nat (void)
4198 add_info ("proc", linux_nat_info_proc_cmd
, _("\
4199 Show /proc process information about any running process.\n\
4200 Specify any process id, or use the program being debugged by default.\n\
4201 Specify any of the following keywords for detailed info:\n\
4202 mappings -- list of mapped memory regions.\n\
4203 stat -- list a bunch of random process info.\n\
4204 status -- list a different bunch of random process info.\n\
4205 all -- list all available /proc info."));
4207 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance
,
4208 &debug_linux_nat
, _("\
4209 Set debugging of GNU/Linux lwp module."), _("\
4210 Show debugging of GNU/Linux lwp module."), _("\
4211 Enables printf debugging output."),
4213 show_debug_linux_nat
,
4214 &setdebuglist
, &showdebuglist
);
4216 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance
,
4217 &debug_linux_nat_async
, _("\
4218 Set debugging of GNU/Linux async lwp module."), _("\
4219 Show debugging of GNU/Linux async lwp module."), _("\
4220 Enables printf debugging output."),
4222 show_debug_linux_nat_async
,
4223 &setdebuglist
, &showdebuglist
);
4225 add_setshow_boolean_cmd ("linux-async", class_maintenance
,
4226 &linux_async_permitted_1
, _("\
4227 Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4228 Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4229 Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."),
4230 set_maintenance_linux_async_permitted
,
4231 show_maintenance_linux_async_permitted
,
4232 &maintenance_set_cmdlist
,
4233 &maintenance_show_cmdlist
);
4235 /* Block SIGCHLD by default. Doing this early prevents it getting
4236 unblocked if an exception is thrown due to an error while the
4237 inferior is starting (sigsetjmp/siglongjmp). */
4238 sigemptyset (&mask
);
4239 sigaddset (&mask
, SIGCHLD
);
4240 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4242 /* Save this mask as the default. */
4243 sigprocmask (SIG_SETMASK
, NULL
, &normal_mask
);
4245 /* The synchronous SIGCHLD handler. */
4246 sync_sigchld_action
.sa_handler
= sigchld_handler
;
4247 sigemptyset (&sync_sigchld_action
.sa_mask
);
4248 sync_sigchld_action
.sa_flags
= SA_RESTART
;
4250 /* Make it the default. */
4251 sigaction (SIGCHLD
, &sync_sigchld_action
, NULL
);
4253 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4254 sigprocmask (SIG_SETMASK
, NULL
, &suspend_mask
);
4255 sigdelset (&suspend_mask
, SIGCHLD
);
4257 /* SIGCHLD handler for async mode. */
4258 async_sigchld_action
.sa_handler
= async_sigchld_handler
;
4259 sigemptyset (&async_sigchld_action
.sa_mask
);
4260 async_sigchld_action
.sa_flags
= SA_RESTART
;
4262 /* Install the default mode. */
4263 linux_nat_set_async_mode (linux_async_permitted
);
4267 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4268 the GNU/Linux Threads library and therefore doesn't really belong
4271 /* Read variable NAME in the target and return its value if found.
4272 Otherwise return zero. It is assumed that the type of the variable
4276 get_signo (const char *name
)
4278 struct minimal_symbol
*ms
;
4281 ms
= lookup_minimal_symbol (name
, NULL
, NULL
);
4285 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms
), (gdb_byte
*) &signo
,
4286 sizeof (signo
)) != 0)
4292 /* Return the set of signals used by the threads library in *SET. */
4295 lin_thread_get_thread_signals (sigset_t
*set
)
4297 struct sigaction action
;
4298 int restart
, cancel
;
4299 sigset_t blocked_mask
;
4301 sigemptyset (&blocked_mask
);
4304 restart
= get_signo ("__pthread_sig_restart");
4305 cancel
= get_signo ("__pthread_sig_cancel");
4307 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4308 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4309 not provide any way for the debugger to query the signal numbers -
4310 fortunately they don't change! */
4313 restart
= __SIGRTMIN
;
4316 cancel
= __SIGRTMIN
+ 1;
4318 sigaddset (set
, restart
);
4319 sigaddset (set
, cancel
);
4321 /* The GNU/Linux Threads library makes terminating threads send a
4322 special "cancel" signal instead of SIGCHLD. Make sure we catch
4323 those (to prevent them from terminating GDB itself, which is
4324 likely to be their default action) and treat them the same way as
4327 action
.sa_handler
= sigchld_handler
;
4328 sigemptyset (&action
.sa_mask
);
4329 action
.sa_flags
= SA_RESTART
;
4330 sigaction (cancel
, &action
, NULL
);
4332 /* We block the "cancel" signal throughout this code ... */
4333 sigaddset (&blocked_mask
, cancel
);
4334 sigprocmask (SIG_BLOCK
, &blocked_mask
, NULL
);
4336 /* ... except during a sigsuspend. */
4337 sigdelset (&suspend_mask
, cancel
);