1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001-2012 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "gdb_string.h"
25 #include "gdb_assert.h"
26 #ifdef HAVE_TKILL_SYSCALL
28 #include <sys/syscall.h>
30 #include <sys/ptrace.h>
31 #include "linux-nat.h"
32 #include "linux-ptrace.h"
33 #include "linux-procfs.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
39 #include "inf-ptrace.h"
41 #include <sys/param.h> /* for MAXPATHLEN */
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include "gdbthread.h" /* for struct thread_info etc. */
48 #include "gdb_stat.h" /* for struct stat */
49 #include <fcntl.h> /* for O_RDONLY */
51 #include "event-loop.h"
52 #include "event-top.h"
54 #include <sys/types.h>
55 #include "gdb_dirent.h"
56 #include "xml-support.h"
60 #include "linux-osdata.h"
61 #include "linux-tdep.h"
65 #define SPUFS_MAGIC 0x23c9b64e
68 #ifdef HAVE_PERSONALITY
69 # include <sys/personality.h>
70 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
71 # define ADDR_NO_RANDOMIZE 0x0040000
73 #endif /* HAVE_PERSONALITY */
75 /* This comment documents high-level logic of this file.
77 Waiting for events in sync mode
78 ===============================
80 When waiting for an event in a specific thread, we just use waitpid, passing
81 the specific pid, and not passing WNOHANG.
83 When waiting for an event in all threads, waitpid is not quite good. Prior to
84 version 2.4, Linux can either wait for event in main thread, or in secondary
85 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
86 miss an event. The solution is to use non-blocking waitpid, together with
87 sigsuspend. First, we use non-blocking waitpid to get an event in the main
88 process, if any. Second, we use non-blocking waitpid with the __WCLONED
89 flag to check for events in cloned processes. If nothing is found, we use
90 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
91 happened to a child process -- and SIGCHLD will be delivered both for events
92 in main debugged process and in cloned processes. As soon as we know there's
93 an event, we get back to calling nonblocking waitpid with and without
96 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
97 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
98 blocked, the signal becomes pending and sigsuspend immediately
99 notices it and returns.
101 Waiting for events in async mode
102 ================================
104 In async mode, GDB should always be ready to handle both user input
105 and target events, so neither blocking waitpid nor sigsuspend are
106 viable options. Instead, we should asynchronously notify the GDB main
107 event loop whenever there's an unprocessed event from the target. We
108 detect asynchronous target events by handling SIGCHLD signals. To
109 notify the event loop about target events, the self-pipe trick is used
110 --- a pipe is registered as waitable event source in the event loop,
111 the event loop select/poll's on the read end of this pipe (as well on
112 other event sources, e.g., stdin), and the SIGCHLD handler writes a
113 byte to this pipe. This is more portable than relying on
114 pselect/ppoll, since on kernels that lack those syscalls, libc
115 emulates them with select/poll+sigprocmask, and that is racy
116 (a.k.a. plain broken).
118 Obviously, if we fail to notify the event loop when there's a target
119 event, it's bad. OTOH, if we notify the event loop when there's no
120 event from the target, linux_nat_wait will detect that there's no real
121 event to report, and return event of type TARGET_WAITKIND_IGNORE.
122 This is mostly harmless, but it will waste time and is better avoided.
124 The main design point is that every time GDB is outside linux-nat.c,
125 we have a SIGCHLD handler installed that is called when something
126 happens to the target and notifies the GDB event loop. Whenever GDB
127 core decides to handle the event, and calls into linux-nat.c, we
128 process things as in sync mode, except that the we never block in
131 While processing an event, we may end up momentarily blocked in
132 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
133 return quickly. E.g., in all-stop mode, before reporting to the core
134 that an LWP hit a breakpoint, all LWPs are stopped by sending them
135 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
136 Note that this is different from blocking indefinitely waiting for the
137 next event --- here, we're already handling an event.
142 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
143 signal is not entirely significant; we just need for a signal to be delivered,
144 so that we can intercept it. SIGSTOP's advantage is that it can not be
145 blocked. A disadvantage is that it is not a real-time signal, so it can only
146 be queued once; we do not keep track of other sources of SIGSTOP.
148 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
149 use them, because they have special behavior when the signal is generated -
150 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
151 kills the entire thread group.
153 A delivered SIGSTOP would stop the entire thread group, not just the thread we
154 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
155 cancel it (by PTRACE_CONT without passing SIGSTOP).
157 We could use a real-time signal instead. This would solve those problems; we
158 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
159 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
160 generates it, and there are races with trying to find a signal that is not
164 #define O_LARGEFILE 0
167 /* Unlike other extended result codes, WSTOPSIG (status) on
168 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
169 instead SIGTRAP with bit 7 set. */
170 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
172 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
173 the use of the multi-threaded target. */
174 static struct target_ops
*linux_ops
;
175 static struct target_ops linux_ops_saved
;
177 /* The method to call, if any, when a new thread is attached. */
178 static void (*linux_nat_new_thread
) (struct lwp_info
*);
180 /* Hook to call prior to resuming a thread. */
181 static void (*linux_nat_prepare_to_resume
) (struct lwp_info
*);
183 /* The method to call, if any, when the siginfo object needs to be
184 converted between the layout returned by ptrace, and the layout in
185 the architecture of the inferior. */
186 static int (*linux_nat_siginfo_fixup
) (struct siginfo
*,
190 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
191 Called by our to_xfer_partial. */
192 static LONGEST (*super_xfer_partial
) (struct target_ops
*,
194 const char *, gdb_byte
*,
198 static int debug_linux_nat
;
/* "show debug lin-lwp" command callback: print whether debugging
   output of the GNU/Linux lwp module is currently enabled.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}
/* A (pid, wait status) pair kept on a singly linked list; see
   add_to_pid_list / pull_pid_from_list / in_pid_list_p below.
   NOTE(review): the extracted fragment lost the PID and STATUS
   members; they are reconstructed here from the accessors used by
   those helpers ((*p)->pid, new_pid->status).  */

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};

/* Head of the list of stopped pids recorded via add_to_pid_list.  */
struct simple_pid_list *stopped_pids;
215 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
216 can not be used, 1 if it can. */
218 static int linux_supports_tracefork_flag
= -1;
220 /* This variable is a tri-state flag: -1 for unknown, 0 if
221 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
223 static int linux_supports_tracesysgood_flag
= -1;
225 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
226 PTRACE_O_TRACEVFORKDONE. */
228 static int linux_supports_tracevforkdone_flag
= -1;
230 /* Stores the current used ptrace() options. */
231 static int current_ptrace_options
= 0;
233 /* Async mode support. */
235 /* The read/write ends of the pipe registered as waitable file in the
237 static int linux_nat_event_pipe
[2] = { -1, -1 };
239 /* Flush the event pipe. */
242 async_file_flush (void)
249 ret
= read (linux_nat_event_pipe
[0], &buf
, 1);
251 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
254 /* Put something (anything, doesn't matter what, or how much) in event
255 pipe, so that the select/poll in the event-loop realizes we have
256 something to process. */
259 async_file_mark (void)
263 /* It doesn't really matter what the pipe contains, as long we end
264 up with something in it. Might as well flush the previous
270 ret
= write (linux_nat_event_pipe
[1], "+", 1);
272 while (ret
== -1 && errno
== EINTR
);
274 /* Ignore EAGAIN. If the pipe is full, the event loop will already
275 be awakened anyway. */
278 static void linux_nat_async (void (*callback
)
279 (enum inferior_event_type event_type
,
282 static int kill_lwp (int lwpid
, int signo
);
284 static int stop_callback (struct lwp_info
*lp
, void *data
);
286 static void block_child_signals (sigset_t
*prev_mask
);
287 static void restore_child_signals_mask (sigset_t
*prev_mask
);
290 static struct lwp_info
*add_lwp (ptid_t ptid
);
291 static void purge_lwp_list (int pid
);
292 static void delete_lwp (ptid_t ptid
);
293 static struct lwp_info
*find_lwp_pid (ptid_t ptid
);
296 /* Trivial list manipulation functions to keep track of a list of
297 new stopped processes. */
299 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
301 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
304 new_pid
->status
= status
;
305 new_pid
->next
= *listp
;
310 in_pid_list_p (struct simple_pid_list
*list
, int pid
)
312 struct simple_pid_list
*p
;
314 for (p
= list
; p
!= NULL
; p
= p
->next
)
321 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
323 struct simple_pid_list
**p
;
325 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
326 if ((*p
)->pid
== pid
)
328 struct simple_pid_list
*next
= (*p
)->next
;
330 *statusp
= (*p
)->status
;
/* A helper function for linux_test_for_tracefork, called after
   fork (): make this process traceable, stop so the parent can set
   ptrace options, then fork once and exit (the probe in the parent
   watches for the resulting fork event).  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
/* Wrapper around waitpid that transparently restarts the call when
   it is interrupted by a signal (EINTR).  Returns whatever waitpid
   ultimately returns.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int result;

  do
    {
      result = waitpid (pid, statusp, flags);
    }
  while (result == -1 && errno == EINTR);

  return result;
}
366 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
368 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
369 we know that the feature is not available. This may change the tracing
370 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
372 However, if it succeeds, we don't know for sure that the feature is
373 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
374 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
375 fork tracing, and let it fork. If the process exits, we assume that we
376 can't use TRACEFORK; if we get the fork notification, and we can extract
377 the new child's PID, then we assume that we can. */
380 linux_test_for_tracefork (int original_pid
)
382 int child_pid
, ret
, status
;
386 /* We don't want those ptrace calls to be interrupted. */
387 block_child_signals (&prev_mask
);
389 linux_supports_tracefork_flag
= 0;
390 linux_supports_tracevforkdone_flag
= 0;
392 ret
= ptrace (PTRACE_SETOPTIONS
, original_pid
, 0, PTRACE_O_TRACEFORK
);
395 restore_child_signals_mask (&prev_mask
);
401 perror_with_name (("fork"));
404 linux_tracefork_child ();
406 ret
= my_waitpid (child_pid
, &status
, 0);
408 perror_with_name (("waitpid"));
409 else if (ret
!= child_pid
)
410 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret
);
411 if (! WIFSTOPPED (status
))
412 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
415 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0, PTRACE_O_TRACEFORK
);
418 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
421 warning (_("linux_test_for_tracefork: failed to kill child"));
422 restore_child_signals_mask (&prev_mask
);
426 ret
= my_waitpid (child_pid
, &status
, 0);
427 if (ret
!= child_pid
)
428 warning (_("linux_test_for_tracefork: failed "
429 "to wait for killed child"));
430 else if (!WIFSIGNALED (status
))
431 warning (_("linux_test_for_tracefork: unexpected "
432 "wait status 0x%x from killed child"), status
);
434 restore_child_signals_mask (&prev_mask
);
438 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
439 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
440 PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORKDONE
);
441 linux_supports_tracevforkdone_flag
= (ret
== 0);
443 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
445 warning (_("linux_test_for_tracefork: failed to resume child"));
447 ret
= my_waitpid (child_pid
, &status
, 0);
449 if (ret
== child_pid
&& WIFSTOPPED (status
)
450 && status
>> 16 == PTRACE_EVENT_FORK
)
453 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
454 if (ret
== 0 && second_pid
!= 0)
458 linux_supports_tracefork_flag
= 1;
459 my_waitpid (second_pid
, &second_status
, 0);
460 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
462 warning (_("linux_test_for_tracefork: "
463 "failed to kill second child"));
464 my_waitpid (second_pid
, &status
, 0);
468 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
469 "(%d, status 0x%x)"), ret
, status
);
471 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
473 warning (_("linux_test_for_tracefork: failed to kill child"));
474 my_waitpid (child_pid
, &status
, 0);
476 restore_child_signals_mask (&prev_mask
);
479 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
481 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
482 we know that the feature is not available. This may change the tracing
483 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
486 linux_test_for_tracesysgood (int original_pid
)
491 /* We don't want those ptrace calls to be interrupted. */
492 block_child_signals (&prev_mask
);
494 linux_supports_tracesysgood_flag
= 0;
496 ret
= ptrace (PTRACE_SETOPTIONS
, original_pid
, 0, PTRACE_O_TRACESYSGOOD
);
500 linux_supports_tracesysgood_flag
= 1;
502 restore_child_signals_mask (&prev_mask
);
505 /* Determine wether we support PTRACE_O_TRACESYSGOOD option available.
506 This function also sets linux_supports_tracesysgood_flag. */
509 linux_supports_tracesysgood (int pid
)
511 if (linux_supports_tracesysgood_flag
== -1)
512 linux_test_for_tracesysgood (pid
);
513 return linux_supports_tracesysgood_flag
;
516 /* Return non-zero iff we have tracefork functionality available.
517 This function also sets linux_supports_tracefork_flag. */
520 linux_supports_tracefork (int pid
)
522 if (linux_supports_tracefork_flag
== -1)
523 linux_test_for_tracefork (pid
);
524 return linux_supports_tracefork_flag
;
528 linux_supports_tracevforkdone (int pid
)
530 if (linux_supports_tracefork_flag
== -1)
531 linux_test_for_tracefork (pid
);
532 return linux_supports_tracevforkdone_flag
;
536 linux_enable_tracesysgood (ptid_t ptid
)
538 int pid
= ptid_get_lwp (ptid
);
541 pid
= ptid_get_pid (ptid
);
543 if (linux_supports_tracesysgood (pid
) == 0)
546 current_ptrace_options
|= PTRACE_O_TRACESYSGOOD
;
548 ptrace (PTRACE_SETOPTIONS
, pid
, 0, current_ptrace_options
);
553 linux_enable_event_reporting (ptid_t ptid
)
555 int pid
= ptid_get_lwp (ptid
);
558 pid
= ptid_get_pid (ptid
);
560 if (! linux_supports_tracefork (pid
))
563 current_ptrace_options
|= PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORK
564 | PTRACE_O_TRACEEXEC
| PTRACE_O_TRACECLONE
;
566 if (linux_supports_tracevforkdone (pid
))
567 current_ptrace_options
|= PTRACE_O_TRACEVFORKDONE
;
569 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
570 read-only process state. */
572 ptrace (PTRACE_SETOPTIONS
, pid
, 0, current_ptrace_options
);
/* target_ops hook run after attaching to PID: switch on extended
   ptrace event reporting and syscall tracing for it.  */

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
}
583 linux_child_post_startup_inferior (ptid_t ptid
)
585 linux_enable_event_reporting (ptid
);
586 linux_enable_tracesysgood (ptid
);
589 /* Return the number of known LWPs in the tgid given by PID. */
597 for (lp
= lwp_list
; lp
; lp
= lp
->next
)
598 if (ptid_get_pid (lp
->ptid
) == pid
)
604 /* Call delete_lwp with prototype compatible for make_cleanup. */
607 delete_lwp_cleanup (void *lp_voidp
)
609 struct lwp_info
*lp
= lp_voidp
;
611 delete_lwp (lp
->ptid
);
615 linux_child_follow_fork (struct target_ops
*ops
, int follow_child
)
619 int parent_pid
, child_pid
;
621 block_child_signals (&prev_mask
);
623 has_vforked
= (inferior_thread ()->pending_follow
.kind
624 == TARGET_WAITKIND_VFORKED
);
625 parent_pid
= ptid_get_lwp (inferior_ptid
);
627 parent_pid
= ptid_get_pid (inferior_ptid
);
628 child_pid
= PIDGET (inferior_thread ()->pending_follow
.value
.related_pid
);
631 linux_enable_event_reporting (pid_to_ptid (child_pid
));
634 && !non_stop
/* Non-stop always resumes both branches. */
635 && (!target_is_async_p () || sync_execution
)
636 && !(follow_child
|| detach_fork
|| sched_multi
))
638 /* The parent stays blocked inside the vfork syscall until the
639 child execs or exits. If we don't let the child run, then
640 the parent stays blocked. If we're telling the parent to run
641 in the foreground, the user will not be able to ctrl-c to get
642 back the terminal, effectively hanging the debug session. */
643 fprintf_filtered (gdb_stderr
, _("\
644 Can not resume the parent process over vfork in the foreground while\n\
645 holding the child stopped. Try \"set detach-on-fork\" or \
646 \"set schedule-multiple\".\n"));
647 /* FIXME output string > 80 columns. */
653 struct lwp_info
*child_lp
= NULL
;
655 /* We're already attached to the parent, by default. */
657 /* Detach new forked process? */
660 struct cleanup
*old_chain
;
662 /* Before detaching from the child, remove all breakpoints
663 from it. If we forked, then this has already been taken
664 care of by infrun.c. If we vforked however, any
665 breakpoint inserted in the parent is visible in the
666 child, even those added while stopped in a vfork
667 catchpoint. This will remove the breakpoints from the
668 parent also, but they'll be reinserted below. */
671 /* keep breakpoints list in sync. */
672 remove_breakpoints_pid (GET_PID (inferior_ptid
));
675 if (info_verbose
|| debug_linux_nat
)
677 target_terminal_ours ();
678 fprintf_filtered (gdb_stdlog
,
679 "Detaching after fork from "
680 "child process %d.\n",
684 old_chain
= save_inferior_ptid ();
685 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
687 child_lp
= add_lwp (inferior_ptid
);
688 child_lp
->stopped
= 1;
689 child_lp
->last_resume_kind
= resume_stop
;
690 make_cleanup (delete_lwp_cleanup
, child_lp
);
692 /* CHILD_LP has new PID, therefore linux_nat_new_thread is not called for it.
693 See i386_inferior_data_get for the Linux kernel specifics.
694 Ensure linux_nat_prepare_to_resume will reset the hardware debug
695 registers. It is done by the linux_nat_new_thread call, which is
696 being skipped in add_lwp above for the first lwp of a pid. */
697 gdb_assert (num_lwps (GET_PID (child_lp
->ptid
)) == 1);
698 if (linux_nat_new_thread
!= NULL
)
699 linux_nat_new_thread (child_lp
);
701 if (linux_nat_prepare_to_resume
!= NULL
)
702 linux_nat_prepare_to_resume (child_lp
);
703 ptrace (PTRACE_DETACH
, child_pid
, 0, 0);
705 do_cleanups (old_chain
);
709 struct inferior
*parent_inf
, *child_inf
;
710 struct cleanup
*old_chain
;
712 /* Add process to GDB's tables. */
713 child_inf
= add_inferior (child_pid
);
715 parent_inf
= current_inferior ();
716 child_inf
->attach_flag
= parent_inf
->attach_flag
;
717 copy_terminal_info (child_inf
, parent_inf
);
719 old_chain
= save_inferior_ptid ();
720 save_current_program_space ();
722 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
723 add_thread (inferior_ptid
);
724 child_lp
= add_lwp (inferior_ptid
);
725 child_lp
->stopped
= 1;
726 child_lp
->last_resume_kind
= resume_stop
;
727 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
729 /* If this is a vfork child, then the address-space is
730 shared with the parent. */
733 child_inf
->pspace
= parent_inf
->pspace
;
734 child_inf
->aspace
= parent_inf
->aspace
;
736 /* The parent will be frozen until the child is done
737 with the shared region. Keep track of the
739 child_inf
->vfork_parent
= parent_inf
;
740 child_inf
->pending_detach
= 0;
741 parent_inf
->vfork_child
= child_inf
;
742 parent_inf
->pending_detach
= 0;
746 child_inf
->aspace
= new_address_space ();
747 child_inf
->pspace
= add_program_space (child_inf
->aspace
);
748 child_inf
->removable
= 1;
749 set_current_program_space (child_inf
->pspace
);
750 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
752 /* Let the shared library layer (solib-svr4) learn about
753 this new process, relocate the cloned exec, pull in
754 shared libraries, and install the solib event
755 breakpoint. If a "cloned-VM" event was propagated
756 better throughout the core, this wouldn't be
758 solib_create_inferior_hook (0);
761 /* Let the thread_db layer learn about this new process. */
762 check_for_thread_db ();
764 do_cleanups (old_chain
);
769 struct lwp_info
*parent_lp
;
770 struct inferior
*parent_inf
;
772 parent_inf
= current_inferior ();
774 /* If we detached from the child, then we have to be careful
775 to not insert breakpoints in the parent until the child
776 is done with the shared memory region. However, if we're
777 staying attached to the child, then we can and should
778 insert breakpoints, so that we can debug it. A
779 subsequent child exec or exit is enough to know when does
780 the child stops using the parent's address space. */
781 parent_inf
->waiting_for_vfork_done
= detach_fork
;
782 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
784 parent_lp
= find_lwp_pid (pid_to_ptid (parent_pid
));
785 gdb_assert (linux_supports_tracefork_flag
>= 0);
787 if (linux_supports_tracevforkdone (0))
790 fprintf_unfiltered (gdb_stdlog
,
791 "LCFF: waiting for VFORK_DONE on %d\n",
793 parent_lp
->stopped
= 1;
795 /* We'll handle the VFORK_DONE event like any other
796 event, in target_wait. */
800 /* We can't insert breakpoints until the child has
801 finished with the shared memory region. We need to
802 wait until that happens. Ideal would be to just
804 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
805 - waitpid (parent_pid, &status, __WALL);
806 However, most architectures can't handle a syscall
807 being traced on the way out if it wasn't traced on
810 We might also think to loop, continuing the child
811 until it exits or gets a SIGTRAP. One problem is
812 that the child might call ptrace with PTRACE_TRACEME.
814 There's no simple and reliable way to figure out when
815 the vforked child will be done with its copy of the
816 shared memory. We could step it out of the syscall,
817 two instructions, let it go, and then single-step the
818 parent once. When we have hardware single-step, this
819 would work; with software single-step it could still
820 be made to work but we'd have to be able to insert
821 single-step breakpoints in the child, and we'd have
822 to insert -just- the single-step breakpoint in the
823 parent. Very awkward.
825 In the end, the best we can do is to make sure it
826 runs for a little while. Hopefully it will be out of
827 range of any breakpoints we reinsert. Usually this
828 is only the single-step breakpoint at vfork's return
832 fprintf_unfiltered (gdb_stdlog
,
833 "LCFF: no VFORK_DONE "
834 "support, sleeping a bit\n");
838 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
839 and leave it pending. The next linux_nat_resume call
840 will notice a pending event, and bypasses actually
841 resuming the inferior. */
842 parent_lp
->status
= 0;
843 parent_lp
->waitstatus
.kind
= TARGET_WAITKIND_VFORK_DONE
;
844 parent_lp
->stopped
= 1;
846 /* If we're in async mode, need to tell the event loop
847 there's something here to process. */
848 if (target_can_async_p ())
855 struct inferior
*parent_inf
, *child_inf
;
856 struct lwp_info
*child_lp
;
857 struct program_space
*parent_pspace
;
859 if (info_verbose
|| debug_linux_nat
)
861 target_terminal_ours ();
863 fprintf_filtered (gdb_stdlog
,
864 _("Attaching after process %d "
865 "vfork to child process %d.\n"),
866 parent_pid
, child_pid
);
868 fprintf_filtered (gdb_stdlog
,
869 _("Attaching after process %d "
870 "fork to child process %d.\n"),
871 parent_pid
, child_pid
);
874 /* Add the new inferior first, so that the target_detach below
875 doesn't unpush the target. */
877 child_inf
= add_inferior (child_pid
);
879 parent_inf
= current_inferior ();
880 child_inf
->attach_flag
= parent_inf
->attach_flag
;
881 copy_terminal_info (child_inf
, parent_inf
);
883 parent_pspace
= parent_inf
->pspace
;
885 /* If we're vforking, we want to hold on to the parent until the
886 child exits or execs. At child exec or exit time we can
887 remove the old breakpoints from the parent and detach or
888 resume debugging it. Otherwise, detach the parent now; we'll
889 want to reuse it's program/address spaces, but we can't set
890 them to the child before removing breakpoints from the
891 parent, otherwise, the breakpoints module could decide to
892 remove breakpoints from the wrong process (since they'd be
893 assigned to the same address space). */
897 gdb_assert (child_inf
->vfork_parent
== NULL
);
898 gdb_assert (parent_inf
->vfork_child
== NULL
);
899 child_inf
->vfork_parent
= parent_inf
;
900 child_inf
->pending_detach
= 0;
901 parent_inf
->vfork_child
= child_inf
;
902 parent_inf
->pending_detach
= detach_fork
;
903 parent_inf
->waiting_for_vfork_done
= 0;
905 else if (detach_fork
)
906 target_detach (NULL
, 0);
908 /* Note that the detach above makes PARENT_INF dangling. */
910 /* Add the child thread to the appropriate lists, and switch to
911 this new thread, before cloning the program space, and
912 informing the solib layer about this new process. */
914 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
915 add_thread (inferior_ptid
);
916 child_lp
= add_lwp (inferior_ptid
);
917 child_lp
->stopped
= 1;
918 child_lp
->last_resume_kind
= resume_stop
;
920 /* If this is a vfork child, then the address-space is shared
921 with the parent. If we detached from the parent, then we can
922 reuse the parent's program/address spaces. */
923 if (has_vforked
|| detach_fork
)
925 child_inf
->pspace
= parent_pspace
;
926 child_inf
->aspace
= child_inf
->pspace
->aspace
;
930 child_inf
->aspace
= new_address_space ();
931 child_inf
->pspace
= add_program_space (child_inf
->aspace
);
932 child_inf
->removable
= 1;
933 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
934 set_current_program_space (child_inf
->pspace
);
935 clone_program_space (child_inf
->pspace
, parent_pspace
);
937 /* Let the shared library layer (solib-svr4) learn about
938 this new process, relocate the cloned exec, pull in
939 shared libraries, and install the solib event breakpoint.
940 If a "cloned-VM" event was propagated better throughout
941 the core, this wouldn't be required. */
942 solib_create_inferior_hook (0);
945 /* Let the thread_db layer learn about this new process. */
946 check_for_thread_db ();
949 restore_child_signals_mask (&prev_mask
);
/* target_ops hook: set a fork catchpoint.  Returns 0 on success,
   non-zero when kernel fork tracing is unsupported.  */

static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}
/* target_ops hook: remove a fork catchpoint.  Nothing to undo at the
   ptrace level, so this always succeeds.  */

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}
/* target_ops hook: set a vfork catchpoint.  Returns 0 on success,
   non-zero when kernel fork tracing is unsupported.  */

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}
/* target_ops hook: remove a vfork catchpoint.  Nothing to undo at
   the ptrace level, so this always succeeds.  */

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}
/* target_ops hook: set an exec catchpoint.  Returns 0 on success,
   non-zero when kernel fork/exec tracing is unsupported.  */

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}
/* target_ops hook: remove an exec catchpoint.  Nothing to undo at
   the ptrace level, so this always succeeds.  */

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}
/* target_ops hook: enable syscall entry/exit reporting.  Returns 0
   on success, 1 when PTRACE_O_TRACESYSGOOD is unsupported.

   On GNU/Linux, we ignore the arguments.  It means that we only
   enable the syscall catchpoints, but do not disable them.

   Also, we do not use the `table' information because we do not
   filter system calls here.  We let GDB do the logic for us.  */

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  return 0;
}
1005 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
1006 are processes sharing the same VM space. A multi-threaded process
1007 is basically a group of such processes. However, such a grouping
1008 is almost entirely a user-space issue; the kernel doesn't enforce
1009 such a grouping at all (this might change in the future). In
1010 general, we'll rely on the threads library (i.e. the GNU/Linux
1011 Threads library) to provide such a grouping.
1013 It is perfectly well possible to write a multi-threaded application
1014 without the assistance of a threads library, by using the clone
1015 system call directly. This module should be able to give some
1016 rudimentary support for debugging such applications if developers
1017 specify the CLONE_PTRACE flag in the clone system call, and are
1018 using the Linux kernel 2.4 or above.
1020 Note that there are some peculiarities in GNU/Linux that affect
1023 - In general one should specify the __WCLONE flag to waitpid in
1024 order to make it report events for any of the cloned processes
1025 (and leave it out for the initial process). However, if a cloned
1026 process has exited the exit status is only reported if the
1027 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
1028 we cannot use it since GDB must work on older systems too.
1030 - When a traced, cloned process exits and is waited for by the
1031 debugger, the kernel reassigns it to the original parent and
1032 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
1033 library doesn't notice this, which leads to the "zombie problem":
1034 When debugged a multi-threaded process that spawns a lot of
1035 threads will run out of processes, even if the threads exit,
1036 because the "zombies" stay around. */
1038 /* List of known LWPs. */
1039 struct lwp_info
*lwp_list
;
1042 /* Original signal mask. */
1043 static sigset_t normal_mask
;
1045 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1046 _initialize_linux_nat. */
1047 static sigset_t suspend_mask
;
1049 /* Signals to block to make that sigsuspend work. */
1050 static sigset_t blocked_mask
;
1052 /* SIGCHLD action. */
1053 struct sigaction sigchld_action
;
1055 /* Block child signals (SIGCHLD and linux threads signals), and store
1056 the previous mask in PREV_MASK. */
1059 block_child_signals (sigset_t
*prev_mask
)
1061 /* Make sure SIGCHLD is blocked. */
1062 if (!sigismember (&blocked_mask
, SIGCHLD
))
1063 sigaddset (&blocked_mask
, SIGCHLD
);
1065 sigprocmask (SIG_BLOCK
, &blocked_mask
, prev_mask
);
1068 /* Restore child signals mask, previously returned by
1069 block_child_signals. */
1072 restore_child_signals_mask (sigset_t
*prev_mask
)
1074 sigprocmask (SIG_SETMASK
, prev_mask
, NULL
);
1077 /* Mask of signals to pass directly to the inferior. */
1078 static sigset_t pass_mask
;
1080 /* Update signals to pass to the inferior. */
1082 linux_nat_pass_signals (int numsigs
, unsigned char *pass_signals
)
1086 sigemptyset (&pass_mask
);
1088 for (signo
= 1; signo
< NSIG
; signo
++)
1090 int target_signo
= target_signal_from_host (signo
);
1091 if (target_signo
< numsigs
&& pass_signals
[target_signo
])
1092 sigaddset (&pass_mask
, signo
);
1098 /* Prototypes for local functions. */
1099 static int stop_wait_callback (struct lwp_info
*lp
, void *data
);
1100 static int linux_thread_alive (ptid_t ptid
);
1101 static char *linux_child_pid_to_exec_file (int pid
);
1104 /* Convert wait status STATUS to a string. Used for printing debug
1108 status_to_str (int status
)
1110 static char buf
[64];
1112 if (WIFSTOPPED (status
))
1114 if (WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
1115 snprintf (buf
, sizeof (buf
), "%s (stopped at syscall)",
1116 strsignal (SIGTRAP
));
1118 snprintf (buf
, sizeof (buf
), "%s (stopped)",
1119 strsignal (WSTOPSIG (status
)));
1121 else if (WIFSIGNALED (status
))
1122 snprintf (buf
, sizeof (buf
), "%s (terminated)",
1123 strsignal (WTERMSIG (status
)));
1125 snprintf (buf
, sizeof (buf
), "%d (exited)", WEXITSTATUS (status
));
1130 /* Destroy and free LP. */
1133 lwp_free (struct lwp_info
*lp
)
1135 xfree (lp
->arch_private
);
1139 /* Remove all LWPs belong to PID from the lwp list. */
1142 purge_lwp_list (int pid
)
1144 struct lwp_info
*lp
, *lpprev
, *lpnext
;
1148 for (lp
= lwp_list
; lp
; lp
= lpnext
)
1152 if (ptid_get_pid (lp
->ptid
) == pid
)
1155 lwp_list
= lp
->next
;
1157 lpprev
->next
= lp
->next
;
1166 /* Add the LWP specified by PID to the list. Return a pointer to the
1167 structure describing the new LWP. The LWP should already be stopped
1168 (with an exception for the very first LWP). */
1170 static struct lwp_info
*
1171 add_lwp (ptid_t ptid
)
1173 struct lwp_info
*lp
;
1175 gdb_assert (is_lwp (ptid
));
1177 lp
= (struct lwp_info
*) xmalloc (sizeof (struct lwp_info
));
1179 memset (lp
, 0, sizeof (struct lwp_info
));
1181 lp
->last_resume_kind
= resume_continue
;
1182 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
1187 lp
->next
= lwp_list
;
1190 /* Let the arch specific bits know about this new thread. Current
1191 clients of this callback take the opportunity to install
1192 watchpoints in the new thread. Don't do this for the first
1193 thread though. If we're spawning a child ("run"), the thread
1194 executes the shell wrapper first, and we shouldn't touch it until
1195 it execs the program we want to debug. For "attach", it'd be
1196 okay to call the callback, but it's not necessary, because
1197 watchpoints can't yet have been inserted into the inferior. */
1198 if (num_lwps (GET_PID (ptid
)) > 1 && linux_nat_new_thread
!= NULL
)
1199 linux_nat_new_thread (lp
);
1204 /* Remove the LWP specified by PID from the list. */
1207 delete_lwp (ptid_t ptid
)
1209 struct lwp_info
*lp
, *lpprev
;
1213 for (lp
= lwp_list
; lp
; lpprev
= lp
, lp
= lp
->next
)
1214 if (ptid_equal (lp
->ptid
, ptid
))
1221 lpprev
->next
= lp
->next
;
1223 lwp_list
= lp
->next
;
1228 /* Return a pointer to the structure describing the LWP corresponding
1229 to PID. If no corresponding LWP could be found, return NULL. */
1231 static struct lwp_info
*
1232 find_lwp_pid (ptid_t ptid
)
1234 struct lwp_info
*lp
;
1238 lwp
= GET_LWP (ptid
);
1240 lwp
= GET_PID (ptid
);
1242 for (lp
= lwp_list
; lp
; lp
= lp
->next
)
1243 if (lwp
== GET_LWP (lp
->ptid
))
1249 /* Call CALLBACK with its second argument set to DATA for every LWP in
1250 the list. If CALLBACK returns 1 for a particular LWP, return a
1251 pointer to the structure describing that LWP immediately.
1252 Otherwise return NULL. */
1255 iterate_over_lwps (ptid_t filter
,
1256 int (*callback
) (struct lwp_info
*, void *),
1259 struct lwp_info
*lp
, *lpnext
;
1261 for (lp
= lwp_list
; lp
; lp
= lpnext
)
1265 if (ptid_match (lp
->ptid
, filter
))
1267 if ((*callback
) (lp
, data
))
1275 /* Iterate like iterate_over_lwps does except when forking-off a child call
1276 CALLBACK with CALLBACK_DATA specifically only for that new child PID. */
1279 linux_nat_iterate_watchpoint_lwps
1280 (linux_nat_iterate_watchpoint_lwps_ftype callback
, void *callback_data
)
1282 int inferior_pid
= ptid_get_pid (inferior_ptid
);
1283 struct inferior
*inf
= current_inferior ();
1285 if (inf
->pid
== inferior_pid
)
1287 /* Iterate all the threads of the current inferior. Without specifying
1288 INFERIOR_PID it would iterate all threads of all inferiors, which is
1289 inappropriate for watchpoints. */
1291 iterate_over_lwps (pid_to_ptid (inferior_pid
), callback
, callback_data
);
1295 /* Detaching a new child PID temporarily present in INFERIOR_PID. */
1297 struct lwp_info
*child_lp
;
1298 struct cleanup
*old_chain
;
1299 pid_t child_pid
= GET_PID (inferior_ptid
);
1300 ptid_t child_ptid
= ptid_build (child_pid
, child_pid
, 0);
1302 gdb_assert (!is_lwp (inferior_ptid
));
1303 gdb_assert (find_lwp_pid (child_ptid
) == NULL
);
1304 child_lp
= add_lwp (child_ptid
);
1305 child_lp
->stopped
= 1;
1306 child_lp
->last_resume_kind
= resume_stop
;
1307 old_chain
= make_cleanup (delete_lwp_cleanup
, child_lp
);
1309 callback (child_lp
, callback_data
);
1311 do_cleanups (old_chain
);
1315 /* Update our internal state when changing from one checkpoint to
1316 another indicated by NEW_PTID. We can only switch single-threaded
1317 applications, so we only create one new LWP, and the previous list
1321 linux_nat_switch_fork (ptid_t new_ptid
)
1323 struct lwp_info
*lp
;
1325 purge_lwp_list (GET_PID (inferior_ptid
));
1327 lp
= add_lwp (new_ptid
);
1330 /* This changes the thread's ptid while preserving the gdb thread
1331 num. Also changes the inferior pid, while preserving the
1333 thread_change_ptid (inferior_ptid
, new_ptid
);
1335 /* We've just told GDB core that the thread changed target id, but,
1336 in fact, it really is a different thread, with different register
1338 registers_changed ();
1341 /* Handle the exit of a single thread LP. */
1344 exit_lwp (struct lwp_info
*lp
)
1346 struct thread_info
*th
= find_thread_ptid (lp
->ptid
);
1350 if (print_thread_events
)
1351 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp
->ptid
));
1353 delete_thread (lp
->ptid
);
1356 delete_lwp (lp
->ptid
);
1359 /* Wait for the LWP specified by LP, which we have just attached to.
1360 Returns a wait status for that LWP, to cache. */
1363 linux_nat_post_attach_wait (ptid_t ptid
, int first
, int *cloned
,
1366 pid_t new_pid
, pid
= GET_LWP (ptid
);
1369 if (linux_proc_pid_is_stopped (pid
))
1371 if (debug_linux_nat
)
1372 fprintf_unfiltered (gdb_stdlog
,
1373 "LNPAW: Attaching to a stopped process\n");
1375 /* The process is definitely stopped. It is in a job control
1376 stop, unless the kernel predates the TASK_STOPPED /
1377 TASK_TRACED distinction, in which case it might be in a
1378 ptrace stop. Make sure it is in a ptrace stop; from there we
1379 can kill it, signal it, et cetera.
1381 First make sure there is a pending SIGSTOP. Since we are
1382 already attached, the process can not transition from stopped
1383 to running without a PTRACE_CONT; so we know this signal will
1384 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1385 probably already in the queue (unless this kernel is old
1386 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1387 is not an RT signal, it can only be queued once. */
1388 kill_lwp (pid
, SIGSTOP
);
1390 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1391 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1392 ptrace (PTRACE_CONT
, pid
, 0, 0);
1395 /* Make sure the initial process is stopped. The user-level threads
1396 layer might want to poke around in the inferior, and that won't
1397 work if things haven't stabilized yet. */
1398 new_pid
= my_waitpid (pid
, &status
, 0);
1399 if (new_pid
== -1 && errno
== ECHILD
)
1402 warning (_("%s is a cloned process"), target_pid_to_str (ptid
));
1404 /* Try again with __WCLONE to check cloned processes. */
1405 new_pid
= my_waitpid (pid
, &status
, __WCLONE
);
1409 gdb_assert (pid
== new_pid
);
1411 if (!WIFSTOPPED (status
))
1413 /* The pid we tried to attach has apparently just exited. */
1414 if (debug_linux_nat
)
1415 fprintf_unfiltered (gdb_stdlog
, "LNPAW: Failed to stop %d: %s",
1416 pid
, status_to_str (status
));
1420 if (WSTOPSIG (status
) != SIGSTOP
)
1423 if (debug_linux_nat
)
1424 fprintf_unfiltered (gdb_stdlog
,
1425 "LNPAW: Received %s after attaching\n",
1426 status_to_str (status
));
1432 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1433 the new LWP could not be attached, or 1 if we're already auto
1434 attached to this thread, but haven't processed the
1435 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1436 its existance, without considering it an error. */
1439 lin_lwp_attach_lwp (ptid_t ptid
)
1441 struct lwp_info
*lp
;
1445 gdb_assert (is_lwp (ptid
));
1447 block_child_signals (&prev_mask
);
1449 lp
= find_lwp_pid (ptid
);
1450 lwpid
= GET_LWP (ptid
);
1452 /* We assume that we're already attached to any LWP that has an id
1453 equal to the overall process id, and to any LWP that is already
1454 in our list of LWPs. If we're not seeing exit events from threads
1455 and we've had PID wraparound since we last tried to stop all threads,
1456 this assumption might be wrong; fortunately, this is very unlikely
1458 if (lwpid
!= GET_PID (ptid
) && lp
== NULL
)
1460 int status
, cloned
= 0, signalled
= 0;
1462 if (ptrace (PTRACE_ATTACH
, lwpid
, 0, 0) < 0)
1464 if (linux_supports_tracefork_flag
)
1466 /* If we haven't stopped all threads when we get here,
1467 we may have seen a thread listed in thread_db's list,
1468 but not processed the PTRACE_EVENT_CLONE yet. If
1469 that's the case, ignore this new thread, and let
1470 normal event handling discover it later. */
1471 if (in_pid_list_p (stopped_pids
, lwpid
))
1473 /* We've already seen this thread stop, but we
1474 haven't seen the PTRACE_EVENT_CLONE extended
1476 restore_child_signals_mask (&prev_mask
);
1484 /* See if we've got a stop for this new child
1485 pending. If so, we're already attached. */
1486 new_pid
= my_waitpid (lwpid
, &status
, WNOHANG
);
1487 if (new_pid
== -1 && errno
== ECHILD
)
1488 new_pid
= my_waitpid (lwpid
, &status
, __WCLONE
| WNOHANG
);
1491 if (WIFSTOPPED (status
))
1492 add_to_pid_list (&stopped_pids
, lwpid
, status
);
1494 restore_child_signals_mask (&prev_mask
);
1500 /* If we fail to attach to the thread, issue a warning,
1501 but continue. One way this can happen is if thread
1502 creation is interrupted; as of Linux kernel 2.6.19, a
1503 bug may place threads in the thread list and then fail
1505 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid
),
1506 safe_strerror (errno
));
1507 restore_child_signals_mask (&prev_mask
);
1511 if (debug_linux_nat
)
1512 fprintf_unfiltered (gdb_stdlog
,
1513 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1514 target_pid_to_str (ptid
));
1516 status
= linux_nat_post_attach_wait (ptid
, 0, &cloned
, &signalled
);
1517 if (!WIFSTOPPED (status
))
1519 restore_child_signals_mask (&prev_mask
);
1523 lp
= add_lwp (ptid
);
1525 lp
->cloned
= cloned
;
1526 lp
->signalled
= signalled
;
1527 if (WSTOPSIG (status
) != SIGSTOP
)
1530 lp
->status
= status
;
1533 target_post_attach (GET_LWP (lp
->ptid
));
1535 if (debug_linux_nat
)
1537 fprintf_unfiltered (gdb_stdlog
,
1538 "LLAL: waitpid %s received %s\n",
1539 target_pid_to_str (ptid
),
1540 status_to_str (status
));
1545 /* We assume that the LWP representing the original process is
1546 already stopped. Mark it as stopped in the data structure
1547 that the GNU/linux ptrace layer uses to keep track of
1548 threads. Note that this won't have already been done since
1549 the main thread will have, we assume, been stopped by an
1550 attach from a different layer. */
1552 lp
= add_lwp (ptid
);
1556 lp
->last_resume_kind
= resume_stop
;
1557 restore_child_signals_mask (&prev_mask
);
1562 linux_nat_create_inferior (struct target_ops
*ops
,
1563 char *exec_file
, char *allargs
, char **env
,
1566 #ifdef HAVE_PERSONALITY
1567 int personality_orig
= 0, personality_set
= 0;
1568 #endif /* HAVE_PERSONALITY */
1570 /* The fork_child mechanism is synchronous and calls target_wait, so
1571 we have to mask the async mode. */
1573 #ifdef HAVE_PERSONALITY
1574 if (disable_randomization
)
1577 personality_orig
= personality (0xffffffff);
1578 if (errno
== 0 && !(personality_orig
& ADDR_NO_RANDOMIZE
))
1580 personality_set
= 1;
1581 personality (personality_orig
| ADDR_NO_RANDOMIZE
);
1583 if (errno
!= 0 || (personality_set
1584 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE
)))
1585 warning (_("Error disabling address space randomization: %s"),
1586 safe_strerror (errno
));
1588 #endif /* HAVE_PERSONALITY */
1590 /* Make sure we report all signals during startup. */
1591 linux_nat_pass_signals (0, NULL
);
1593 linux_ops
->to_create_inferior (ops
, exec_file
, allargs
, env
, from_tty
);
1595 #ifdef HAVE_PERSONALITY
1596 if (personality_set
)
1599 personality (personality_orig
);
1601 warning (_("Error restoring address space randomization: %s"),
1602 safe_strerror (errno
));
1604 #endif /* HAVE_PERSONALITY */
1608 linux_nat_attach (struct target_ops
*ops
, char *args
, int from_tty
)
1610 struct lwp_info
*lp
;
1614 /* Make sure we report all signals during attach. */
1615 linux_nat_pass_signals (0, NULL
);
1617 linux_ops
->to_attach (ops
, args
, from_tty
);
1619 /* The ptrace base target adds the main thread with (pid,0,0)
1620 format. Decorate it with lwp info. */
1621 ptid
= BUILD_LWP (GET_PID (inferior_ptid
), GET_PID (inferior_ptid
));
1622 thread_change_ptid (inferior_ptid
, ptid
);
1624 /* Add the initial process as the first LWP to the list. */
1625 lp
= add_lwp (ptid
);
1627 status
= linux_nat_post_attach_wait (lp
->ptid
, 1, &lp
->cloned
,
1629 if (!WIFSTOPPED (status
))
1631 if (WIFEXITED (status
))
1633 int exit_code
= WEXITSTATUS (status
);
1635 target_terminal_ours ();
1636 target_mourn_inferior ();
1638 error (_("Unable to attach: program exited normally."));
1640 error (_("Unable to attach: program exited with code %d."),
1643 else if (WIFSIGNALED (status
))
1645 enum target_signal signo
;
1647 target_terminal_ours ();
1648 target_mourn_inferior ();
1650 signo
= target_signal_from_host (WTERMSIG (status
));
1651 error (_("Unable to attach: program terminated with signal "
1653 target_signal_to_name (signo
),
1654 target_signal_to_string (signo
));
1657 internal_error (__FILE__
, __LINE__
,
1658 _("unexpected status %d for PID %ld"),
1659 status
, (long) GET_LWP (ptid
));
1664 /* Save the wait status to report later. */
1666 if (debug_linux_nat
)
1667 fprintf_unfiltered (gdb_stdlog
,
1668 "LNA: waitpid %ld, saving status %s\n",
1669 (long) GET_PID (lp
->ptid
), status_to_str (status
));
1671 lp
->status
= status
;
1673 if (target_can_async_p ())
1674 target_async (inferior_event_handler
, 0);
1677 /* Get pending status of LP. */
1679 get_pending_status (struct lwp_info
*lp
, int *status
)
1681 enum target_signal signo
= TARGET_SIGNAL_0
;
1683 /* If we paused threads momentarily, we may have stored pending
1684 events in lp->status or lp->waitstatus (see stop_wait_callback),
1685 and GDB core hasn't seen any signal for those threads.
1686 Otherwise, the last signal reported to the core is found in the
1687 thread object's stop_signal.
1689 There's a corner case that isn't handled here at present. Only
1690 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1691 stop_signal make sense as a real signal to pass to the inferior.
1692 Some catchpoint related events, like
1693 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1694 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1695 those traps are debug API (ptrace in our case) related and
1696 induced; the inferior wouldn't see them if it wasn't being
1697 traced. Hence, we should never pass them to the inferior, even
1698 when set to pass state. Since this corner case isn't handled by
1699 infrun.c when proceeding with a signal, for consistency, neither
1700 do we handle it here (or elsewhere in the file we check for
1701 signal pass state). Normally SIGTRAP isn't set to pass state, so
1702 this is really a corner case. */
1704 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
1705 signo
= TARGET_SIGNAL_0
; /* a pending ptrace event, not a real signal. */
1706 else if (lp
->status
)
1707 signo
= target_signal_from_host (WSTOPSIG (lp
->status
));
1708 else if (non_stop
&& !is_executing (lp
->ptid
))
1710 struct thread_info
*tp
= find_thread_ptid (lp
->ptid
);
1712 signo
= tp
->suspend
.stop_signal
;
1716 struct target_waitstatus last
;
1719 get_last_target_status (&last_ptid
, &last
);
1721 if (GET_LWP (lp
->ptid
) == GET_LWP (last_ptid
))
1723 struct thread_info
*tp
= find_thread_ptid (lp
->ptid
);
1725 signo
= tp
->suspend
.stop_signal
;
1731 if (signo
== TARGET_SIGNAL_0
)
1733 if (debug_linux_nat
)
1734 fprintf_unfiltered (gdb_stdlog
,
1735 "GPT: lwp %s has no pending signal\n",
1736 target_pid_to_str (lp
->ptid
));
1738 else if (!signal_pass_state (signo
))
1740 if (debug_linux_nat
)
1741 fprintf_unfiltered (gdb_stdlog
,
1742 "GPT: lwp %s had signal %s, "
1743 "but it is in no pass state\n",
1744 target_pid_to_str (lp
->ptid
),
1745 target_signal_to_string (signo
));
1749 *status
= W_STOPCODE (target_signal_to_host (signo
));
1751 if (debug_linux_nat
)
1752 fprintf_unfiltered (gdb_stdlog
,
1753 "GPT: lwp %s has pending signal %s\n",
1754 target_pid_to_str (lp
->ptid
),
1755 target_signal_to_string (signo
));
1762 detach_callback (struct lwp_info
*lp
, void *data
)
1764 gdb_assert (lp
->status
== 0 || WIFSTOPPED (lp
->status
));
1766 if (debug_linux_nat
&& lp
->status
)
1767 fprintf_unfiltered (gdb_stdlog
, "DC: Pending %s for %s on detach.\n",
1768 strsignal (WSTOPSIG (lp
->status
)),
1769 target_pid_to_str (lp
->ptid
));
1771 /* If there is a pending SIGSTOP, get rid of it. */
1774 if (debug_linux_nat
)
1775 fprintf_unfiltered (gdb_stdlog
,
1776 "DC: Sending SIGCONT to %s\n",
1777 target_pid_to_str (lp
->ptid
));
1779 kill_lwp (GET_LWP (lp
->ptid
), SIGCONT
);
1783 /* We don't actually detach from the LWP that has an id equal to the
1784 overall process id just yet. */
1785 if (GET_LWP (lp
->ptid
) != GET_PID (lp
->ptid
))
1789 /* Pass on any pending signal for this LWP. */
1790 get_pending_status (lp
, &status
);
1792 if (linux_nat_prepare_to_resume
!= NULL
)
1793 linux_nat_prepare_to_resume (lp
);
1795 if (ptrace (PTRACE_DETACH
, GET_LWP (lp
->ptid
), 0,
1796 WSTOPSIG (status
)) < 0)
1797 error (_("Can't detach %s: %s"), target_pid_to_str (lp
->ptid
),
1798 safe_strerror (errno
));
1800 if (debug_linux_nat
)
1801 fprintf_unfiltered (gdb_stdlog
,
1802 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1803 target_pid_to_str (lp
->ptid
),
1804 strsignal (WSTOPSIG (status
)));
1806 delete_lwp (lp
->ptid
);
1813 linux_nat_detach (struct target_ops
*ops
, char *args
, int from_tty
)
1817 struct lwp_info
*main_lwp
;
1819 pid
= GET_PID (inferior_ptid
);
1821 if (target_can_async_p ())
1822 linux_nat_async (NULL
, 0);
1824 /* Stop all threads before detaching. ptrace requires that the
1825 thread is stopped to sucessfully detach. */
1826 iterate_over_lwps (pid_to_ptid (pid
), stop_callback
, NULL
);
1827 /* ... and wait until all of them have reported back that
1828 they're no longer running. */
1829 iterate_over_lwps (pid_to_ptid (pid
), stop_wait_callback
, NULL
);
1831 iterate_over_lwps (pid_to_ptid (pid
), detach_callback
, NULL
);
1833 /* Only the initial process should be left right now. */
1834 gdb_assert (num_lwps (GET_PID (inferior_ptid
)) == 1);
1836 main_lwp
= find_lwp_pid (pid_to_ptid (pid
));
1838 /* Pass on any pending signal for the last LWP. */
1839 if ((args
== NULL
|| *args
== '\0')
1840 && get_pending_status (main_lwp
, &status
) != -1
1841 && WIFSTOPPED (status
))
1843 /* Put the signal number in ARGS so that inf_ptrace_detach will
1844 pass it along with PTRACE_DETACH. */
1846 sprintf (args
, "%d", (int) WSTOPSIG (status
));
1847 if (debug_linux_nat
)
1848 fprintf_unfiltered (gdb_stdlog
,
1849 "LND: Sending signal %s to %s\n",
1851 target_pid_to_str (main_lwp
->ptid
));
1854 if (linux_nat_prepare_to_resume
!= NULL
)
1855 linux_nat_prepare_to_resume (main_lwp
);
1856 delete_lwp (main_lwp
->ptid
);
1858 if (forks_exist_p ())
1860 /* Multi-fork case. The current inferior_ptid is being detached
1861 from, but there are other viable forks to debug. Detach from
1862 the current fork, and context-switch to the first
1864 linux_fork_detach (args
, from_tty
);
1866 if (non_stop
&& target_can_async_p ())
1867 target_async (inferior_event_handler
, 0);
1870 linux_ops
->to_detach (ops
, args
, from_tty
);
1876 resume_lwp (struct lwp_info
*lp
, int step
)
1880 struct inferior
*inf
= find_inferior_pid (GET_PID (lp
->ptid
));
1882 if (inf
->vfork_child
!= NULL
)
1884 if (debug_linux_nat
)
1885 fprintf_unfiltered (gdb_stdlog
,
1886 "RC: Not resuming %s (vfork parent)\n",
1887 target_pid_to_str (lp
->ptid
));
1889 else if (lp
->status
== 0
1890 && lp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
)
1892 if (debug_linux_nat
)
1893 fprintf_unfiltered (gdb_stdlog
,
1894 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1895 target_pid_to_str (lp
->ptid
));
1897 if (linux_nat_prepare_to_resume
!= NULL
)
1898 linux_nat_prepare_to_resume (lp
);
1899 linux_ops
->to_resume (linux_ops
,
1900 pid_to_ptid (GET_LWP (lp
->ptid
)),
1901 step
, TARGET_SIGNAL_0
);
1904 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1905 lp
->stopped_by_watchpoint
= 0;
1909 if (debug_linux_nat
)
1910 fprintf_unfiltered (gdb_stdlog
,
1911 "RC: Not resuming sibling %s (has pending)\n",
1912 target_pid_to_str (lp
->ptid
));
1917 if (debug_linux_nat
)
1918 fprintf_unfiltered (gdb_stdlog
,
1919 "RC: Not resuming sibling %s (not stopped)\n",
1920 target_pid_to_str (lp
->ptid
));
1925 resume_callback (struct lwp_info
*lp
, void *data
)
1932 resume_clear_callback (struct lwp_info
*lp
, void *data
)
1935 lp
->last_resume_kind
= resume_stop
;
1940 resume_set_callback (struct lwp_info
*lp
, void *data
)
1943 lp
->last_resume_kind
= resume_continue
;
1948 linux_nat_resume (struct target_ops
*ops
,
1949 ptid_t ptid
, int step
, enum target_signal signo
)
1952 struct lwp_info
*lp
;
1955 if (debug_linux_nat
)
1956 fprintf_unfiltered (gdb_stdlog
,
1957 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1958 step
? "step" : "resume",
1959 target_pid_to_str (ptid
),
1960 (signo
!= TARGET_SIGNAL_0
1961 ? strsignal (target_signal_to_host (signo
)) : "0"),
1962 target_pid_to_str (inferior_ptid
));
1964 block_child_signals (&prev_mask
);
1966 /* A specific PTID means `step only this process id'. */
1967 resume_many
= (ptid_equal (minus_one_ptid
, ptid
)
1968 || ptid_is_pid (ptid
));
1970 /* Mark the lwps we're resuming as resumed. */
1971 iterate_over_lwps (ptid
, resume_set_callback
, NULL
);
1973 /* See if it's the current inferior that should be handled
1976 lp
= find_lwp_pid (inferior_ptid
);
1978 lp
= find_lwp_pid (ptid
);
1979 gdb_assert (lp
!= NULL
);
1981 /* Remember if we're stepping. */
1983 lp
->last_resume_kind
= step
? resume_step
: resume_continue
;
1985 /* If we have a pending wait status for this thread, there is no
1986 point in resuming the process. But first make sure that
1987 linux_nat_wait won't preemptively handle the event - we
1988 should never take this short-circuit if we are going to
1989 leave LP running, since we have skipped resuming all the
1990 other threads. This bit of code needs to be synchronized
1991 with linux_nat_wait. */
1993 if (lp
->status
&& WIFSTOPPED (lp
->status
))
1996 && WSTOPSIG (lp
->status
)
1997 && sigismember (&pass_mask
, WSTOPSIG (lp
->status
)))
1999 if (debug_linux_nat
)
2000 fprintf_unfiltered (gdb_stdlog
,
2001 "LLR: Not short circuiting for ignored "
2002 "status 0x%x\n", lp
->status
);
2004 /* FIXME: What should we do if we are supposed to continue
2005 this thread with a signal? */
2006 gdb_assert (signo
== TARGET_SIGNAL_0
);
2007 signo
= target_signal_from_host (WSTOPSIG (lp
->status
));
2012 if (lp
->status
|| lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
2014 /* FIXME: What should we do if we are supposed to continue
2015 this thread with a signal? */
2016 gdb_assert (signo
== TARGET_SIGNAL_0
);
2018 if (debug_linux_nat
)
2019 fprintf_unfiltered (gdb_stdlog
,
2020 "LLR: Short circuiting for status 0x%x\n",
2023 restore_child_signals_mask (&prev_mask
);
2024 if (target_can_async_p ())
2026 target_async (inferior_event_handler
, 0);
2027 /* Tell the event loop we have something to process. */
2033 /* Mark LWP as not stopped to prevent it from being continued by
2038 iterate_over_lwps (ptid
, resume_callback
, NULL
);
2040 /* Convert to something the lower layer understands. */
2041 ptid
= pid_to_ptid (GET_LWP (lp
->ptid
));
2043 if (linux_nat_prepare_to_resume
!= NULL
)
2044 linux_nat_prepare_to_resume (lp
);
2045 linux_ops
->to_resume (linux_ops
, ptid
, step
, signo
);
2046 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
2047 lp
->stopped_by_watchpoint
= 0;
2049 if (debug_linux_nat
)
2050 fprintf_unfiltered (gdb_stdlog
,
2051 "LLR: %s %s, %s (resume event thread)\n",
2052 step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2053 target_pid_to_str (ptid
),
2054 (signo
!= TARGET_SIGNAL_0
2055 ? strsignal (target_signal_to_host (signo
)) : "0"));
2057 restore_child_signals_mask (&prev_mask
);
2058 if (target_can_async_p ())
2059 target_async (inferior_event_handler
, 0);
2062 /* Send a signal to an LWP. */
2065 kill_lwp (int lwpid
, int signo
)
2067 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2068 fails, then we are not using nptl threads and we should be using kill. */
2070 #ifdef HAVE_TKILL_SYSCALL
2072 static int tkill_failed
;
2079 ret
= syscall (__NR_tkill
, lwpid
, signo
);
2080 if (errno
!= ENOSYS
)
2087 return kill (lwpid
, signo
);
2090 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2091 event, check if the core is interested in it: if not, ignore the
2092 event, and keep waiting; otherwise, we need to toggle the LWP's
2093 syscall entry/exit status, since the ptrace event itself doesn't
2094 indicate it, and report the trap to higher layers. */
2097 linux_handle_syscall_trap (struct lwp_info
*lp
, int stopping
)
2099 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
2100 struct gdbarch
*gdbarch
= target_thread_architecture (lp
->ptid
);
2101 int syscall_number
= (int) gdbarch_get_syscall_number (gdbarch
, lp
->ptid
);
2105 /* If we're stopping threads, there's a SIGSTOP pending, which
2106 makes it so that the LWP reports an immediate syscall return,
2107 followed by the SIGSTOP. Skip seeing that "return" using
2108 PTRACE_CONT directly, and let stop_wait_callback collect the
2109 SIGSTOP. Later when the thread is resumed, a new syscall
2110 entry event. If we didn't do this (and returned 0), we'd
2111 leave a syscall entry pending, and our caller, by using
2112 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2113 itself. Later, when the user re-resumes this LWP, we'd see
2114 another syscall entry event and we'd mistake it for a return.
2116 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2117 (leaving immediately with LWP->signalled set, without issuing
2118 a PTRACE_CONT), it would still be problematic to leave this
2119 syscall enter pending, as later when the thread is resumed,
2120 it would then see the same syscall exit mentioned above,
2121 followed by the delayed SIGSTOP, while the syscall didn't
2122 actually get to execute. It seems it would be even more
2123 confusing to the user. */
2125 if (debug_linux_nat
)
2126 fprintf_unfiltered (gdb_stdlog
,
2127 "LHST: ignoring syscall %d "
2128 "for LWP %ld (stopping threads), "
2129 "resuming with PTRACE_CONT for SIGSTOP\n",
2131 GET_LWP (lp
->ptid
));
2133 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2134 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2138 if (catch_syscall_enabled ())
2140 /* Always update the entry/return state, even if this particular
2141 syscall isn't interesting to the core now. In async mode,
2142 the user could install a new catchpoint for this syscall
2143 between syscall enter/return, and we'll need to know to
2144 report a syscall return if that happens. */
2145 lp
->syscall_state
= (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2146 ? TARGET_WAITKIND_SYSCALL_RETURN
2147 : TARGET_WAITKIND_SYSCALL_ENTRY
);
2149 if (catching_syscall_number (syscall_number
))
2151 /* Alright, an event to report. */
2152 ourstatus
->kind
= lp
->syscall_state
;
2153 ourstatus
->value
.syscall_number
= syscall_number
;
2155 if (debug_linux_nat
)
2156 fprintf_unfiltered (gdb_stdlog
,
2157 "LHST: stopping for %s of syscall %d"
2160 == TARGET_WAITKIND_SYSCALL_ENTRY
2161 ? "entry" : "return",
2163 GET_LWP (lp
->ptid
));
2167 if (debug_linux_nat
)
2168 fprintf_unfiltered (gdb_stdlog
,
2169 "LHST: ignoring %s of syscall %d "
2171 lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2172 ? "entry" : "return",
2174 GET_LWP (lp
->ptid
));
2178 /* If we had been syscall tracing, and hence used PT_SYSCALL
2179 before on this LWP, it could happen that the user removes all
2180 syscall catchpoints before we get to process this event.
2181 There are two noteworthy issues here:
2183 - When stopped at a syscall entry event, resuming with
2184 PT_STEP still resumes executing the syscall and reports a
2187 - Only PT_SYSCALL catches syscall enters. If we last
2188 single-stepped this thread, then this event can't be a
2189 syscall enter. If we last single-stepped this thread, this
2190 has to be a syscall exit.
2192 The points above mean that the next resume, be it PT_STEP or
2193 PT_CONTINUE, can not trigger a syscall trace event. */
2194 if (debug_linux_nat
)
2195 fprintf_unfiltered (gdb_stdlog
,
2196 "LHST: caught syscall event "
2197 "with no syscall catchpoints."
2198 " %d for LWP %ld, ignoring\n",
2200 GET_LWP (lp
->ptid
));
2201 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2204 /* The core isn't interested in this event. For efficiency, avoid
2205 stopping all threads only to have the core resume them all again.
2206 Since we're not stopping threads, if we're still syscall tracing
2207 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2208 subsequent syscall. Simply resume using the inf-ptrace layer,
2209 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2211 /* Note that gdbarch_get_syscall_number may access registers, hence
2213 registers_changed ();
2214 if (linux_nat_prepare_to_resume
!= NULL
)
2215 linux_nat_prepare_to_resume (lp
);
2216 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
2217 lp
->step
, TARGET_SIGNAL_0
);
2221 /* Handle a GNU/Linux extended wait response. If we see a clone
2222 event, we need to add the new LWP to our list (and not report the
2223 trap to higher layers). This function returns non-zero if the
2224 event should be ignored and we should wait again. If STOPPING is
2225 true, the new LWP remains stopped, otherwise it is continued. */
2228 linux_handle_extended_wait (struct lwp_info
*lp
, int status
,
2231 int pid
= GET_LWP (lp
->ptid
);
2232 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
2233 int event
= status
>> 16;
2235 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
2236 || event
== PTRACE_EVENT_CLONE
)
2238 unsigned long new_pid
;
2241 ptrace (PTRACE_GETEVENTMSG
, pid
, 0, &new_pid
);
2243 /* If we haven't already seen the new PID stop, wait for it now. */
2244 if (! pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
2246 /* The new child has a pending SIGSTOP. We can't affect it until it
2247 hits the SIGSTOP, but we're already attached. */
2248 ret
= my_waitpid (new_pid
, &status
,
2249 (event
== PTRACE_EVENT_CLONE
) ? __WCLONE
: 0);
2251 perror_with_name (_("waiting for new child"));
2252 else if (ret
!= new_pid
)
2253 internal_error (__FILE__
, __LINE__
,
2254 _("wait returned unexpected PID %d"), ret
);
2255 else if (!WIFSTOPPED (status
))
2256 internal_error (__FILE__
, __LINE__
,
2257 _("wait returned unexpected status 0x%x"), status
);
2260 ourstatus
->value
.related_pid
= ptid_build (new_pid
, new_pid
, 0);
2262 if (event
== PTRACE_EVENT_FORK
2263 && linux_fork_checkpointing_p (GET_PID (lp
->ptid
)))
2265 /* Handle checkpointing by linux-fork.c here as a special
2266 case. We don't want the follow-fork-mode or 'catch fork'
2267 to interfere with this. */
2269 /* This won't actually modify the breakpoint list, but will
2270 physically remove the breakpoints from the child. */
2271 detach_breakpoints (new_pid
);
2273 /* Retain child fork in ptrace (stopped) state. */
2274 if (!find_fork_pid (new_pid
))
2277 /* Report as spurious, so that infrun doesn't want to follow
2278 this fork. We're actually doing an infcall in
2280 ourstatus
->kind
= TARGET_WAITKIND_SPURIOUS
;
2281 linux_enable_event_reporting (pid_to_ptid (new_pid
));
2283 /* Report the stop to the core. */
2287 if (event
== PTRACE_EVENT_FORK
)
2288 ourstatus
->kind
= TARGET_WAITKIND_FORKED
;
2289 else if (event
== PTRACE_EVENT_VFORK
)
2290 ourstatus
->kind
= TARGET_WAITKIND_VFORKED
;
2293 struct lwp_info
*new_lp
;
2295 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2297 if (debug_linux_nat
)
2298 fprintf_unfiltered (gdb_stdlog
,
2299 "LHEW: Got clone event "
2300 "from LWP %d, new child is LWP %ld\n",
2303 new_lp
= add_lwp (BUILD_LWP (new_pid
, GET_PID (lp
->ptid
)));
2305 new_lp
->stopped
= 1;
2307 if (WSTOPSIG (status
) != SIGSTOP
)
2309 /* This can happen if someone starts sending signals to
2310 the new thread before it gets a chance to run, which
2311 have a lower number than SIGSTOP (e.g. SIGUSR1).
2312 This is an unlikely case, and harder to handle for
2313 fork / vfork than for clone, so we do not try - but
2314 we handle it for clone events here. We'll send
2315 the other signal on to the thread below. */
2317 new_lp
->signalled
= 1;
2321 struct thread_info
*tp
;
2323 /* When we stop for an event in some other thread, and
2324 pull the thread list just as this thread has cloned,
2325 we'll have seen the new thread in the thread_db list
2326 before handling the CLONE event (glibc's
2327 pthread_create adds the new thread to the thread list
2328 before clone'ing, and has the kernel fill in the
2329 thread's tid on the clone call with
2330 CLONE_PARENT_SETTID). If that happened, and the core
2331 had requested the new thread to stop, we'll have
2332 killed it with SIGSTOP. But since SIGSTOP is not an
2333 RT signal, it can only be queued once. We need to be
2334 careful to not resume the LWP if we wanted it to
2335 stop. In that case, we'll leave the SIGSTOP pending.
2336 It will later be reported as TARGET_SIGNAL_0. */
2337 tp
= find_thread_ptid (new_lp
->ptid
);
2338 if (tp
!= NULL
&& tp
->stop_requested
)
2339 new_lp
->last_resume_kind
= resume_stop
;
2346 /* Add the new thread to GDB's lists as soon as possible
2349 1) the frontend doesn't have to wait for a stop to
2352 2) we tag it with the correct running state. */
2354 /* If the thread_db layer is active, let it know about
2355 this new thread, and add it to GDB's list. */
2356 if (!thread_db_attach_lwp (new_lp
->ptid
))
2358 /* We're not using thread_db. Add it to GDB's
2360 target_post_attach (GET_LWP (new_lp
->ptid
));
2361 add_thread (new_lp
->ptid
);
2366 set_running (new_lp
->ptid
, 1);
2367 set_executing (new_lp
->ptid
, 1);
2368 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2370 new_lp
->last_resume_kind
= resume_continue
;
2376 /* We created NEW_LP so it cannot yet contain STATUS. */
2377 gdb_assert (new_lp
->status
== 0);
2379 /* Save the wait status to report later. */
2380 if (debug_linux_nat
)
2381 fprintf_unfiltered (gdb_stdlog
,
2382 "LHEW: waitpid of new LWP %ld, "
2383 "saving status %s\n",
2384 (long) GET_LWP (new_lp
->ptid
),
2385 status_to_str (status
));
2386 new_lp
->status
= status
;
2389 /* Note the need to use the low target ops to resume, to
2390 handle resuming with PT_SYSCALL if we have syscall
2394 new_lp
->resumed
= 1;
2398 gdb_assert (new_lp
->last_resume_kind
== resume_continue
);
2399 if (debug_linux_nat
)
2400 fprintf_unfiltered (gdb_stdlog
,
2401 "LHEW: resuming new LWP %ld\n",
2402 GET_LWP (new_lp
->ptid
));
2403 if (linux_nat_prepare_to_resume
!= NULL
)
2404 linux_nat_prepare_to_resume (new_lp
);
2405 linux_ops
->to_resume (linux_ops
, pid_to_ptid (new_pid
),
2406 0, TARGET_SIGNAL_0
);
2407 new_lp
->stopped
= 0;
2411 if (debug_linux_nat
)
2412 fprintf_unfiltered (gdb_stdlog
,
2413 "LHEW: resuming parent LWP %d\n", pid
);
2414 if (linux_nat_prepare_to_resume
!= NULL
)
2415 linux_nat_prepare_to_resume (lp
);
2416 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
2417 0, TARGET_SIGNAL_0
);
2425 if (event
== PTRACE_EVENT_EXEC
)
2427 if (debug_linux_nat
)
2428 fprintf_unfiltered (gdb_stdlog
,
2429 "LHEW: Got exec event from LWP %ld\n",
2430 GET_LWP (lp
->ptid
));
2432 ourstatus
->kind
= TARGET_WAITKIND_EXECD
;
2433 ourstatus
->value
.execd_pathname
2434 = xstrdup (linux_child_pid_to_exec_file (pid
));
2439 if (event
== PTRACE_EVENT_VFORK_DONE
)
2441 if (current_inferior ()->waiting_for_vfork_done
)
2443 if (debug_linux_nat
)
2444 fprintf_unfiltered (gdb_stdlog
,
2445 "LHEW: Got expected PTRACE_EVENT_"
2446 "VFORK_DONE from LWP %ld: stopping\n",
2447 GET_LWP (lp
->ptid
));
2449 ourstatus
->kind
= TARGET_WAITKIND_VFORK_DONE
;
2453 if (debug_linux_nat
)
2454 fprintf_unfiltered (gdb_stdlog
,
2455 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2456 "from LWP %ld: resuming\n",
2457 GET_LWP (lp
->ptid
));
2458 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2462 internal_error (__FILE__
, __LINE__
,
2463 _("unknown ptrace event %d"), event
);
/* Return non-zero if LWP is a zombie.  Reads /proc/LWP/status and
   checks the "State:" line.  Returns 0 (and warns) if the proc file
   cannot be opened, e.g. because the LWP is already gone.  */

static int
linux_lwp_is_zombie (long lwp)
{
  char buffer[MAXPATHLEN];
  FILE *procfile;
  int retval;
  int have_state;

  xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
  procfile = fopen (buffer, "r");
  if (procfile == NULL)
    {
      warning (_("unable to open /proc file '%s'"), buffer);
      return 0;
    }

  /* Scan for the "State:" line; BUFFER is reused to hold it.  */
  have_state = 0;
  while (fgets (buffer, sizeof (buffer), procfile) != NULL)
    if (strncmp (buffer, "State:", 6) == 0)
      {
	have_state = 1;
	break;
      }
  retval = (have_state
	    && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
  fclose (procfile);
  return retval;
}
2497 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2501 wait_lwp (struct lwp_info
*lp
)
2505 int thread_dead
= 0;
2508 gdb_assert (!lp
->stopped
);
2509 gdb_assert (lp
->status
== 0);
2511 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2512 block_child_signals (&prev_mask
);
2516 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2517 was right and we should just call sigsuspend. */
2519 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, WNOHANG
);
2520 if (pid
== -1 && errno
== ECHILD
)
2521 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, __WCLONE
| WNOHANG
);
2522 if (pid
== -1 && errno
== ECHILD
)
2524 /* The thread has previously exited. We need to delete it
2525 now because, for some vendor 2.4 kernels with NPTL
2526 support backported, there won't be an exit event unless
2527 it is the main thread. 2.6 kernels will report an exit
2528 event for each thread that exits, as expected. */
2530 if (debug_linux_nat
)
2531 fprintf_unfiltered (gdb_stdlog
, "WL: %s vanished.\n",
2532 target_pid_to_str (lp
->ptid
));
2537 /* Bugs 10970, 12702.
2538 Thread group leader may have exited in which case we'll lock up in
2539 waitpid if there are other threads, even if they are all zombies too.
2540 Basically, we're not supposed to use waitpid this way.
2541 __WCLONE is not applicable for the leader so we can't use that.
2542 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2543 process; it gets ESRCH both for the zombie and for running processes.
2545 As a workaround, check if we're waiting for the thread group leader and
2546 if it's a zombie, and avoid calling waitpid if it is.
2548 This is racy, what if the tgl becomes a zombie right after we check?
2549 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2550 waiting waitpid but the linux_lwp_is_zombie is safe this way. */
2552 if (GET_PID (lp
->ptid
) == GET_LWP (lp
->ptid
)
2553 && linux_lwp_is_zombie (GET_LWP (lp
->ptid
)))
2556 if (debug_linux_nat
)
2557 fprintf_unfiltered (gdb_stdlog
,
2558 "WL: Thread group leader %s vanished.\n",
2559 target_pid_to_str (lp
->ptid
));
2563 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2564 get invoked despite our caller had them intentionally blocked by
2565 block_child_signals. This is sensitive only to the loop of
2566 linux_nat_wait_1 and there if we get called my_waitpid gets called
2567 again before it gets to sigsuspend so we can safely let the handlers
2568 get executed here. */
2570 sigsuspend (&suspend_mask
);
2573 restore_child_signals_mask (&prev_mask
);
2577 gdb_assert (pid
== GET_LWP (lp
->ptid
));
2579 if (debug_linux_nat
)
2581 fprintf_unfiltered (gdb_stdlog
,
2582 "WL: waitpid %s received %s\n",
2583 target_pid_to_str (lp
->ptid
),
2584 status_to_str (status
));
2587 /* Check if the thread has exited. */
2588 if (WIFEXITED (status
) || WIFSIGNALED (status
))
2591 if (debug_linux_nat
)
2592 fprintf_unfiltered (gdb_stdlog
, "WL: %s exited.\n",
2593 target_pid_to_str (lp
->ptid
));
2603 gdb_assert (WIFSTOPPED (status
));
2605 /* Handle GNU/Linux's syscall SIGTRAPs. */
2606 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
2608 /* No longer need the sysgood bit. The ptrace event ends up
2609 recorded in lp->waitstatus if we care for it. We can carry
2610 on handling the event like a regular SIGTRAP from here
2612 status
= W_STOPCODE (SIGTRAP
);
2613 if (linux_handle_syscall_trap (lp
, 1))
2614 return wait_lwp (lp
);
2617 /* Handle GNU/Linux's extended waitstatus for trace events. */
2618 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
2620 if (debug_linux_nat
)
2621 fprintf_unfiltered (gdb_stdlog
,
2622 "WL: Handling extended status 0x%06x\n",
2624 if (linux_handle_extended_wait (lp
, status
, 1))
2625 return wait_lwp (lp
);
2631 /* Save the most recent siginfo for LP. This is currently only called
2632 for SIGTRAP; some ports use the si_addr field for
2633 target_stopped_data_address. In the future, it may also be used to
2634 restore the siginfo of requeued signals. */
2637 save_siginfo (struct lwp_info
*lp
)
2640 ptrace (PTRACE_GETSIGINFO
, GET_LWP (lp
->ptid
),
2641 (PTRACE_TYPE_ARG3
) 0, &lp
->siginfo
);
2644 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
2647 /* Send a SIGSTOP to LP. */
2650 stop_callback (struct lwp_info
*lp
, void *data
)
2652 if (!lp
->stopped
&& !lp
->signalled
)
2656 if (debug_linux_nat
)
2658 fprintf_unfiltered (gdb_stdlog
,
2659 "SC: kill %s **<SIGSTOP>**\n",
2660 target_pid_to_str (lp
->ptid
));
2663 ret
= kill_lwp (GET_LWP (lp
->ptid
), SIGSTOP
);
2664 if (debug_linux_nat
)
2666 fprintf_unfiltered (gdb_stdlog
,
2667 "SC: lwp kill %d %s\n",
2669 errno
? safe_strerror (errno
) : "ERRNO-OK");
2673 gdb_assert (lp
->status
== 0);
2679 /* Request a stop on LWP. */
2682 linux_stop_lwp (struct lwp_info
*lwp
)
2684 stop_callback (lwp
, NULL
);
2687 /* Return non-zero if LWP PID has a pending SIGINT. */
2690 linux_nat_has_pending_sigint (int pid
)
2692 sigset_t pending
, blocked
, ignored
;
2694 linux_proc_pending_signals (pid
, &pending
, &blocked
, &ignored
);
2696 if (sigismember (&pending
, SIGINT
)
2697 && !sigismember (&ignored
, SIGINT
))
2703 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2706 set_ignore_sigint (struct lwp_info
*lp
, void *data
)
2708 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2709 flag to consume the next one. */
2710 if (lp
->stopped
&& lp
->status
!= 0 && WIFSTOPPED (lp
->status
)
2711 && WSTOPSIG (lp
->status
) == SIGINT
)
2714 lp
->ignore_sigint
= 1;
2719 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2720 This function is called after we know the LWP has stopped; if the LWP
2721 stopped before the expected SIGINT was delivered, then it will never have
2722 arrived. Also, if the signal was delivered to a shared queue and consumed
2723 by a different thread, it will never be delivered to this LWP. */
2726 maybe_clear_ignore_sigint (struct lwp_info
*lp
)
2728 if (!lp
->ignore_sigint
)
2731 if (!linux_nat_has_pending_sigint (GET_LWP (lp
->ptid
)))
2733 if (debug_linux_nat
)
2734 fprintf_unfiltered (gdb_stdlog
,
2735 "MCIS: Clearing bogus flag for %s\n",
2736 target_pid_to_str (lp
->ptid
));
2737 lp
->ignore_sigint
= 0;
2741 /* Fetch the possible triggered data watchpoint info and store it in
2744 On some archs, like x86, that use debug registers to set
2745 watchpoints, it's possible that the way to know which watched
2746 address trapped, is to check the register that is used to select
2747 which address to watch. Problem is, between setting the watchpoint
2748 and reading back which data address trapped, the user may change
2749 the set of watchpoints, and, as a consequence, GDB changes the
2750 debug registers in the inferior. To avoid reading back a stale
2751 stopped-data-address when that happens, we cache in LP the fact
2752 that a watchpoint trapped, and the corresponding data address, as
2753 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2754 registers meanwhile, we have the cached data we can rely on. */
2757 save_sigtrap (struct lwp_info
*lp
)
2759 struct cleanup
*old_chain
;
2761 if (linux_ops
->to_stopped_by_watchpoint
== NULL
)
2763 lp
->stopped_by_watchpoint
= 0;
2767 old_chain
= save_inferior_ptid ();
2768 inferior_ptid
= lp
->ptid
;
2770 lp
->stopped_by_watchpoint
= linux_ops
->to_stopped_by_watchpoint ();
2772 if (lp
->stopped_by_watchpoint
)
2774 if (linux_ops
->to_stopped_data_address
!= NULL
)
2775 lp
->stopped_data_address_p
=
2776 linux_ops
->to_stopped_data_address (¤t_target
,
2777 &lp
->stopped_data_address
);
2779 lp
->stopped_data_address_p
= 0;
2782 do_cleanups (old_chain
);
2785 /* See save_sigtrap. */
2788 linux_nat_stopped_by_watchpoint (void)
2790 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2792 gdb_assert (lp
!= NULL
);
2794 return lp
->stopped_by_watchpoint
;
2798 linux_nat_stopped_data_address (struct target_ops
*ops
, CORE_ADDR
*addr_p
)
2800 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2802 gdb_assert (lp
!= NULL
);
2804 *addr_p
= lp
->stopped_data_address
;
2806 return lp
->stopped_data_address_p
;
/* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */

static int
sigtrap_is_event (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
}

/* SIGTRAP-like events recognizer.  Defaults to the plain SIGTRAP
   check above; targets may override it via
   linux_nat_set_status_is_event.  */

static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2821 /* Check for SIGTRAP-like events in LP. */
2824 linux_nat_lp_status_is_event (struct lwp_info
*lp
)
2826 /* We check for lp->waitstatus in addition to lp->status, because we can
2827 have pending process exits recorded in lp->status
2828 and W_EXITCODE(0,0) == 0. We should probably have an additional
2829 lp->status_p flag. */
2831 return (lp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
2832 && linux_nat_status_is_event (lp
->status
));
2835 /* Set alternative SIGTRAP-like events recognizer. If
2836 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2840 linux_nat_set_status_is_event (struct target_ops
*t
,
2841 int (*status_is_event
) (int status
))
2843 linux_nat_status_is_event
= status_is_event
;
2846 /* Wait until LP is stopped. */
2849 stop_wait_callback (struct lwp_info
*lp
, void *data
)
2851 struct inferior
*inf
= find_inferior_pid (GET_PID (lp
->ptid
));
2853 /* If this is a vfork parent, bail out, it is not going to report
2854 any SIGSTOP until the vfork is done with. */
2855 if (inf
->vfork_child
!= NULL
)
2862 status
= wait_lwp (lp
);
2866 if (lp
->ignore_sigint
&& WIFSTOPPED (status
)
2867 && WSTOPSIG (status
) == SIGINT
)
2869 lp
->ignore_sigint
= 0;
2872 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2873 if (debug_linux_nat
)
2874 fprintf_unfiltered (gdb_stdlog
,
2875 "PTRACE_CONT %s, 0, 0 (%s) "
2876 "(discarding SIGINT)\n",
2877 target_pid_to_str (lp
->ptid
),
2878 errno
? safe_strerror (errno
) : "OK");
2880 return stop_wait_callback (lp
, NULL
);
2883 maybe_clear_ignore_sigint (lp
);
2885 if (WSTOPSIG (status
) != SIGSTOP
)
2887 if (linux_nat_status_is_event (status
))
2889 /* If a LWP other than the LWP that we're reporting an
2890 event for has hit a GDB breakpoint (as opposed to
2891 some random trap signal), then just arrange for it to
2892 hit it again later. We don't keep the SIGTRAP status
2893 and don't forward the SIGTRAP signal to the LWP. We
2894 will handle the current event, eventually we will
2895 resume all LWPs, and this one will get its breakpoint
2898 If we do not do this, then we run the risk that the
2899 user will delete or disable the breakpoint, but the
2900 thread will have already tripped on it. */
2902 /* Save the trap's siginfo in case we need it later. */
2907 /* Now resume this LWP and get the SIGSTOP event. */
2909 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2910 if (debug_linux_nat
)
2912 fprintf_unfiltered (gdb_stdlog
,
2913 "PTRACE_CONT %s, 0, 0 (%s)\n",
2914 target_pid_to_str (lp
->ptid
),
2915 errno
? safe_strerror (errno
) : "OK");
2917 fprintf_unfiltered (gdb_stdlog
,
2918 "SWC: Candidate SIGTRAP event in %s\n",
2919 target_pid_to_str (lp
->ptid
));
2921 /* Hold this event/waitstatus while we check to see if
2922 there are any more (we still want to get that SIGSTOP). */
2923 stop_wait_callback (lp
, NULL
);
2925 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2926 there's another event, throw it back into the
2930 if (debug_linux_nat
)
2931 fprintf_unfiltered (gdb_stdlog
,
2932 "SWC: kill %s, %s\n",
2933 target_pid_to_str (lp
->ptid
),
2934 status_to_str ((int) status
));
2935 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (lp
->status
));
2938 /* Save the sigtrap event. */
2939 lp
->status
= status
;
2944 /* The thread was stopped with a signal other than
2945 SIGSTOP, and didn't accidentally trip a breakpoint. */
2947 if (debug_linux_nat
)
2949 fprintf_unfiltered (gdb_stdlog
,
2950 "SWC: Pending event %s in %s\n",
2951 status_to_str ((int) status
),
2952 target_pid_to_str (lp
->ptid
));
2954 /* Now resume this LWP and get the SIGSTOP event. */
2956 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2957 if (debug_linux_nat
)
2958 fprintf_unfiltered (gdb_stdlog
,
2959 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2960 target_pid_to_str (lp
->ptid
),
2961 errno
? safe_strerror (errno
) : "OK");
2963 /* Hold this event/waitstatus while we check to see if
2964 there are any more (we still want to get that SIGSTOP). */
2965 stop_wait_callback (lp
, NULL
);
2967 /* If the lp->status field is still empty, use it to
2968 hold this event. If not, then this event must be
2969 returned to the event queue of the LWP. */
2972 if (debug_linux_nat
)
2974 fprintf_unfiltered (gdb_stdlog
,
2975 "SWC: kill %s, %s\n",
2976 target_pid_to_str (lp
->ptid
),
2977 status_to_str ((int) status
));
2979 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (status
));
2982 lp
->status
= status
;
2988 /* We caught the SIGSTOP that we intended to catch, so
2989 there's no SIGSTOP pending. */
2998 /* Return non-zero if LP has a wait status pending. */
3001 status_callback (struct lwp_info
*lp
, void *data
)
3003 /* Only report a pending wait status if we pretend that this has
3004 indeed been resumed. */
3008 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3010 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
3011 or a pending process exit. Note that `W_EXITCODE(0,0) ==
3012 0', so a clean process exit can not be stored pending in
3013 lp->status, it is indistinguishable from
3014 no-pending-status. */
3018 if (lp
->status
!= 0)
3024 /* Return non-zero if LP isn't stopped. */
3027 running_callback (struct lwp_info
*lp
, void *data
)
3029 return (!lp
->stopped
3030 || ((lp
->status
!= 0
3031 || lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3035 /* Count the LWP's that have had events. */
3038 count_events_callback (struct lwp_info
*lp
, void *data
)
3042 gdb_assert (count
!= NULL
);
3044 /* Count only resumed LWPs that have a SIGTRAP event pending. */
3045 if (lp
->resumed
&& linux_nat_lp_status_is_event (lp
))
3051 /* Select the LWP (if any) that is currently being single-stepped. */
3054 select_singlestep_lwp_callback (struct lwp_info
*lp
, void *data
)
3056 if (lp
->last_resume_kind
== resume_step
3063 /* Select the Nth LWP that has had a SIGTRAP event. */
3066 select_event_lwp_callback (struct lwp_info
*lp
, void *data
)
3068 int *selector
= data
;
3070 gdb_assert (selector
!= NULL
);
3072 /* Select only resumed LWPs that have a SIGTRAP event pending. */
3073 if (lp
->resumed
&& linux_nat_lp_status_is_event (lp
))
3074 if ((*selector
)-- == 0)
3081 cancel_breakpoint (struct lwp_info
*lp
)
3083 /* Arrange for a breakpoint to be hit again later. We don't keep
3084 the SIGTRAP status and don't forward the SIGTRAP signal to the
3085 LWP. We will handle the current event, eventually we will resume
3086 this LWP, and this breakpoint will trap again.
3088 If we do not do this, then we run the risk that the user will
3089 delete or disable the breakpoint, but the LWP will have already
3092 struct regcache
*regcache
= get_thread_regcache (lp
->ptid
);
3093 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3096 pc
= regcache_read_pc (regcache
) - gdbarch_decr_pc_after_break (gdbarch
);
3097 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache
), pc
))
3099 if (debug_linux_nat
)
3100 fprintf_unfiltered (gdb_stdlog
,
3101 "CB: Push back breakpoint for %s\n",
3102 target_pid_to_str (lp
->ptid
));
3104 /* Back up the PC if necessary. */
3105 if (gdbarch_decr_pc_after_break (gdbarch
))
3106 regcache_write_pc (regcache
, pc
);
3114 cancel_breakpoints_callback (struct lwp_info
*lp
, void *data
)
3116 struct lwp_info
*event_lp
= data
;
3118 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3122 /* If a LWP other than the LWP that we're reporting an event for has
3123 hit a GDB breakpoint (as opposed to some random trap signal),
3124 then just arrange for it to hit it again later. We don't keep
3125 the SIGTRAP status and don't forward the SIGTRAP signal to the
3126 LWP. We will handle the current event, eventually we will resume
3127 all LWPs, and this one will get its breakpoint trap again.
3129 If we do not do this, then we run the risk that the user will
3130 delete or disable the breakpoint, but the LWP will have already
3133 if (linux_nat_lp_status_is_event (lp
)
3134 && cancel_breakpoint (lp
))
3135 /* Throw away the SIGTRAP. */
3141 /* Select one LWP out of those that have events pending. */
3144 select_event_lwp (ptid_t filter
, struct lwp_info
**orig_lp
, int *status
)
3147 int random_selector
;
3148 struct lwp_info
*event_lp
;
3150 /* Record the wait status for the original LWP. */
3151 (*orig_lp
)->status
= *status
;
3153 /* Give preference to any LWP that is being single-stepped. */
3154 event_lp
= iterate_over_lwps (filter
,
3155 select_singlestep_lwp_callback
, NULL
);
3156 if (event_lp
!= NULL
)
3158 if (debug_linux_nat
)
3159 fprintf_unfiltered (gdb_stdlog
,
3160 "SEL: Select single-step %s\n",
3161 target_pid_to_str (event_lp
->ptid
));
3165 /* No single-stepping LWP. Select one at random, out of those
3166 which have had SIGTRAP events. */
3168 /* First see how many SIGTRAP events we have. */
3169 iterate_over_lwps (filter
, count_events_callback
, &num_events
);
3171 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3172 random_selector
= (int)
3173 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
3175 if (debug_linux_nat
&& num_events
> 1)
3176 fprintf_unfiltered (gdb_stdlog
,
3177 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3178 num_events
, random_selector
);
3180 event_lp
= iterate_over_lwps (filter
,
3181 select_event_lwp_callback
,
3185 if (event_lp
!= NULL
)
3187 /* Switch the event LWP. */
3188 *orig_lp
= event_lp
;
3189 *status
= event_lp
->status
;
3192 /* Flush the wait status for the event LWP. */
3193 (*orig_lp
)->status
= 0;
3196 /* Return non-zero if LP has been resumed. */
3199 resumed_callback (struct lwp_info
*lp
, void *data
)
3204 /* Stop an active thread, verify it still exists, then resume it. If
3205 the thread ends up with a pending status, then it is not resumed,
3206 and *DATA (really a pointer to int), is set. */
3209 stop_and_resume_callback (struct lwp_info
*lp
, void *data
)
3211 int *new_pending_p
= data
;
3215 ptid_t ptid
= lp
->ptid
;
3217 stop_callback (lp
, NULL
);
3218 stop_wait_callback (lp
, NULL
);
3220 /* Resume if the lwp still exists, and the core wanted it
3222 lp
= find_lwp_pid (ptid
);
3225 if (lp
->last_resume_kind
== resume_stop
3228 /* The core wanted the LWP to stop. Even if it stopped
3229 cleanly (with SIGSTOP), leave the event pending. */
3230 if (debug_linux_nat
)
3231 fprintf_unfiltered (gdb_stdlog
,
3232 "SARC: core wanted LWP %ld stopped "
3233 "(leaving SIGSTOP pending)\n",
3234 GET_LWP (lp
->ptid
));
3235 lp
->status
= W_STOPCODE (SIGSTOP
);
3238 if (lp
->status
== 0)
3240 if (debug_linux_nat
)
3241 fprintf_unfiltered (gdb_stdlog
,
3242 "SARC: re-resuming LWP %ld\n",
3243 GET_LWP (lp
->ptid
));
3244 resume_lwp (lp
, lp
->step
);
3248 if (debug_linux_nat
)
3249 fprintf_unfiltered (gdb_stdlog
,
3250 "SARC: not re-resuming LWP %ld "
3252 GET_LWP (lp
->ptid
));
3261 /* Check if we should go on and pass this event to common code.
3262 Return the affected lwp if we are, or NULL otherwise. If we stop
3263 all lwps temporarily, we may end up with new pending events in some
3264 other lwp. In that case set *NEW_PENDING_P to true. */
3266 static struct lwp_info
*
3267 linux_nat_filter_event (int lwpid
, int status
, int *new_pending_p
)
3269 struct lwp_info
*lp
;
3273 lp
= find_lwp_pid (pid_to_ptid (lwpid
));
3275 /* Check for stop events reported by a process we didn't already
3276 know about - anything not already in our LWP list.
3278 If we're expecting to receive stopped processes after
3279 fork, vfork, and clone events, then we'll just add the
3280 new one to our list and go back to waiting for the event
3281 to be reported - the stopped process might be returned
3282 from waitpid before or after the event is.
3284 But note the case of a non-leader thread exec'ing after the
3285 leader having exited, and gone from our lists. The non-leader
3286 thread changes its tid to the tgid. */
3288 if (WIFSTOPPED (status
) && lp
== NULL
3289 && (WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 == PTRACE_EVENT_EXEC
))
3291 /* A multi-thread exec after we had seen the leader exiting. */
3292 if (debug_linux_nat
)
3293 fprintf_unfiltered (gdb_stdlog
,
3294 "LLW: Re-adding thread group leader LWP %d.\n",
3297 lp
= add_lwp (BUILD_LWP (lwpid
, lwpid
));
3300 add_thread (lp
->ptid
);
3303 if (WIFSTOPPED (status
) && !lp
)
3305 add_to_pid_list (&stopped_pids
, lwpid
, status
);
3309 /* Make sure we don't report an event for the exit of an LWP not in
3310 our list, i.e. not part of the current process. This can happen
3311 if we detach from a program we originally forked and then it
3313 if (!WIFSTOPPED (status
) && !lp
)
3316 /* Handle GNU/Linux's syscall SIGTRAPs. */
3317 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
3319 /* No longer need the sysgood bit. The ptrace event ends up
3320 recorded in lp->waitstatus if we care for it. We can carry
3321 on handling the event like a regular SIGTRAP from here
3323 status
= W_STOPCODE (SIGTRAP
);
3324 if (linux_handle_syscall_trap (lp
, 0))
3328 /* Handle GNU/Linux's extended waitstatus for trace events. */
3329 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
3331 if (debug_linux_nat
)
3332 fprintf_unfiltered (gdb_stdlog
,
3333 "LLW: Handling extended status 0x%06x\n",
3335 if (linux_handle_extended_wait (lp
, status
, 0))
3339 if (linux_nat_status_is_event (status
))
3341 /* Save the trap's siginfo in case we need it later. */
3347 /* Check if the thread has exited. */
3348 if ((WIFEXITED (status
) || WIFSIGNALED (status
))
3349 && num_lwps (GET_PID (lp
->ptid
)) > 1)
3351 /* If this is the main thread, we must stop all threads and verify
3352 if they are still alive. This is because in the nptl thread model
3353 on Linux 2.4, there is no signal issued for exiting LWPs
3354 other than the main thread. We only get the main thread exit
3355 signal once all child threads have already exited. If we
3356 stop all the threads and use the stop_wait_callback to check
3357 if they have exited we can determine whether this signal
3358 should be ignored or whether it means the end of the debugged
3359 application, regardless of which threading model is being
3361 if (GET_PID (lp
->ptid
) == GET_LWP (lp
->ptid
))
3364 iterate_over_lwps (pid_to_ptid (GET_PID (lp
->ptid
)),
3365 stop_and_resume_callback
, new_pending_p
);
3368 if (debug_linux_nat
)
3369 fprintf_unfiltered (gdb_stdlog
,
3370 "LLW: %s exited.\n",
3371 target_pid_to_str (lp
->ptid
));
3373 if (num_lwps (GET_PID (lp
->ptid
)) > 1)
3375 /* If there is at least one more LWP, then the exit signal
3376 was not the end of the debugged application and should be
3383 /* Check if the current LWP has previously exited. In the nptl
3384 thread model, LWPs other than the main thread do not issue
3385 signals when they exit so we must check whenever the thread has
3386 stopped. A similar check is made in stop_wait_callback(). */
3387 if (num_lwps (GET_PID (lp
->ptid
)) > 1 && !linux_thread_alive (lp
->ptid
))
3389 ptid_t ptid
= pid_to_ptid (GET_PID (lp
->ptid
));
3391 if (debug_linux_nat
)
3392 fprintf_unfiltered (gdb_stdlog
,
3393 "LLW: %s exited.\n",
3394 target_pid_to_str (lp
->ptid
));
3398 /* Make sure there is at least one thread running. */
3399 gdb_assert (iterate_over_lwps (ptid
, running_callback
, NULL
));
3401 /* Discard the event. */
3405 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3406 an attempt to stop an LWP. */
3408 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGSTOP
)
3410 if (debug_linux_nat
)
3411 fprintf_unfiltered (gdb_stdlog
,
3412 "LLW: Delayed SIGSTOP caught for %s.\n",
3413 target_pid_to_str (lp
->ptid
));
3417 if (lp
->last_resume_kind
!= resume_stop
)
3419 /* This is a delayed SIGSTOP. */
3421 registers_changed ();
3423 if (linux_nat_prepare_to_resume
!= NULL
)
3424 linux_nat_prepare_to_resume (lp
);
3425 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
3426 lp
->step
, TARGET_SIGNAL_0
);
3427 if (debug_linux_nat
)
3428 fprintf_unfiltered (gdb_stdlog
,
3429 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3431 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3432 target_pid_to_str (lp
->ptid
));
3435 gdb_assert (lp
->resumed
);
3437 /* Discard the event. */
3442 /* Make sure we don't report a SIGINT that we have already displayed
3443 for another thread. */
3444 if (lp
->ignore_sigint
3445 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGINT
)
3447 if (debug_linux_nat
)
3448 fprintf_unfiltered (gdb_stdlog
,
3449 "LLW: Delayed SIGINT caught for %s.\n",
3450 target_pid_to_str (lp
->ptid
));
3452 /* This is a delayed SIGINT. */
3453 lp
->ignore_sigint
= 0;
3455 registers_changed ();
3456 if (linux_nat_prepare_to_resume
!= NULL
)
3457 linux_nat_prepare_to_resume (lp
);
3458 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
3459 lp
->step
, TARGET_SIGNAL_0
);
3460 if (debug_linux_nat
)
3461 fprintf_unfiltered (gdb_stdlog
,
3462 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3464 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3465 target_pid_to_str (lp
->ptid
));
3468 gdb_assert (lp
->resumed
);
3470 /* Discard the event. */
3474 /* An interesting event. */
3476 lp
->status
= status
;
3480 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3481 their exits until all other threads in the group have exited. */
3484 check_zombie_leaders (void)
3486 struct inferior
*inf
;
3490 struct lwp_info
*leader_lp
;
3495 leader_lp
= find_lwp_pid (pid_to_ptid (inf
->pid
));
3496 if (leader_lp
!= NULL
3497 /* Check if there are other threads in the group, as we may
3498 have raced with the inferior simply exiting. */
3499 && num_lwps (inf
->pid
) > 1
3500 && linux_lwp_is_zombie (inf
->pid
))
3502 if (debug_linux_nat
)
3503 fprintf_unfiltered (gdb_stdlog
,
3504 "CZL: Thread group leader %d zombie "
3505 "(it exited, or another thread execd).\n",
3508 /* A leader zombie can mean one of two things:
3510 - It exited, and there's an exit status pending
3511 available, or only the leader exited (not the whole
3512 program). In the latter case, we can't waitpid the
3513 leader's exit status until all other threads are gone.
3515 - There are 3 or more threads in the group, and a thread
3516 other than the leader exec'd. On an exec, the Linux
3517 kernel destroys all other threads (except the execing
3518 one) in the thread group, and resets the execing thread's
3519 tid to the tgid. No exit notification is sent for the
3520 execing thread -- from the ptracer's perspective, it
3521 appears as though the execing thread just vanishes.
3522 Until we reap all other threads except the leader and the
3523 execing thread, the leader will be zombie, and the
3524 execing thread will be in `D (disc sleep)'. As soon as
3525 all other threads are reaped, the execing thread changes
3526 it's tid to the tgid, and the previous (zombie) leader
3527 vanishes, giving place to the "new" leader. We could try
3528 distinguishing the exit and exec cases, by waiting once
3529 more, and seeing if something comes out, but it doesn't
3530 sound useful. The previous leader _does_ go away, and
3531 we'll re-add the new one once we see the exec event
3532 (which is just the same as what would happen if the
3533 previous leader did exit voluntarily before some other
3536 if (debug_linux_nat
)
3537 fprintf_unfiltered (gdb_stdlog
,
3538 "CZL: Thread group leader %d vanished.\n",
3540 exit_lwp (leader_lp
);
3546 linux_nat_wait_1 (struct target_ops
*ops
,
3547 ptid_t ptid
, struct target_waitstatus
*ourstatus
,
3550 static sigset_t prev_mask
;
3551 enum resume_kind last_resume_kind
;
3552 struct lwp_info
*lp
;
3555 if (debug_linux_nat
)
3556 fprintf_unfiltered (gdb_stdlog
, "LLW: enter\n");
3558 /* The first time we get here after starting a new inferior, we may
3559 not have added it to the LWP list yet - this is the earliest
3560 moment at which we know its PID. */
3561 if (ptid_is_pid (inferior_ptid
))
3563 /* Upgrade the main thread's ptid. */
3564 thread_change_ptid (inferior_ptid
,
3565 BUILD_LWP (GET_PID (inferior_ptid
),
3566 GET_PID (inferior_ptid
)));
3568 lp
= add_lwp (inferior_ptid
);
3572 /* Make sure SIGCHLD is blocked. */
3573 block_child_signals (&prev_mask
);
3579 /* First check if there is a LWP with a wait status pending. */
3580 if (ptid_equal (ptid
, minus_one_ptid
) || ptid_is_pid (ptid
))
3582 /* Any LWP in the PTID group that's been resumed will do. */
3583 lp
= iterate_over_lwps (ptid
, status_callback
, NULL
);
3586 if (debug_linux_nat
&& lp
->status
)
3587 fprintf_unfiltered (gdb_stdlog
,
3588 "LLW: Using pending wait status %s for %s.\n",
3589 status_to_str (lp
->status
),
3590 target_pid_to_str (lp
->ptid
));
3593 else if (is_lwp (ptid
))
3595 if (debug_linux_nat
)
3596 fprintf_unfiltered (gdb_stdlog
,
3597 "LLW: Waiting for specific LWP %s.\n",
3598 target_pid_to_str (ptid
));
3600 /* We have a specific LWP to check. */
3601 lp
= find_lwp_pid (ptid
);
3604 if (debug_linux_nat
&& lp
->status
)
3605 fprintf_unfiltered (gdb_stdlog
,
3606 "LLW: Using pending wait status %s for %s.\n",
3607 status_to_str (lp
->status
),
3608 target_pid_to_str (lp
->ptid
));
3610 /* We check for lp->waitstatus in addition to lp->status,
3611 because we can have pending process exits recorded in
3612 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3613 an additional lp->status_p flag. */
3614 if (lp
->status
== 0 && lp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
)
3618 if (lp
&& lp
->signalled
&& lp
->last_resume_kind
!= resume_stop
)
3620 /* A pending SIGSTOP may interfere with the normal stream of
3621 events. In a typical case where interference is a problem,
3622 we have a SIGSTOP signal pending for LWP A while
3623 single-stepping it, encounter an event in LWP B, and take the
3624 pending SIGSTOP while trying to stop LWP A. After processing
3625 the event in LWP B, LWP A is continued, and we'll never see
3626 the SIGTRAP associated with the last time we were
3627 single-stepping LWP A. */
3629 /* Resume the thread. It should halt immediately returning the
3631 registers_changed ();
3632 if (linux_nat_prepare_to_resume
!= NULL
)
3633 linux_nat_prepare_to_resume (lp
);
3634 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
3635 lp
->step
, TARGET_SIGNAL_0
);
3636 if (debug_linux_nat
)
3637 fprintf_unfiltered (gdb_stdlog
,
3638 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3639 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3640 target_pid_to_str (lp
->ptid
));
3642 gdb_assert (lp
->resumed
);
3644 /* Catch the pending SIGSTOP. */
3645 status
= lp
->status
;
3648 stop_wait_callback (lp
, NULL
);
3650 /* If the lp->status field isn't empty, we caught another signal
3651 while flushing the SIGSTOP. Return it back to the event
3652 queue of the LWP, as we already have an event to handle. */
3655 if (debug_linux_nat
)
3656 fprintf_unfiltered (gdb_stdlog
,
3657 "LLW: kill %s, %s\n",
3658 target_pid_to_str (lp
->ptid
),
3659 status_to_str (lp
->status
));
3660 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (lp
->status
));
3663 lp
->status
= status
;
3666 if (!target_can_async_p ())
3668 /* Causes SIGINT to be passed on to the attached process. */
3672 /* But if we don't find a pending event, we'll have to wait. */
3678 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3681 - If the thread group leader exits while other threads in the
3682 thread group still exist, waitpid(TGID, ...) hangs. That
3683 waitpid won't return an exit status until the other threads
3684 in the group are reapped.
3686 - When a non-leader thread execs, that thread just vanishes
3687 without reporting an exit (so we'd hang if we waited for it
3688 explicitly in that case). The exec event is reported to
3692 lwpid
= my_waitpid (-1, &status
, __WCLONE
| WNOHANG
);
3693 if (lwpid
== 0 || (lwpid
== -1 && errno
== ECHILD
))
3694 lwpid
= my_waitpid (-1, &status
, WNOHANG
);
3696 if (debug_linux_nat
)
3697 fprintf_unfiltered (gdb_stdlog
,
3698 "LNW: waitpid(-1, ...) returned %d, %s\n",
3699 lwpid
, errno
? safe_strerror (errno
) : "ERRNO-OK");
3703 /* If this is true, then we paused LWPs momentarily, and may
3704 now have pending events to handle. */
3707 if (debug_linux_nat
)
3709 fprintf_unfiltered (gdb_stdlog
,
3710 "LLW: waitpid %ld received %s\n",
3711 (long) lwpid
, status_to_str (status
));
3714 lp
= linux_nat_filter_event (lwpid
, status
, &new_pending
);
3716 /* STATUS is now no longer valid, use LP->STATUS instead. */
3719 if (lp
&& !ptid_match (lp
->ptid
, ptid
))
3721 gdb_assert (lp
->resumed
);
3723 if (debug_linux_nat
)
3725 "LWP %ld got an event %06x, leaving pending.\n",
3726 ptid_get_lwp (lp
->ptid
), lp
->status
);
3728 if (WIFSTOPPED (lp
->status
))
3730 if (WSTOPSIG (lp
->status
) != SIGSTOP
)
3732 /* Cancel breakpoint hits. The breakpoint may
3733 be removed before we fetch events from this
3734 process to report to the core. It is best
3735 not to assume the moribund breakpoints
3736 heuristic always handles these cases --- it
3737 could be too many events go through to the
3738 core before this one is handled. All-stop
3739 always cancels breakpoint hits in all
3742 && linux_nat_lp_status_is_event (lp
)
3743 && cancel_breakpoint (lp
))
3745 /* Throw away the SIGTRAP. */
3748 if (debug_linux_nat
)
3750 "LLW: LWP %ld hit a breakpoint while"
3751 " waiting for another process;"
3753 ptid_get_lwp (lp
->ptid
));
3763 else if (WIFEXITED (lp
->status
) || WIFSIGNALED (lp
->status
))
3765 if (debug_linux_nat
)
3767 "Process %ld exited while stopping LWPs\n",
3768 ptid_get_lwp (lp
->ptid
));
3770 /* This was the last lwp in the process. Since
3771 events are serialized to GDB core, and we can't
3772 report this one right now, but GDB core and the
3773 other target layers will want to be notified
3774 about the exit code/signal, leave the status
3775 pending for the next time we're able to report
3778 /* Prevent trying to stop this thread again. We'll
3779 never try to resume it because it has a pending
3783 /* Dead LWP's aren't expected to reported a pending
3787 /* Store the pending event in the waitstatus as
3788 well, because W_EXITCODE(0,0) == 0. */
3789 store_waitstatus (&lp
->waitstatus
, lp
->status
);
3798 /* Some LWP now has a pending event. Go all the way
3799 back to check it. */
3805 /* We got an event to report to the core. */
3809 /* Retry until nothing comes out of waitpid. A single
3810 SIGCHLD can indicate more than one child stopped. */
3814 /* Check for zombie thread group leaders. Those can't be reaped
3815 until all other threads in the thread group are. */
3816 check_zombie_leaders ();
3818 /* If there are no resumed children left, bail. We'd be stuck
3819 forever in the sigsuspend call below otherwise. */
3820 if (iterate_over_lwps (ptid
, resumed_callback
, NULL
) == NULL
)
3822 if (debug_linux_nat
)
3823 fprintf_unfiltered (gdb_stdlog
, "LLW: exit (no resumed LWP)\n");
3825 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
3827 if (!target_can_async_p ())
3828 clear_sigint_trap ();
3830 restore_child_signals_mask (&prev_mask
);
3831 return minus_one_ptid
;
3834 /* No interesting event to report to the core. */
3836 if (target_options
& TARGET_WNOHANG
)
3838 if (debug_linux_nat
)
3839 fprintf_unfiltered (gdb_stdlog
, "LLW: exit (ignore)\n");
3841 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3842 restore_child_signals_mask (&prev_mask
);
3843 return minus_one_ptid
;
3846 /* We shouldn't end up here unless we want to try again. */
3847 gdb_assert (lp
== NULL
);
3849 /* Block until we get an event reported with SIGCHLD. */
3850 sigsuspend (&suspend_mask
);
3853 if (!target_can_async_p ())
3854 clear_sigint_trap ();
3858 status
= lp
->status
;
3861 /* Don't report signals that GDB isn't interested in, such as
3862 signals that are neither printed nor stopped upon. Stopping all
3863 threads can be a bit time-consuming so if we want decent
3864 performance with heavily multi-threaded programs, especially when
3865 they're using a high frequency timer, we'd better avoid it if we
3868 if (WIFSTOPPED (status
))
3870 enum target_signal signo
= target_signal_from_host (WSTOPSIG (status
));
3872 /* When using hardware single-step, we need to report every signal.
3873 Otherwise, signals in pass_mask may be short-circuited. */
3875 && WSTOPSIG (status
) && sigismember (&pass_mask
, WSTOPSIG (status
)))
3877 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3878 here? It is not clear we should. GDB may not expect
3879 other threads to run. On the other hand, not resuming
3880 newly attached threads may cause an unwanted delay in
3881 getting them running. */
3882 registers_changed ();
3883 if (linux_nat_prepare_to_resume
!= NULL
)
3884 linux_nat_prepare_to_resume (lp
);
3885 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
3887 if (debug_linux_nat
)
3888 fprintf_unfiltered (gdb_stdlog
,
3889 "LLW: %s %s, %s (preempt 'handle')\n",
3891 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3892 target_pid_to_str (lp
->ptid
),
3893 (signo
!= TARGET_SIGNAL_0
3894 ? strsignal (target_signal_to_host (signo
))
3902 /* Only do the below in all-stop, as we currently use SIGINT
3903 to implement target_stop (see linux_nat_stop) in
3905 if (signo
== TARGET_SIGNAL_INT
&& signal_pass_state (signo
) == 0)
3907 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3908 forwarded to the entire process group, that is, all LWPs
3909 will receive it - unless they're using CLONE_THREAD to
3910 share signals. Since we only want to report it once, we
3911 mark it as ignored for all LWPs except this one. */
3912 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid
)),
3913 set_ignore_sigint
, NULL
);
3914 lp
->ignore_sigint
= 0;
3917 maybe_clear_ignore_sigint (lp
);
3921 /* This LWP is stopped now. */
3924 if (debug_linux_nat
)
3925 fprintf_unfiltered (gdb_stdlog
, "LLW: Candidate event %s in %s.\n",
3926 status_to_str (status
), target_pid_to_str (lp
->ptid
));
3930 /* Now stop all other LWP's ... */
3931 iterate_over_lwps (minus_one_ptid
, stop_callback
, NULL
);
3933 /* ... and wait until all of them have reported back that
3934 they're no longer running. */
3935 iterate_over_lwps (minus_one_ptid
, stop_wait_callback
, NULL
);
3937 /* If we're not waiting for a specific LWP, choose an event LWP
3938 from among those that have had events. Giving equal priority
3939 to all LWPs that have had events helps prevent
3941 if (ptid_equal (ptid
, minus_one_ptid
) || ptid_is_pid (ptid
))
3942 select_event_lwp (ptid
, &lp
, &status
);
3944 /* Now that we've selected our final event LWP, cancel any
3945 breakpoints in other LWPs that have hit a GDB breakpoint.
3946 See the comment in cancel_breakpoints_callback to find out
3948 iterate_over_lwps (minus_one_ptid
, cancel_breakpoints_callback
, lp
);
3950 /* We'll need this to determine whether to report a SIGSTOP as
3951 TARGET_WAITKIND_0. Need to take a copy because
3952 resume_clear_callback clears it. */
3953 last_resume_kind
= lp
->last_resume_kind
;
3955 /* In all-stop, from the core's perspective, all LWPs are now
3956 stopped until a new resume action is sent over. */
3957 iterate_over_lwps (minus_one_ptid
, resume_clear_callback
, NULL
);
3962 last_resume_kind
= lp
->last_resume_kind
;
3963 resume_clear_callback (lp
, NULL
);
3966 if (linux_nat_status_is_event (status
))
3968 if (debug_linux_nat
)
3969 fprintf_unfiltered (gdb_stdlog
,
3970 "LLW: trap ptid is %s.\n",
3971 target_pid_to_str (lp
->ptid
));
3974 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3976 *ourstatus
= lp
->waitstatus
;
3977 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3980 store_waitstatus (ourstatus
, status
);
3982 if (debug_linux_nat
)
3983 fprintf_unfiltered (gdb_stdlog
, "LLW: exit\n");
3985 restore_child_signals_mask (&prev_mask
);
3987 if (last_resume_kind
== resume_stop
3988 && ourstatus
->kind
== TARGET_WAITKIND_STOPPED
3989 && WSTOPSIG (status
) == SIGSTOP
)
3991 /* A thread that has been requested to stop by GDB with
3992 target_stop, and it stopped cleanly, so report as SIG0. The
3993 use of SIGSTOP is an implementation detail. */
3994 ourstatus
->value
.sig
= TARGET_SIGNAL_0
;
3997 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
3998 || ourstatus
->kind
== TARGET_WAITKIND_SIGNALLED
)
4001 lp
->core
= linux_nat_core_of_thread_1 (lp
->ptid
);
4006 /* Resume LWPs that are currently stopped without any pending status
4007 to report, but are resumed from the core's perspective. */
4010 resume_stopped_resumed_lwps (struct lwp_info
*lp
, void *data
)
4012 ptid_t
*wait_ptid_p
= data
;
4017 && lp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
)
4019 struct regcache
*regcache
= get_thread_regcache (lp
->ptid
);
4020 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
4021 CORE_ADDR pc
= regcache_read_pc (regcache
);
4023 gdb_assert (is_executing (lp
->ptid
));
4025 /* Don't bother if there's a breakpoint at PC that we'd hit
4026 immediately, and we're not waiting for this LWP. */
4027 if (!ptid_match (lp
->ptid
, *wait_ptid_p
))
4029 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache
), pc
))
4033 if (debug_linux_nat
)
4034 fprintf_unfiltered (gdb_stdlog
,
4035 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
4036 target_pid_to_str (lp
->ptid
),
4037 paddress (gdbarch
, pc
),
4040 registers_changed ();
4041 if (linux_nat_prepare_to_resume
!= NULL
)
4042 linux_nat_prepare_to_resume (lp
);
4043 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
4044 lp
->step
, TARGET_SIGNAL_0
);
4046 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
4047 lp
->stopped_by_watchpoint
= 0;
4054 linux_nat_wait (struct target_ops
*ops
,
4055 ptid_t ptid
, struct target_waitstatus
*ourstatus
,
4060 if (debug_linux_nat
)
4061 fprintf_unfiltered (gdb_stdlog
,
4062 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid
));
4064 /* Flush the async file first. */
4065 if (target_can_async_p ())
4066 async_file_flush ();
4068 /* Resume LWPs that are currently stopped without any pending status
4069 to report, but are resumed from the core's perspective. LWPs get
4070 in this state if we find them stopping at a time we're not
4071 interested in reporting the event (target_wait on a
4072 specific_process, for example, see linux_nat_wait_1), and
4073 meanwhile the event became uninteresting. Don't bother resuming
4074 LWPs we're not going to wait for if they'd stop immediately. */
4076 iterate_over_lwps (minus_one_ptid
, resume_stopped_resumed_lwps
, &ptid
);
4078 event_ptid
= linux_nat_wait_1 (ops
, ptid
, ourstatus
, target_options
);
4080 /* If we requested any event, and something came out, assume there
4081 may be more. If we requested a specific lwp or process, also
4082 assume there may be more. */
4083 if (target_can_async_p ()
4084 && ((ourstatus
->kind
!= TARGET_WAITKIND_IGNORE
4085 && ourstatus
->kind
!= TARGET_WAITKIND_NO_RESUMED
)
4086 || !ptid_equal (ptid
, minus_one_ptid
)))
4089 /* Get ready for the next event. */
4090 if (target_can_async_p ())
4091 target_async (inferior_event_handler
, 0);
4097 kill_callback (struct lwp_info
*lp
, void *data
)
4099 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4102 kill (GET_LWP (lp
->ptid
), SIGKILL
);
4103 if (debug_linux_nat
)
4104 fprintf_unfiltered (gdb_stdlog
,
4105 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4106 target_pid_to_str (lp
->ptid
),
4107 errno
? safe_strerror (errno
) : "OK");
4109 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4112 ptrace (PTRACE_KILL
, GET_LWP (lp
->ptid
), 0, 0);
4113 if (debug_linux_nat
)
4114 fprintf_unfiltered (gdb_stdlog
,
4115 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4116 target_pid_to_str (lp
->ptid
),
4117 errno
? safe_strerror (errno
) : "OK");
4123 kill_wait_callback (struct lwp_info
*lp
, void *data
)
4127 /* We must make sure that there are no pending events (delayed
4128 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4129 program doesn't interfere with any following debugging session. */
4131 /* For cloned processes we must check both with __WCLONE and
4132 without, since the exit status of a cloned process isn't reported
4138 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, __WCLONE
);
4139 if (pid
!= (pid_t
) -1)
4141 if (debug_linux_nat
)
4142 fprintf_unfiltered (gdb_stdlog
,
4143 "KWC: wait %s received unknown.\n",
4144 target_pid_to_str (lp
->ptid
));
4145 /* The Linux kernel sometimes fails to kill a thread
4146 completely after PTRACE_KILL; that goes from the stop
4147 point in do_fork out to the one in
4148 get_signal_to_deliever and waits again. So kill it
4150 kill_callback (lp
, NULL
);
4153 while (pid
== GET_LWP (lp
->ptid
));
4155 gdb_assert (pid
== -1 && errno
== ECHILD
);
4160 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, 0);
4161 if (pid
!= (pid_t
) -1)
4163 if (debug_linux_nat
)
4164 fprintf_unfiltered (gdb_stdlog
,
4165 "KWC: wait %s received unk.\n",
4166 target_pid_to_str (lp
->ptid
));
4167 /* See the call to kill_callback above. */
4168 kill_callback (lp
, NULL
);
4171 while (pid
== GET_LWP (lp
->ptid
));
4173 gdb_assert (pid
== -1 && errno
== ECHILD
);
4178 linux_nat_kill (struct target_ops
*ops
)
4180 struct target_waitstatus last
;
4184 /* If we're stopped while forking and we haven't followed yet,
4185 kill the other task. We need to do this first because the
4186 parent will be sleeping if this is a vfork. */
4188 get_last_target_status (&last_ptid
, &last
);
4190 if (last
.kind
== TARGET_WAITKIND_FORKED
4191 || last
.kind
== TARGET_WAITKIND_VFORKED
)
4193 ptrace (PT_KILL
, PIDGET (last
.value
.related_pid
), 0, 0);
4197 if (forks_exist_p ())
4198 linux_fork_killall ();
4201 ptid_t ptid
= pid_to_ptid (ptid_get_pid (inferior_ptid
));
4203 /* Stop all threads before killing them, since ptrace requires
4204 that the thread is stopped to sucessfully PTRACE_KILL. */
4205 iterate_over_lwps (ptid
, stop_callback
, NULL
);
4206 /* ... and wait until all of them have reported back that
4207 they're no longer running. */
4208 iterate_over_lwps (ptid
, stop_wait_callback
, NULL
);
4210 /* Kill all LWP's ... */
4211 iterate_over_lwps (ptid
, kill_callback
, NULL
);
4213 /* ... and wait until we've flushed all events. */
4214 iterate_over_lwps (ptid
, kill_wait_callback
, NULL
);
4217 target_mourn_inferior ();
4221 linux_nat_mourn_inferior (struct target_ops
*ops
)
4223 purge_lwp_list (ptid_get_pid (inferior_ptid
));
4225 if (! forks_exist_p ())
4226 /* Normal case, no other forks available. */
4227 linux_ops
->to_mourn_inferior (ops
);
4229 /* Multi-fork case. The current inferior_ptid has exited, but
4230 there are other viable forks to debug. Delete the exiting
4231 one and context-switch to the first available. */
4232 linux_fork_mourn_inferior ();
4235 /* Convert a native/host siginfo object, into/from the siginfo in the
4236 layout of the inferiors' architecture. */
4239 siginfo_fixup (struct siginfo
*siginfo
, gdb_byte
*inf_siginfo
, int direction
)
4243 if (linux_nat_siginfo_fixup
!= NULL
)
4244 done
= linux_nat_siginfo_fixup (siginfo
, inf_siginfo
, direction
);
4246 /* If there was no callback, or the callback didn't do anything,
4247 then just do a straight memcpy. */
4251 memcpy (siginfo
, inf_siginfo
, sizeof (struct siginfo
));
4253 memcpy (inf_siginfo
, siginfo
, sizeof (struct siginfo
));
4258 linux_xfer_siginfo (struct target_ops
*ops
, enum target_object object
,
4259 const char *annex
, gdb_byte
*readbuf
,
4260 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4263 struct siginfo siginfo
;
4264 gdb_byte inf_siginfo
[sizeof (struct siginfo
)];
4266 gdb_assert (object
== TARGET_OBJECT_SIGNAL_INFO
);
4267 gdb_assert (readbuf
|| writebuf
);
4269 pid
= GET_LWP (inferior_ptid
);
4271 pid
= GET_PID (inferior_ptid
);
4273 if (offset
> sizeof (siginfo
))
4277 ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
4281 /* When GDB is built as a 64-bit application, ptrace writes into
4282 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4283 inferior with a 64-bit GDB should look the same as debugging it
4284 with a 32-bit GDB, we need to convert it. GDB core always sees
4285 the converted layout, so any read/write will have to be done
4287 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
4289 if (offset
+ len
> sizeof (siginfo
))
4290 len
= sizeof (siginfo
) - offset
;
4292 if (readbuf
!= NULL
)
4293 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
4296 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
4298 /* Convert back to ptrace layout before flushing it out. */
4299 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
4302 ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
4311 linux_nat_xfer_partial (struct target_ops
*ops
, enum target_object object
,
4312 const char *annex
, gdb_byte
*readbuf
,
4313 const gdb_byte
*writebuf
,
4314 ULONGEST offset
, LONGEST len
)
4316 struct cleanup
*old_chain
;
4319 if (object
== TARGET_OBJECT_SIGNAL_INFO
)
4320 return linux_xfer_siginfo (ops
, object
, annex
, readbuf
, writebuf
,
4323 /* The target is connected but no live inferior is selected. Pass
4324 this request down to a lower stratum (e.g., the executable
4326 if (object
== TARGET_OBJECT_MEMORY
&& ptid_equal (inferior_ptid
, null_ptid
))
4329 old_chain
= save_inferior_ptid ();
4331 if (is_lwp (inferior_ptid
))
4332 inferior_ptid
= pid_to_ptid (GET_LWP (inferior_ptid
));
4334 xfer
= linux_ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4337 do_cleanups (old_chain
);
4342 linux_thread_alive (ptid_t ptid
)
4346 gdb_assert (is_lwp (ptid
));
4348 /* Send signal 0 instead of anything ptrace, because ptracing a
4349 running thread errors out claiming that the thread doesn't
4351 err
= kill_lwp (GET_LWP (ptid
), 0);
4353 if (debug_linux_nat
)
4354 fprintf_unfiltered (gdb_stdlog
,
4355 "LLTA: KILL(SIG0) %s (%s)\n",
4356 target_pid_to_str (ptid
),
4357 err
? safe_strerror (tmp_errno
) : "OK");
4366 linux_nat_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
4368 return linux_thread_alive (ptid
);
4372 linux_nat_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
4374 static char buf
[64];
4377 && (GET_PID (ptid
) != GET_LWP (ptid
)
4378 || num_lwps (GET_PID (ptid
)) > 1))
4380 snprintf (buf
, sizeof (buf
), "LWP %ld", GET_LWP (ptid
));
4384 return normal_pid_to_str (ptid
);
4388 linux_nat_thread_name (struct thread_info
*thr
)
4390 int pid
= ptid_get_pid (thr
->ptid
);
4391 long lwp
= ptid_get_lwp (thr
->ptid
);
4392 #define FORMAT "/proc/%d/task/%ld/comm"
4393 char buf
[sizeof (FORMAT
) + 30];
4395 char *result
= NULL
;
4397 snprintf (buf
, sizeof (buf
), FORMAT
, pid
, lwp
);
4398 comm_file
= fopen (buf
, "r");
4401 /* Not exported by the kernel, so we define it here. */
4403 static char line
[COMM_LEN
+ 1];
4405 if (fgets (line
, sizeof (line
), comm_file
))
4407 char *nl
= strchr (line
, '\n');
4424 /* Accepts an integer PID; Returns a string representing a file that
4425 can be opened to get the symbols for the child process. */
4428 linux_child_pid_to_exec_file (int pid
)
4430 char *name1
, *name2
;
4432 name1
= xmalloc (MAXPATHLEN
);
4433 name2
= xmalloc (MAXPATHLEN
);
4434 make_cleanup (xfree
, name1
);
4435 make_cleanup (xfree
, name2
);
4436 memset (name2
, 0, MAXPATHLEN
);
4438 sprintf (name1
, "/proc/%d/exe", pid
);
4439 if (readlink (name1
, name2
, MAXPATHLEN
) > 0)
4445 /* Records the thread's register state for the corefile note
4449 linux_nat_collect_thread_registers (const struct regcache
*regcache
,
4450 ptid_t ptid
, bfd
*obfd
,
4451 char *note_data
, int *note_size
,
4452 enum target_signal stop_signal
)
4454 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
4455 const struct regset
*regset
;
4457 gdb_gregset_t gregs
;
4458 gdb_fpregset_t fpregs
;
4460 core_regset_p
= gdbarch_regset_from_core_section_p (gdbarch
);
4463 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg",
4465 != NULL
&& regset
->collect_regset
!= NULL
)
4466 regset
->collect_regset (regset
, regcache
, -1, &gregs
, sizeof (gregs
));
4468 fill_gregset (regcache
, &gregs
, -1);
4470 note_data
= (char *) elfcore_write_prstatus
4471 (obfd
, note_data
, note_size
, ptid_get_lwp (ptid
),
4472 target_signal_to_host (stop_signal
), &gregs
);
4475 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg2",
4477 != NULL
&& regset
->collect_regset
!= NULL
)
4478 regset
->collect_regset (regset
, regcache
, -1, &fpregs
, sizeof (fpregs
));
4480 fill_fpregset (regcache
, &fpregs
, -1);
4482 note_data
= (char *) elfcore_write_prfpreg (obfd
, note_data
, note_size
,
4483 &fpregs
, sizeof (fpregs
));
4488 /* Fills the "to_make_corefile_note" target vector. Builds the note
4489 section for a corefile, and returns it in a malloc buffer. */
4492 linux_nat_make_corefile_notes (bfd
*obfd
, int *note_size
)
4494 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4495 converted to gdbarch_core_regset_sections, this function can go away. */
4496 return linux_make_corefile_notes (target_gdbarch
, obfd
, note_size
,
4497 linux_nat_collect_thread_registers
);
4500 /* Implement the to_xfer_partial interface for memory reads using the /proc
4501 filesystem. Because we can use a single read() call for /proc, this
4502 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4503 but it doesn't support writes. */
4506 linux_proc_xfer_partial (struct target_ops
*ops
, enum target_object object
,
4507 const char *annex
, gdb_byte
*readbuf
,
4508 const gdb_byte
*writebuf
,
4509 ULONGEST offset
, LONGEST len
)
4515 if (object
!= TARGET_OBJECT_MEMORY
|| !readbuf
)
4518 /* Don't bother for one word. */
4519 if (len
< 3 * sizeof (long))
4522 /* We could keep this file open and cache it - possibly one per
4523 thread. That requires some juggling, but is even faster. */
4524 sprintf (filename
, "/proc/%d/mem", PIDGET (inferior_ptid
));
4525 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
4529 /* If pread64 is available, use it. It's faster if the kernel
4530 supports it (only one syscall), and it's 64-bit safe even on
4531 32-bit platforms (for instance, SPARC debugging a SPARC64
4534 if (pread64 (fd
, readbuf
, len
, offset
) != len
)
4536 if (lseek (fd
, offset
, SEEK_SET
) == -1 || read (fd
, readbuf
, len
) != len
)
4547 /* Enumerate spufs IDs for process PID. */
4549 spu_enumerate_spu_ids (int pid
, gdb_byte
*buf
, ULONGEST offset
, LONGEST len
)
4551 enum bfd_endian byte_order
= gdbarch_byte_order (target_gdbarch
);
4553 LONGEST written
= 0;
4556 struct dirent
*entry
;
4558 xsnprintf (path
, sizeof path
, "/proc/%d/fd", pid
);
4559 dir
= opendir (path
);
4564 while ((entry
= readdir (dir
)) != NULL
)
4570 fd
= atoi (entry
->d_name
);
4574 xsnprintf (path
, sizeof path
, "/proc/%d/fd/%d", pid
, fd
);
4575 if (stat (path
, &st
) != 0)
4577 if (!S_ISDIR (st
.st_mode
))
4580 if (statfs (path
, &stfs
) != 0)
4582 if (stfs
.f_type
!= SPUFS_MAGIC
)
4585 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
4587 store_unsigned_integer (buf
+ pos
- offset
, 4, byte_order
, fd
);
4597 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4598 object type, using the /proc file system. */
4600 linux_proc_xfer_spu (struct target_ops
*ops
, enum target_object object
,
4601 const char *annex
, gdb_byte
*readbuf
,
4602 const gdb_byte
*writebuf
,
4603 ULONGEST offset
, LONGEST len
)
4608 int pid
= PIDGET (inferior_ptid
);
4615 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
4618 xsnprintf (buf
, sizeof buf
, "/proc/%d/fd/%s", pid
, annex
);
4619 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
4624 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
4631 ret
= write (fd
, writebuf
, (size_t) len
);
4633 ret
= read (fd
, readbuf
, (size_t) len
);
4640 /* Parse LINE as a signal set and add its set bits to SIGS. */
4643 add_line_to_sigset (const char *line
, sigset_t
*sigs
)
4645 int len
= strlen (line
) - 1;
4649 if (line
[len
] != '\n')
4650 error (_("Could not parse signal set: %s"), line
);
4658 if (*p
>= '0' && *p
<= '9')
4660 else if (*p
>= 'a' && *p
<= 'f')
4661 digit
= *p
- 'a' + 10;
4663 error (_("Could not parse signal set: %s"), line
);
4668 sigaddset (sigs
, signum
+ 1);
4670 sigaddset (sigs
, signum
+ 2);
4672 sigaddset (sigs
, signum
+ 3);
4674 sigaddset (sigs
, signum
+ 4);
4680 /* Find process PID's pending signals from /proc/pid/status and set
4684 linux_proc_pending_signals (int pid
, sigset_t
*pending
,
4685 sigset_t
*blocked
, sigset_t
*ignored
)
4688 char buffer
[MAXPATHLEN
], fname
[MAXPATHLEN
];
4689 struct cleanup
*cleanup
;
4691 sigemptyset (pending
);
4692 sigemptyset (blocked
);
4693 sigemptyset (ignored
);
4694 sprintf (fname
, "/proc/%d/status", pid
);
4695 procfile
= fopen (fname
, "r");
4696 if (procfile
== NULL
)
4697 error (_("Could not open %s"), fname
);
4698 cleanup
= make_cleanup_fclose (procfile
);
4700 while (fgets (buffer
, MAXPATHLEN
, procfile
) != NULL
)
4702 /* Normal queued signals are on the SigPnd line in the status
4703 file. However, 2.6 kernels also have a "shared" pending
4704 queue for delivering signals to a thread group, so check for
4707 Unfortunately some Red Hat kernels include the shared pending
4708 queue but not the ShdPnd status field. */
4710 if (strncmp (buffer
, "SigPnd:\t", 8) == 0)
4711 add_line_to_sigset (buffer
+ 8, pending
);
4712 else if (strncmp (buffer
, "ShdPnd:\t", 8) == 0)
4713 add_line_to_sigset (buffer
+ 8, pending
);
4714 else if (strncmp (buffer
, "SigBlk:\t", 8) == 0)
4715 add_line_to_sigset (buffer
+ 8, blocked
);
4716 else if (strncmp (buffer
, "SigIgn:\t", 8) == 0)
4717 add_line_to_sigset (buffer
+ 8, ignored
);
4720 do_cleanups (cleanup
);
4724 linux_nat_xfer_osdata (struct target_ops
*ops
, enum target_object object
,
4725 const char *annex
, gdb_byte
*readbuf
,
4726 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4728 gdb_assert (object
== TARGET_OBJECT_OSDATA
);
4730 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
4734 linux_xfer_partial (struct target_ops
*ops
, enum target_object object
,
4735 const char *annex
, gdb_byte
*readbuf
,
4736 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4740 if (object
== TARGET_OBJECT_AUXV
)
4741 return memory_xfer_auxv (ops
, object
, annex
, readbuf
, writebuf
,
4744 if (object
== TARGET_OBJECT_OSDATA
)
4745 return linux_nat_xfer_osdata (ops
, object
, annex
, readbuf
, writebuf
,
4748 if (object
== TARGET_OBJECT_SPU
)
4749 return linux_proc_xfer_spu (ops
, object
, annex
, readbuf
, writebuf
,
4752 /* GDB calculates all the addresses in possibly larget width of the address.
4753 Address width needs to be masked before its final use - either by
4754 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4756 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4758 if (object
== TARGET_OBJECT_MEMORY
)
4760 int addr_bit
= gdbarch_addr_bit (target_gdbarch
);
4762 if (addr_bit
< (sizeof (ULONGEST
) * HOST_CHAR_BIT
))
4763 offset
&= ((ULONGEST
) 1 << addr_bit
) - 1;
4766 xfer
= linux_proc_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4771 return super_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4775 /* Create a prototype generic GNU/Linux target. The client can override
4776 it with local methods. */
4779 linux_target_install_ops (struct target_ops
*t
)
4781 t
->to_insert_fork_catchpoint
= linux_child_insert_fork_catchpoint
;
4782 t
->to_remove_fork_catchpoint
= linux_child_remove_fork_catchpoint
;
4783 t
->to_insert_vfork_catchpoint
= linux_child_insert_vfork_catchpoint
;
4784 t
->to_remove_vfork_catchpoint
= linux_child_remove_vfork_catchpoint
;
4785 t
->to_insert_exec_catchpoint
= linux_child_insert_exec_catchpoint
;
4786 t
->to_remove_exec_catchpoint
= linux_child_remove_exec_catchpoint
;
4787 t
->to_set_syscall_catchpoint
= linux_child_set_syscall_catchpoint
;
4788 t
->to_pid_to_exec_file
= linux_child_pid_to_exec_file
;
4789 t
->to_post_startup_inferior
= linux_child_post_startup_inferior
;
4790 t
->to_post_attach
= linux_child_post_attach
;
4791 t
->to_follow_fork
= linux_child_follow_fork
;
4792 t
->to_make_corefile_notes
= linux_nat_make_corefile_notes
;
4794 super_xfer_partial
= t
->to_xfer_partial
;
4795 t
->to_xfer_partial
= linux_xfer_partial
;
/* Build the generic GNU/Linux native target, based on the plain
   ptrace target with the Linux methods installed on top.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t;

  t = inf_ptrace_target ();
  linux_target_install_ops (t);

  return t;
}
4810 linux_trad_target (CORE_ADDR (*register_u_offset
)(struct gdbarch
*, int, int))
4812 struct target_ops
*t
;
4814 t
= inf_ptrace_trad_target (register_u_offset
);
4815 linux_target_install_ops (t
);
4820 /* target_is_async_p implementation. */
4823 linux_nat_is_async_p (void)
4825 /* NOTE: palves 2008-03-21: We're only async when the user requests
4826 it explicitly with the "set target-async" command.
4827 Someday, linux will always be async. */
4828 return target_async_permitted
;
4831 /* target_can_async_p implementation. */
4834 linux_nat_can_async_p (void)
4836 /* NOTE: palves 2008-03-21: We're only async when the user requests
4837 it explicitly with the "set target-async" command.
4838 Someday, linux will always be async. */
4839 return target_async_permitted
;
/* The Linux native target always supports non-stop mode.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  */

int linux_multi_process = 1;

/* target_supports_multi_process implementation; reports the global
   flag above.  */

static int
linux_nat_supports_multi_process (void)
{
  return linux_multi_process;
}
/* Whether address-space randomization can be disabled for the
   inferior; requires personality(2) support at build time.  */

static int
linux_nat_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
4869 static int async_terminal_is_ours
= 1;
4871 /* target_terminal_inferior implementation. */
4874 linux_nat_terminal_inferior (void)
4876 if (!target_is_async_p ())
4878 /* Async mode is disabled. */
4879 terminal_inferior ();
4883 terminal_inferior ();
4885 /* Calls to target_terminal_*() are meant to be idempotent. */
4886 if (!async_terminal_is_ours
)
4889 delete_file_handler (input_fd
);
4890 async_terminal_is_ours
= 0;
4894 /* target_terminal_ours implementation. */
4897 linux_nat_terminal_ours (void)
4899 if (!target_is_async_p ())
4901 /* Async mode is disabled. */
4906 /* GDB should never give the terminal to the inferior if the
4907 inferior is running in the background (run&, continue&, etc.),
4908 but claiming it sure should. */
4911 if (async_terminal_is_ours
)
4914 clear_sigint_trap ();
4915 add_file_handler (input_fd
, stdin_event_handler
, 0);
4916 async_terminal_is_ours
= 1;
4919 static void (*async_client_callback
) (enum inferior_event_type event_type
,
4921 static void *async_client_context
;
4923 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4924 so we notice when any child changes state, and notify the
4925 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4926 above to wait for the arrival of a SIGCHLD. */
4929 sigchld_handler (int signo
)
4931 int old_errno
= errno
;
4933 if (debug_linux_nat
)
4934 ui_file_write_async_safe (gdb_stdlog
,
4935 "sigchld\n", sizeof ("sigchld\n") - 1);
4937 if (signo
== SIGCHLD
4938 && linux_nat_event_pipe
[0] != -1)
4939 async_file_mark (); /* Let the event loop know that there are
4940 events to handle. */
4945 /* Callback registered with the target events file descriptor. */
4948 handle_target_event (int error
, gdb_client_data client_data
)
4950 (*async_client_callback
) (INF_REG_EVENT
, async_client_context
);
4953 /* Create/destroy the target events pipe. Returns previous state. */
4956 linux_async_pipe (int enable
)
4958 int previous
= (linux_nat_event_pipe
[0] != -1);
4960 if (previous
!= enable
)
4964 block_child_signals (&prev_mask
);
4968 if (pipe (linux_nat_event_pipe
) == -1)
4969 internal_error (__FILE__
, __LINE__
,
4970 "creating event pipe failed.");
4972 fcntl (linux_nat_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4973 fcntl (linux_nat_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4977 close (linux_nat_event_pipe
[0]);
4978 close (linux_nat_event_pipe
[1]);
4979 linux_nat_event_pipe
[0] = -1;
4980 linux_nat_event_pipe
[1] = -1;
4983 restore_child_signals_mask (&prev_mask
);
4989 /* target_async implementation. */
4992 linux_nat_async (void (*callback
) (enum inferior_event_type event_type
,
4993 void *context
), void *context
)
4995 if (callback
!= NULL
)
4997 async_client_callback
= callback
;
4998 async_client_context
= context
;
4999 if (!linux_async_pipe (1))
5001 add_file_handler (linux_nat_event_pipe
[0],
5002 handle_target_event
, NULL
);
5003 /* There may be pending events to handle. Tell the event loop
5010 async_client_callback
= callback
;
5011 async_client_context
= context
;
5012 delete_file_handler (linux_nat_event_pipe
[0]);
5013 linux_async_pipe (0);
5018 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5022 linux_nat_stop_lwp (struct lwp_info
*lwp
, void *data
)
5026 ptid_t ptid
= lwp
->ptid
;
5028 if (debug_linux_nat
)
5029 fprintf_unfiltered (gdb_stdlog
,
5030 "LNSL: running -> suspending %s\n",
5031 target_pid_to_str (lwp
->ptid
));
5034 if (lwp
->last_resume_kind
== resume_stop
)
5036 if (debug_linux_nat
)
5037 fprintf_unfiltered (gdb_stdlog
,
5038 "linux-nat: already stopping LWP %ld at "
5040 ptid_get_lwp (lwp
->ptid
));
5044 stop_callback (lwp
, NULL
);
5045 lwp
->last_resume_kind
= resume_stop
;
5049 /* Already known to be stopped; do nothing. */
5051 if (debug_linux_nat
)
5053 if (find_thread_ptid (lwp
->ptid
)->stop_requested
)
5054 fprintf_unfiltered (gdb_stdlog
,
5055 "LNSL: already stopped/stop_requested %s\n",
5056 target_pid_to_str (lwp
->ptid
));
5058 fprintf_unfiltered (gdb_stdlog
,
5059 "LNSL: already stopped/no "
5060 "stop_requested yet %s\n",
5061 target_pid_to_str (lwp
->ptid
));
5068 linux_nat_stop (ptid_t ptid
)
5071 iterate_over_lwps (ptid
, linux_nat_stop_lwp
, NULL
);
5073 linux_ops
->to_stop (ptid
);
5077 linux_nat_close (int quitting
)
5079 /* Unregister from the event loop. */
5080 if (linux_nat_is_async_p ())
5081 linux_nat_async (NULL
, 0);
5083 if (linux_ops
->to_close
)
5084 linux_ops
->to_close (quitting
);
5087 /* When requests are passed down from the linux-nat layer to the
5088 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5089 used. The address space pointer is stored in the inferior object,
5090 but the common code that is passed such ptid can't tell whether
5091 lwpid is a "main" process id or not (it assumes so). We reverse
5092 look up the "main" process id from the lwp here. */
5094 struct address_space
*
5095 linux_nat_thread_address_space (struct target_ops
*t
, ptid_t ptid
)
5097 struct lwp_info
*lwp
;
5098 struct inferior
*inf
;
5101 pid
= GET_LWP (ptid
);
5102 if (GET_LWP (ptid
) == 0)
5104 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5106 lwp
= find_lwp_pid (ptid
);
5107 pid
= GET_PID (lwp
->ptid
);
5111 /* A (pid,lwpid,0) ptid. */
5112 pid
= GET_PID (ptid
);
5115 inf
= find_inferior_pid (pid
);
5116 gdb_assert (inf
!= NULL
);
5121 linux_nat_core_of_thread_1 (ptid_t ptid
)
5123 struct cleanup
*back_to
;
5126 char *content
= NULL
;
5129 int content_read
= 0;
5133 filename
= xstrprintf ("/proc/%d/task/%ld/stat",
5134 GET_PID (ptid
), GET_LWP (ptid
));
5135 back_to
= make_cleanup (xfree
, filename
);
5137 f
= fopen (filename
, "r");
5140 do_cleanups (back_to
);
5144 make_cleanup_fclose (f
);
5150 content
= xrealloc (content
, content_read
+ 1024);
5151 n
= fread (content
+ content_read
, 1, 1024, f
);
5155 content
[content_read
] = '\0';
5160 make_cleanup (xfree
, content
);
5162 p
= strchr (content
, '(');
5166 p
= strchr (p
, ')');
5170 /* If the first field after program name has index 0, then core number is
5171 the field with index 36. There's no constant for that anywhere. */
5173 p
= strtok_r (p
, " ", &ts
);
5174 for (i
= 0; p
!= NULL
&& i
!= 36; ++i
)
5175 p
= strtok_r (NULL
, " ", &ts
);
5177 if (p
== NULL
|| sscanf (p
, "%d", &core
) == 0)
5180 do_cleanups (back_to
);
5185 /* Return the cached value of the processor core for thread PTID. */
5188 linux_nat_core_of_thread (struct target_ops
*ops
, ptid_t ptid
)
5190 struct lwp_info
*info
= find_lwp_pid (ptid
);
5198 linux_nat_add_target (struct target_ops
*t
)
5200 /* Save the provided single-threaded target. We save this in a separate
5201 variable because another target we've inherited from (e.g. inf-ptrace)
5202 may have saved a pointer to T; we want to use it for the final
5203 process stratum target. */
5204 linux_ops_saved
= *t
;
5205 linux_ops
= &linux_ops_saved
;
5207 /* Override some methods for multithreading. */
5208 t
->to_create_inferior
= linux_nat_create_inferior
;
5209 t
->to_attach
= linux_nat_attach
;
5210 t
->to_detach
= linux_nat_detach
;
5211 t
->to_resume
= linux_nat_resume
;
5212 t
->to_wait
= linux_nat_wait
;
5213 t
->to_pass_signals
= linux_nat_pass_signals
;
5214 t
->to_xfer_partial
= linux_nat_xfer_partial
;
5215 t
->to_kill
= linux_nat_kill
;
5216 t
->to_mourn_inferior
= linux_nat_mourn_inferior
;
5217 t
->to_thread_alive
= linux_nat_thread_alive
;
5218 t
->to_pid_to_str
= linux_nat_pid_to_str
;
5219 t
->to_thread_name
= linux_nat_thread_name
;
5220 t
->to_has_thread_control
= tc_schedlock
;
5221 t
->to_thread_address_space
= linux_nat_thread_address_space
;
5222 t
->to_stopped_by_watchpoint
= linux_nat_stopped_by_watchpoint
;
5223 t
->to_stopped_data_address
= linux_nat_stopped_data_address
;
5225 t
->to_can_async_p
= linux_nat_can_async_p
;
5226 t
->to_is_async_p
= linux_nat_is_async_p
;
5227 t
->to_supports_non_stop
= linux_nat_supports_non_stop
;
5228 t
->to_async
= linux_nat_async
;
5229 t
->to_terminal_inferior
= linux_nat_terminal_inferior
;
5230 t
->to_terminal_ours
= linux_nat_terminal_ours
;
5231 t
->to_close
= linux_nat_close
;
5233 /* Methods for non-stop support. */
5234 t
->to_stop
= linux_nat_stop
;
5236 t
->to_supports_multi_process
= linux_nat_supports_multi_process
;
5238 t
->to_supports_disable_randomization
5239 = linux_nat_supports_disable_randomization
;
5241 t
->to_core_of_thread
= linux_nat_core_of_thread
;
5243 /* We don't change the stratum; this target will sit at
5244 process_stratum and thread_db will set at thread_stratum. This
5245 is a little strange, since this is a multi-threaded-capable
5246 target, but we want to be on the stack below thread_db, and we
5247 also want to be used for single-threaded processes. */
5252 /* Register a method to call whenever a new thread is attached. */
5254 linux_nat_set_new_thread (struct target_ops
*t
,
5255 void (*new_thread
) (struct lwp_info
*))
5257 /* Save the pointer. We only support a single registered instance
5258 of the GNU/Linux native target, so we do not need to map this to
5260 linux_nat_new_thread
= new_thread
;
5263 /* Register a method that converts a siginfo object between the layout
5264 that ptrace returns, and the layout in the architecture of the
5267 linux_nat_set_siginfo_fixup (struct target_ops
*t
,
5268 int (*siginfo_fixup
) (struct siginfo
*,
5272 /* Save the pointer. */
5273 linux_nat_siginfo_fixup
= siginfo_fixup
;
5276 /* Register a method to call prior to resuming a thread. */
5279 linux_nat_set_prepare_to_resume (struct target_ops
*t
,
5280 void (*prepare_to_resume
) (struct lwp_info
*))
5282 /* Save the pointer. */
5283 linux_nat_prepare_to_resume
= prepare_to_resume
;
5286 /* Return the saved siginfo associated with PTID. */
5288 linux_nat_get_siginfo (ptid_t ptid
)
5290 struct lwp_info
*lp
= find_lwp_pid (ptid
);
5292 gdb_assert (lp
!= NULL
);
5294 return &lp
->siginfo
;
5297 /* Provide a prototype to silence -Wmissing-prototypes. */
5298 extern initialize_file_ftype _initialize_linux_nat
;
5301 _initialize_linux_nat (void)
5303 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance
,
5304 &debug_linux_nat
, _("\
5305 Set debugging of GNU/Linux lwp module."), _("\
5306 Show debugging of GNU/Linux lwp module."), _("\
5307 Enables printf debugging output."),
5309 show_debug_linux_nat
,
5310 &setdebuglist
, &showdebuglist
);
5312 /* Save this mask as the default. */
5313 sigprocmask (SIG_SETMASK
, NULL
, &normal_mask
);
5315 /* Install a SIGCHLD handler. */
5316 sigchld_action
.sa_handler
= sigchld_handler
;
5317 sigemptyset (&sigchld_action
.sa_mask
);
5318 sigchld_action
.sa_flags
= SA_RESTART
;
5320 /* Make it the default. */
5321 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
5323 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5324 sigprocmask (SIG_SETMASK
, NULL
, &suspend_mask
);
5325 sigdelset (&suspend_mask
, SIGCHLD
);
5327 sigemptyset (&blocked_mask
);
5331 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5332 the GNU/Linux Threads library and therefore doesn't really belong
5335 /* Read variable NAME in the target and return its value if found.
5336 Otherwise return zero. It is assumed that the type of the variable
5340 get_signo (const char *name
)
5342 struct minimal_symbol
*ms
;
5345 ms
= lookup_minimal_symbol (name
, NULL
, NULL
);
5349 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms
), (gdb_byte
*) &signo
,
5350 sizeof (signo
)) != 0)
5356 /* Return the set of signals used by the threads library in *SET. */
5359 lin_thread_get_thread_signals (sigset_t
*set
)
5361 struct sigaction action
;
5362 int restart
, cancel
;
5364 sigemptyset (&blocked_mask
);
5367 restart
= get_signo ("__pthread_sig_restart");
5368 cancel
= get_signo ("__pthread_sig_cancel");
5370 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5371 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5372 not provide any way for the debugger to query the signal numbers -
5373 fortunately they don't change! */
5376 restart
= __SIGRTMIN
;
5379 cancel
= __SIGRTMIN
+ 1;
5381 sigaddset (set
, restart
);
5382 sigaddset (set
, cancel
);
5384 /* The GNU/Linux Threads library makes terminating threads send a
5385 special "cancel" signal instead of SIGCHLD. Make sure we catch
5386 those (to prevent them from terminating GDB itself, which is
5387 likely to be their default action) and treat them the same way as
5390 action
.sa_handler
= sigchld_handler
;
5391 sigemptyset (&action
.sa_mask
);
5392 action
.sa_flags
= SA_RESTART
;
5393 sigaction (cancel
, &action
, NULL
);
5395 /* We block the "cancel" signal throughout this code ... */
5396 sigaddset (&blocked_mask
, cancel
);
5397 sigprocmask (SIG_BLOCK
, &blocked_mask
, NULL
);
5399 /* ... except during a sigsuspend. */
5400 sigdelset (&suspend_mask
, cancel
);