1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdb_string.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
29 #include <sys/syscall.h>
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
38 #include "inf-ptrace.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
50 #include "event-loop.h"
51 #include "event-top.h"
53 #ifdef HAVE_PERSONALITY
54 # include <sys/personality.h>
55 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
56 # define ADDR_NO_RANDOMIZE 0x0040000
58 #endif /* HAVE_PERSONALITY */
60 /* This comment documents high-level logic of this file.
62 Waiting for events in sync mode
63 ===============================
65 When waiting for an event in a specific thread, we just use waitpid, passing
66 the specific pid, and not passing WNOHANG.
68 When waiting for an event in all threads, waitpid is not quite good. Prior to
69 version 2.4, Linux can either wait for event in main thread, or in secondary
70 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
71 miss an event. The solution is to use non-blocking waitpid, together with
72 sigsuspend. First, we use non-blocking waitpid to get an event in the main
73 process, if any. Second, we use non-blocking waitpid with the __WCLONED
74 flag to check for events in cloned processes. If nothing is found, we use
75 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
76 happened to a child process -- and SIGCHLD will be delivered both for events
77 in main debugged process and in cloned processes. As soon as we know there's
78 an event, we get back to calling nonblocking waitpid with and without __WCLONED.
80 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
81 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
82 blocked, the signal becomes pending and sigsuspend immediately
83 notices it and returns.
85 Waiting for events in async mode
86 ================================
88 In async mode, GDB should always be ready to handle both user input and target
89 events, so neither blocking waitpid nor sigsuspend are viable
90 options. Instead, we should notify the GDB main event loop whenever there's
91 unprocessed event from the target. The only way to notify this event loop is
92 to make it wait on input from a pipe, and write something to the pipe whenever
93 there's event. Obviously, if we fail to notify the event loop when there's a
94 target event, that's bad. If we notify the event loop when there's no event
95 from target, linux-nat.c will detect that there's no event, actually, and
96 report event of type TARGET_WAITKIND_IGNORE, but it will waste time and
99 The main design point is that every time GDB is outside linux-nat.c, we have a
100 SIGCHLD handler installed that is called when something happens to the target
101 and notifies the GDB event loop. Also, the event is extracted from the target
102 using waitpid and stored for future use. Whenever GDB core decides to handle
103 the event, and calls into linux-nat.c, we disable SIGCHLD and process things
104 as in sync mode, except that before waitpid call we check if there are any
105 previously read events.
107 It could happen that during event processing, we'll try to get more events
108 than there are events in the local queue, which will result to waitpid call.
109 Those waitpid calls, while blocking, are guaranteed to always have
110 something for waitpid to return. E.g., stopping a thread with SIGSTOP, and
111 waiting for the lwp to stop.
113 The event loop is notified about new events using a pipe. SIGCHLD handler does
114 waitpid and writes the results in to a pipe. GDB event loop has the other end
115 of the pipe among the sources. When event loop starts to process the event
116 and calls a function in linux-nat.c, all events from the pipe are transferred
117 into a local queue and SIGCHLD is blocked. Further processing goes as in sync
118 mode. Before we return from linux_nat_wait, we transfer all unprocessed events
119 from local queue back to the pipe, so that when we get back to event loop,
120 event loop will notice there's something more to do.
122 SIGCHLD is blocked when we're inside target_wait, so that should we actually
123 want to wait for some more events, SIGCHLD handler does not steal them from
124 us. Technically, it would be possible to add new events to the local queue but
125 it's about the same amount of work as blocking SIGCHLD.
127 This moving of events from pipe into local queue and back into pipe when we
128 enter/leave linux-nat.c is somewhat ugly. Unfortunately, GDB event loop is
129 home-grown and incapable of waiting on any queue.
134 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
135 signal is not entirely significant; we just need for a signal to be delivered,
136 so that we can intercept it. SIGSTOP's advantage is that it can not be
137 blocked. A disadvantage is that it is not a real-time signal, so it can only
138 be queued once; we do not keep track of other sources of SIGSTOP.
140 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
141 use them, because they have special behavior when the signal is generated -
142 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
143 kills the entire thread group.
145 A delivered SIGSTOP would stop the entire thread group, not just the thread we
146 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
147 cancel it (by PTRACE_CONT without passing SIGSTOP).
149 We could use a real-time signal instead. This would solve those problems; we
150 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
151 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
152 generates it, and there are races with trying to find a signal that is not
156 #define O_LARGEFILE 0
159 /* If the system headers did not provide the constants, hard-code the normal
161 #ifndef PTRACE_EVENT_FORK
163 #define PTRACE_SETOPTIONS 0x4200
164 #define PTRACE_GETEVENTMSG 0x4201
166 /* options set using PTRACE_SETOPTIONS */
167 #define PTRACE_O_TRACESYSGOOD 0x00000001
168 #define PTRACE_O_TRACEFORK 0x00000002
169 #define PTRACE_O_TRACEVFORK 0x00000004
170 #define PTRACE_O_TRACECLONE 0x00000008
171 #define PTRACE_O_TRACEEXEC 0x00000010
172 #define PTRACE_O_TRACEVFORKDONE 0x00000020
173 #define PTRACE_O_TRACEEXIT 0x00000040
175 /* Wait extended result codes for the above trace options. */
176 #define PTRACE_EVENT_FORK 1
177 #define PTRACE_EVENT_VFORK 2
178 #define PTRACE_EVENT_CLONE 3
179 #define PTRACE_EVENT_EXEC 4
180 #define PTRACE_EVENT_VFORK_DONE 5
181 #define PTRACE_EVENT_EXIT 6
183 #endif /* PTRACE_EVENT_FORK */
185 /* We can't always assume that this flag is available, but all systems
186 with the ptrace event handlers also have __WALL, so it's safe to use
189 #define __WALL 0x40000000 /* Wait for any child. */
192 #ifndef PTRACE_GETSIGINFO
193 #define PTRACE_GETSIGINFO 0x4202
196 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
197 the use of the multi-threaded target. */
198 static struct target_ops
*linux_ops
;
199 static struct target_ops linux_ops_saved
;
201 /* The method to call, if any, when a new thread is attached. */
202 static void (*linux_nat_new_thread
) (ptid_t
);
204 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
205 Called by our to_xfer_partial. */
206 static LONGEST (*super_xfer_partial
) (struct target_ops
*,
208 const char *, gdb_byte
*,
212 static int debug_linux_nat
;
/* "show debug lin-lwp" command callback: report the current value of
   the debug_linux_nat flag to FILE.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
221 static int debug_linux_nat_async
= 0;
/* "show debug lin-lwp-async" command callback: report the current
   value of the debug_linux_nat_async flag to FILE.  */
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugging of GNU/Linux async lwp module is %s.\n"),
		    value);
}
230 static int disable_randomization
= 1;
/* "show disable-randomization" command callback.  When the host lacks
   personality(2) support, randomization cannot be disabled at all, so
   report that instead of the setting's value.  */
static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file, _("\
Disabling randomization of debuggee's virtual address space is %s.\n"),
		    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}
248 set_disable_randomization (char *args
, int from_tty
, struct cmd_list_element
*c
)
250 #ifndef HAVE_PERSONALITY
252 Disabling randomization of debuggee's virtual address space is unsupported on\n\
254 #endif /* !HAVE_PERSONALITY */
257 static int linux_parent_pid
;
259 struct simple_pid_list
263 struct simple_pid_list
*next
;
265 struct simple_pid_list
*stopped_pids
;
267 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
268 can not be used, 1 if it can. */
270 static int linux_supports_tracefork_flag
= -1;
272 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
273 PTRACE_O_TRACEVFORKDONE. */
275 static int linux_supports_tracevforkdone_flag
= -1;
277 /* Async mode support */
279 /* Zero if the async mode, although enabled, is masked, which means
280 linux_nat_wait should behave as if async mode was off. */
281 static int linux_nat_async_mask_value
= 1;
283 /* The read/write ends of the pipe registered as waitable file in the
285 static int linux_nat_event_pipe
[2] = { -1, -1 };
287 /* Number of queued events in the pipe. */
288 static volatile int linux_nat_num_queued_events
;
290 /* The possible SIGCHLD handling states. */
294 /* SIGCHLD disabled, with action set to sigchld_handler, for the
295 sigsuspend in linux_nat_wait. */
297 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
299 /* Set SIGCHLD to default action. Used while creating an
304 /* The current SIGCHLD handling state. */
305 static enum sigchld_state linux_nat_async_events_state
;
307 static enum sigchld_state
linux_nat_async_events (enum sigchld_state enable
);
308 static void pipe_to_local_event_queue (void);
309 static void local_event_queue_to_pipe (void);
310 static void linux_nat_event_pipe_push (int pid
, int status
, int options
);
311 static int linux_nat_event_pipe_pop (int* ptr_status
, int* ptr_options
);
312 static void linux_nat_set_async_mode (int on
);
313 static void linux_nat_async (void (*callback
)
314 (enum inferior_event_type event_type
, void *context
),
316 static int linux_nat_async_mask (int mask
);
317 static int kill_lwp (int lwpid
, int signo
);
319 static int send_sigint_callback (struct lwp_info
*lp
, void *data
);
320 static int stop_callback (struct lwp_info
*lp
, void *data
);
322 /* Captures the result of a successful waitpid call, along with the
323 options used in that call. */
324 struct waitpid_result
329 struct waitpid_result
*next
;
332 /* A singly-linked list of the results of the waitpid calls performed
333 in the async SIGCHLD handler. */
334 static struct waitpid_result
*waitpid_queue
= NULL
;
337 queued_waitpid (int pid
, int *status
, int flags
)
339 struct waitpid_result
*msg
= waitpid_queue
, *prev
= NULL
;
341 if (debug_linux_nat_async
)
342 fprintf_unfiltered (gdb_stdlog
,
344 QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
345 linux_nat_async_events_state
,
346 linux_nat_num_queued_events
);
350 for (; msg
; prev
= msg
, msg
= msg
->next
)
351 if (pid
== -1 || pid
== msg
->pid
)
354 else if (flags
& __WCLONE
)
356 for (; msg
; prev
= msg
, msg
= msg
->next
)
357 if (msg
->options
& __WCLONE
358 && (pid
== -1 || pid
== msg
->pid
))
363 for (; msg
; prev
= msg
, msg
= msg
->next
)
364 if ((msg
->options
& __WCLONE
) == 0
365 && (pid
== -1 || pid
== msg
->pid
))
374 prev
->next
= msg
->next
;
376 waitpid_queue
= msg
->next
;
380 *status
= msg
->status
;
383 if (debug_linux_nat_async
)
384 fprintf_unfiltered (gdb_stdlog
, "QWPID: pid(%d), status(%x)\n",
391 if (debug_linux_nat_async
)
392 fprintf_unfiltered (gdb_stdlog
, "QWPID: miss\n");
400 push_waitpid (int pid
, int status
, int options
)
402 struct waitpid_result
*event
, *new_event
;
404 new_event
= xmalloc (sizeof (*new_event
));
405 new_event
->pid
= pid
;
406 new_event
->status
= status
;
407 new_event
->options
= options
;
408 new_event
->next
= NULL
;
412 for (event
= waitpid_queue
;
413 event
&& event
->next
;
417 event
->next
= new_event
;
420 waitpid_queue
= new_event
;
423 /* Drain all queued events of PID. If PID is -1, the effect is of
424 draining all events. */
426 drain_queued_events (int pid
)
428 while (queued_waitpid (pid
, NULL
, __WALL
) != -1)
433 /* Trivial list manipulation functions to keep track of a list of
434 new stopped processes. */
436 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
438 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
440 new_pid
->status
= status
;
441 new_pid
->next
= *listp
;
446 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *status
)
448 struct simple_pid_list
**p
;
450 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
451 if ((*p
)->pid
== pid
)
453 struct simple_pid_list
*next
= (*p
)->next
;
454 *status
= (*p
)->status
;
463 linux_record_stopped_pid (int pid
, int status
)
465 add_to_pid_list (&stopped_pids
, pid
, status
);
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the child: request tracing, stop so the parent can set
   ptrace options, then fork once (to trigger a fork event) and exit.  */
static int
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
482 /* Wrapper function for waitpid which handles EINTR, and checks for
483 locally queued events. */
486 my_waitpid (int pid
, int *status
, int flags
)
490 /* There should be no concurrent calls to waitpid. */
491 gdb_assert (linux_nat_async_events_state
== sigchld_sync
);
493 ret
= queued_waitpid (pid
, status
, flags
);
499 ret
= waitpid (pid
, status
, flags
);
501 while (ret
== -1 && errno
== EINTR
);
506 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
508 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
509 we know that the feature is not available. This may change the tracing
510 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
512 However, if it succeeds, we don't know for sure that the feature is
513 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
514 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
515 fork tracing, and let it fork. If the process exits, we assume that we
516 can't use TRACEFORK; if we get the fork notification, and we can extract
517 the new child's PID, then we assume that we can. */
520 linux_test_for_tracefork (int original_pid
)
522 int child_pid
, ret
, status
;
524 enum sigchld_state async_events_original_state
;
526 async_events_original_state
= linux_nat_async_events (sigchld_sync
);
528 linux_supports_tracefork_flag
= 0;
529 linux_supports_tracevforkdone_flag
= 0;
531 ret
= ptrace (PTRACE_SETOPTIONS
, original_pid
, 0, PTRACE_O_TRACEFORK
);
537 perror_with_name (("fork"));
540 linux_tracefork_child ();
542 ret
= my_waitpid (child_pid
, &status
, 0);
544 perror_with_name (("waitpid"));
545 else if (ret
!= child_pid
)
546 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret
);
547 if (! WIFSTOPPED (status
))
548 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status
);
550 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0, PTRACE_O_TRACEFORK
);
553 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
556 warning (_("linux_test_for_tracefork: failed to kill child"));
557 linux_nat_async_events (async_events_original_state
);
561 ret
= my_waitpid (child_pid
, &status
, 0);
562 if (ret
!= child_pid
)
563 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
564 else if (!WIFSIGNALED (status
))
565 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
566 "killed child"), status
);
568 linux_nat_async_events (async_events_original_state
);
572 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
573 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
574 PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORKDONE
);
575 linux_supports_tracevforkdone_flag
= (ret
== 0);
577 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
579 warning (_("linux_test_for_tracefork: failed to resume child"));
581 ret
= my_waitpid (child_pid
, &status
, 0);
583 if (ret
== child_pid
&& WIFSTOPPED (status
)
584 && status
>> 16 == PTRACE_EVENT_FORK
)
587 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
588 if (ret
== 0 && second_pid
!= 0)
592 linux_supports_tracefork_flag
= 1;
593 my_waitpid (second_pid
, &second_status
, 0);
594 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
596 warning (_("linux_test_for_tracefork: failed to kill second child"));
597 my_waitpid (second_pid
, &status
, 0);
601 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
602 "(%d, status 0x%x)"), ret
, status
);
604 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
606 warning (_("linux_test_for_tracefork: failed to kill child"));
607 my_waitpid (child_pid
, &status
, 0);
609 linux_nat_async_events (async_events_original_state
);
612 /* Return non-zero iff we have tracefork functionality available.
613 This function also sets linux_supports_tracefork_flag. */
616 linux_supports_tracefork (int pid
)
618 if (linux_supports_tracefork_flag
== -1)
619 linux_test_for_tracefork (pid
);
620 return linux_supports_tracefork_flag
;
624 linux_supports_tracevforkdone (int pid
)
626 if (linux_supports_tracefork_flag
== -1)
627 linux_test_for_tracefork (pid
);
628 return linux_supports_tracevforkdone_flag
;
633 linux_enable_event_reporting (ptid_t ptid
)
635 int pid
= ptid_get_lwp (ptid
);
639 pid
= ptid_get_pid (ptid
);
641 if (! linux_supports_tracefork (pid
))
644 options
= PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEEXEC
645 | PTRACE_O_TRACECLONE
;
646 if (linux_supports_tracevforkdone (pid
))
647 options
|= PTRACE_O_TRACEVFORKDONE
;
649 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
650 read-only process state. */
652 ptrace (PTRACE_SETOPTIONS
, pid
, 0, options
);
/* target_ops post-attach hook: enable extended event reporting on the
   freshly attached process and look for a thread library.  */
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}
663 linux_child_post_startup_inferior (ptid_t ptid
)
665 linux_enable_event_reporting (ptid
);
666 check_for_thread_db ();
670 linux_child_follow_fork (struct target_ops
*ops
, int follow_child
)
673 struct target_waitstatus last_status
;
675 int parent_pid
, child_pid
;
677 if (target_can_async_p ())
678 target_async (NULL
, 0);
680 get_last_target_status (&last_ptid
, &last_status
);
681 has_vforked
= (last_status
.kind
== TARGET_WAITKIND_VFORKED
);
682 parent_pid
= ptid_get_lwp (last_ptid
);
684 parent_pid
= ptid_get_pid (last_ptid
);
685 child_pid
= PIDGET (last_status
.value
.related_pid
);
689 /* We're already attached to the parent, by default. */
691 /* Before detaching from the child, remove all breakpoints from
692 it. (This won't actually modify the breakpoint list, but will
693 physically remove the breakpoints from the child.) */
694 /* If we vforked this will remove the breakpoints from the parent
695 also, but they'll be reinserted below. */
696 detach_breakpoints (child_pid
);
698 /* Detach new forked process? */
701 if (info_verbose
|| debug_linux_nat
)
703 target_terminal_ours ();
704 fprintf_filtered (gdb_stdlog
,
705 "Detaching after fork from child process %d.\n",
709 ptrace (PTRACE_DETACH
, child_pid
, 0, 0);
713 struct fork_info
*fp
;
714 /* Retain child fork in ptrace (stopped) state. */
715 fp
= find_fork_pid (child_pid
);
717 fp
= add_fork (child_pid
);
718 fork_save_infrun_state (fp
, 0);
723 gdb_assert (linux_supports_tracefork_flag
>= 0);
724 if (linux_supports_tracevforkdone (0))
728 ptrace (PTRACE_CONT
, parent_pid
, 0, 0);
729 my_waitpid (parent_pid
, &status
, __WALL
);
730 if ((status
>> 16) != PTRACE_EVENT_VFORK_DONE
)
731 warning (_("Unexpected waitpid result %06x when waiting for "
732 "vfork-done"), status
);
736 /* We can't insert breakpoints until the child has
737 finished with the shared memory region. We need to
738 wait until that happens. Ideal would be to just
740 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
741 - waitpid (parent_pid, &status, __WALL);
742 However, most architectures can't handle a syscall
743 being traced on the way out if it wasn't traced on
746 We might also think to loop, continuing the child
747 until it exits or gets a SIGTRAP. One problem is
748 that the child might call ptrace with PTRACE_TRACEME.
750 There's no simple and reliable way to figure out when
751 the vforked child will be done with its copy of the
752 shared memory. We could step it out of the syscall,
753 two instructions, let it go, and then single-step the
754 parent once. When we have hardware single-step, this
755 would work; with software single-step it could still
756 be made to work but we'd have to be able to insert
757 single-step breakpoints in the child, and we'd have
758 to insert -just- the single-step breakpoint in the
759 parent. Very awkward.
761 In the end, the best we can do is to make sure it
762 runs for a little while. Hopefully it will be out of
763 range of any breakpoints we reinsert. Usually this
764 is only the single-step breakpoint at vfork's return
770 /* Since we vforked, breakpoints were removed in the parent
771 too. Put them back. */
772 reattach_breakpoints (parent_pid
);
777 struct thread_info
*last_tp
= find_thread_pid (last_ptid
);
778 struct thread_info
*tp
;
779 char child_pid_spelling
[40];
781 /* Copy user stepping state to the new inferior thread. */
782 struct breakpoint
*step_resume_breakpoint
= last_tp
->step_resume_breakpoint
;
783 CORE_ADDR step_range_start
= last_tp
->step_range_start
;
784 CORE_ADDR step_range_end
= last_tp
->step_range_end
;
785 struct frame_id step_frame_id
= last_tp
->step_frame_id
;
787 /* Otherwise, deleting the parent would get rid of this
789 last_tp
->step_resume_breakpoint
= NULL
;
791 /* Needed to keep the breakpoint lists in sync. */
793 detach_breakpoints (child_pid
);
795 /* Before detaching from the parent, remove all breakpoints from it. */
796 remove_breakpoints ();
798 if (info_verbose
|| debug_linux_nat
)
800 target_terminal_ours ();
801 fprintf_filtered (gdb_stdlog
,
802 "Attaching after fork to child process %d.\n",
806 /* If we're vforking, we may want to hold on to the parent until
807 the child exits or execs. At exec time we can remove the old
808 breakpoints from the parent and detach it; at exit time we
809 could do the same (or even, sneakily, resume debugging it - the
810 child's exec has failed, or something similar).
812 This doesn't clean up "properly", because we can't call
813 target_detach, but that's OK; if the current target is "child",
814 then it doesn't need any further cleanups, and lin_lwp will
815 generally not encounter vfork (vfork is defined to fork
818 The holding part is very easy if we have VFORKDONE events;
819 but keeping track of both processes is beyond GDB at the
820 moment. So we don't expose the parent to the rest of GDB.
821 Instead we quietly hold onto it until such time as we can
825 linux_parent_pid
= parent_pid
;
826 else if (!detach_fork
)
828 struct fork_info
*fp
;
829 /* Retain parent fork in ptrace (stopped) state. */
830 fp
= find_fork_pid (parent_pid
);
832 fp
= add_fork (parent_pid
);
833 fork_save_infrun_state (fp
, 0);
836 target_detach (NULL
, 0);
838 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
840 /* Reinstall ourselves, since we might have been removed in
841 target_detach (which does other necessary cleanup). */
844 linux_nat_switch_fork (inferior_ptid
);
845 check_for_thread_db ();
847 tp
= inferior_thread ();
848 tp
->step_resume_breakpoint
= step_resume_breakpoint
;
849 tp
->step_range_start
= step_range_start
;
850 tp
->step_range_end
= step_range_end
;
851 tp
->step_frame_id
= step_frame_id
;
853 /* Reset breakpoints in the child as appropriate. */
854 follow_inferior_reset_breakpoints ();
857 if (target_can_async_p ())
858 target_async (inferior_event_handler
, 0);
/* target_ops hook: error out unless fork events can be traced.  */
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}
/* target_ops hook: error out unless vfork events can be traced.  */
static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}
/* target_ops hook: error out unless exec events can be traced.  */
static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}
885 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
886 are processes sharing the same VM space. A multi-threaded process
887 is basically a group of such processes. However, such a grouping
888 is almost entirely a user-space issue; the kernel doesn't enforce
889 such a grouping at all (this might change in the future). In
890 general, we'll rely on the threads library (i.e. the GNU/Linux
891 Threads library) to provide such a grouping.
893 It is perfectly well possible to write a multi-threaded application
894 without the assistance of a threads library, by using the clone
895 system call directly. This module should be able to give some
896 rudimentary support for debugging such applications if developers
897 specify the CLONE_PTRACE flag in the clone system call, and are
898 using the Linux kernel 2.4 or above.
900 Note that there are some peculiarities in GNU/Linux that affect
903 - In general one should specify the __WCLONE flag to waitpid in
904 order to make it report events for any of the cloned processes
905 (and leave it out for the initial process). However, if a cloned
906 process has exited the exit status is only reported if the
907 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
908 we cannot use it since GDB must work on older systems too.
910 - When a traced, cloned process exits and is waited for by the
911 debugger, the kernel reassigns it to the original parent and
912 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
913 library doesn't notice this, which leads to the "zombie problem":
914 When debugged a multi-threaded process that spawns a lot of
915 threads will run out of processes, even if the threads exit,
916 because the "zombies" stay around. */
918 /* List of known LWPs. */
919 struct lwp_info
*lwp_list
;
921 /* Number of LWPs in the list. */
925 /* Original signal mask. */
926 static sigset_t normal_mask
;
928 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
929 _initialize_linux_nat. */
930 static sigset_t suspend_mask
;
932 /* SIGCHLD action for synchronous mode. */
933 struct sigaction sync_sigchld_action
;
935 /* SIGCHLD action for asynchronous mode. */
936 static struct sigaction async_sigchld_action
;
938 /* SIGCHLD default action, to pass to new inferiors. */
939 static struct sigaction sigchld_default_action
;
942 /* Prototypes for local functions. */
943 static int stop_wait_callback (struct lwp_info
*lp
, void *data
);
944 static int linux_nat_thread_alive (ptid_t ptid
);
945 static char *linux_child_pid_to_exec_file (int pid
);
946 static int cancel_breakpoint (struct lwp_info
*lp
);
949 /* Convert wait status STATUS to a string. Used for printing debug
953 status_to_str (int status
)
957 if (WIFSTOPPED (status
))
958 snprintf (buf
, sizeof (buf
), "%s (stopped)",
959 strsignal (WSTOPSIG (status
)));
960 else if (WIFSIGNALED (status
))
961 snprintf (buf
, sizeof (buf
), "%s (terminated)",
962 strsignal (WSTOPSIG (status
)));
964 snprintf (buf
, sizeof (buf
), "%d (exited)", WEXITSTATUS (status
));
969 /* Initialize the list of LWPs. Note that this module, contrary to
970 what GDB's generic threads layer does for its thread list,
971 re-initializes the LWP lists whenever we mourn or detach (which
972 doesn't involve mourning) the inferior. */
977 struct lwp_info
*lp
, *lpnext
;
979 for (lp
= lwp_list
; lp
; lp
= lpnext
)
989 /* Add the LWP specified by PID to the list. Return a pointer to the
990 structure describing the new LWP. The LWP should already be stopped
991 (with an exception for the very first LWP). */
993 static struct lwp_info
*
994 add_lwp (ptid_t ptid
)
998 gdb_assert (is_lwp (ptid
));
1000 lp
= (struct lwp_info
*) xmalloc (sizeof (struct lwp_info
));
1002 memset (lp
, 0, sizeof (struct lwp_info
));
1004 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
1008 lp
->next
= lwp_list
;
1012 if (num_lwps
> 1 && linux_nat_new_thread
!= NULL
)
1013 linux_nat_new_thread (ptid
);
1018 /* Remove the LWP specified by PID from the list. */
1021 delete_lwp (ptid_t ptid
)
1023 struct lwp_info
*lp
, *lpprev
;
1027 for (lp
= lwp_list
; lp
; lpprev
= lp
, lp
= lp
->next
)
1028 if (ptid_equal (lp
->ptid
, ptid
))
1037 lpprev
->next
= lp
->next
;
1039 lwp_list
= lp
->next
;
1044 /* Return a pointer to the structure describing the LWP corresponding
1045 to PID. If no corresponding LWP could be found, return NULL. */
1047 static struct lwp_info
*
1048 find_lwp_pid (ptid_t ptid
)
1050 struct lwp_info
*lp
;
1054 lwp
= GET_LWP (ptid
);
1056 lwp
= GET_PID (ptid
);
1058 for (lp
= lwp_list
; lp
; lp
= lp
->next
)
1059 if (lwp
== GET_LWP (lp
->ptid
))
1065 /* Call CALLBACK with its second argument set to DATA for every LWP in
1066 the list. If CALLBACK returns 1 for a particular LWP, return a
1067 pointer to the structure describing that LWP immediately.
1068 Otherwise return NULL. */
1071 iterate_over_lwps (int (*callback
) (struct lwp_info
*, void *), void *data
)
1073 struct lwp_info
*lp
, *lpnext
;
1075 for (lp
= lwp_list
; lp
; lp
= lpnext
)
1078 if ((*callback
) (lp
, data
))
1085 /* Update our internal state when changing from one fork (checkpoint,
1086 et cetera) to another indicated by NEW_PTID. We can only switch
1087 single-threaded applications, so we only create one new LWP, and
1088 the previous list is discarded. */
1091 linux_nat_switch_fork (ptid_t new_ptid
)
1093 struct lwp_info
*lp
;
1096 lp
= add_lwp (new_ptid
);
1099 init_thread_list ();
1100 add_thread_silent (new_ptid
);
1103 /* Handle the exit of a single thread LP. */
1106 exit_lwp (struct lwp_info
*lp
)
1108 struct thread_info
*th
= find_thread_pid (lp
->ptid
);
1112 if (print_thread_events
)
1113 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp
->ptid
));
1115 delete_thread (lp
->ptid
);
1118 delete_lwp (lp
->ptid
);
/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */
static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      /* Scan for the "State:" line; it is near the top of the file.  */
      while (fgets (buf, sizeof (buf), status_file))
	{
	  if (strncmp (buf, "State:", 6) == 0)
	    {
	      have_state = 1;
	      break;
	    }
	}
      if (have_state && strstr (buf, "T (stopped)") != NULL)
	retval = 1;
      fclose (status_file);
    }
  return retval;
}
1152 /* Wait for the LWP specified by LP, which we have just attached to.
1153 Returns a wait status for that LWP, to cache. */
/* NOTE(review): fragmentary extraction — the trailing parameter
   (presumably int *signalled), local declarations, the *cloned
   assignment and the return of STATUS are not visible here.  */
1156 linux_nat_post_attach_wait (ptid_t ptid
, int first
, int *cloned
,
1159 pid_t new_pid
, pid
= GET_LWP (ptid
);
1162 if (pid_is_stopped (pid
))
1164 if (debug_linux_nat
)
1165 fprintf_unfiltered (gdb_stdlog
,
1166 "LNPAW: Attaching to a stopped process\n");
1168 /* The process is definitely stopped. It is in a job control
1169 stop, unless the kernel predates the TASK_STOPPED /
1170 TASK_TRACED distinction, in which case it might be in a
1171 ptrace stop. Make sure it is in a ptrace stop; from there we
1172 can kill it, signal it, et cetera.
1174 First make sure there is a pending SIGSTOP. Since we are
1175 already attached, the process can not transition from stopped
1176 to running without a PTRACE_CONT; so we know this signal will
1177 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1178 probably already in the queue (unless this kernel is old
1179 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1180 is not an RT signal, it can only be queued once. */
1181 kill_lwp (pid
, SIGSTOP
);
1183 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1184 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1185 ptrace (PTRACE_CONT
, pid
, 0, 0);
1188 /* Make sure the initial process is stopped. The user-level threads
1189 layer might want to poke around in the inferior, and that won't
1190 work if things haven't stabilized yet. */
1191 new_pid
= my_waitpid (pid
, &status
, 0);
1192 if (new_pid
== -1 && errno
== ECHILD
)
/* ECHILD on a plain waitpid means this is a clone (thread) rather
   than a child process; warn (guard condition not visible) and
   retry with __WCLONE.  */
1195 warning (_("%s is a cloned process"), target_pid_to_str (ptid
));
1197 /* Try again with __WCLONE to check cloned processes. */
1198 new_pid
= my_waitpid (pid
, &status
, __WCLONE
)
;
1202 gdb_assert (pid
== new_pid
&& WIFSTOPPED (status
));
1204 if (WSTOPSIG (status
) != SIGSTOP
)
1207 if (debug_linux_nat
)
1208 fprintf_unfiltered (gdb_stdlog
,
1209 "LNPAW: Received %s after attaching\n",
1210 status_to_str (status
));
1216 /* Attach to the LWP specified by PID. Return 0 if successful or -1
1217 if the new LWP could not be attached. */
/* NOTE(review): fragmentary extraction — braces, the -1/0 return
   statements, and several field assignments (e.g. lp->stopped in the
   else branch) are not visible in this view.  */
1220 lin_lwp_attach_lwp (ptid_t ptid
)
1222 struct lwp_info
*lp
;
1223 enum sigchld_state async_events_original_state
;
1225 gdb_assert (is_lwp (ptid
));
/* Force synchronous SIGCHLD handling while we manipulate the LWP
   list; restored at the end of the function.  */
1227 async_events_original_state
= linux_nat_async_events (sigchld_sync
);
1229 lp
= find_lwp_pid (ptid
);
1231 /* We assume that we're already attached to any LWP that has an id
1232 equal to the overall process id, and to any LWP that is already
1233 in our list of LWPs. If we're not seeing exit events from threads
1234 and we've had PID wraparound since we last tried to stop all threads,
1235 this assumption might be wrong; fortunately, this is very unlikely
1237 if (GET_LWP (ptid
) != GET_PID (ptid
) && lp
== NULL
)
1239 int status
, cloned
= 0, signalled
= 0;
1241 if (ptrace (PTRACE_ATTACH
, GET_LWP (ptid
), 0, 0) < 0)
1243 /* If we fail to attach to the thread, issue a warning,
1244 but continue. One way this can happen is if thread
1245 creation is interrupted; as of Linux kernel 2.6.19, a
1246 bug may place threads in the thread list and then fail
1248 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid
),
1249 safe_strerror (errno
));
1253 if (debug_linux_nat
)
1254 fprintf_unfiltered (gdb_stdlog
,
1255 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1256 target_pid_to_str (ptid
));
/* Wait for the attach SIGSTOP and cache its wait status.  */
1258 status
= linux_nat_post_attach_wait (ptid
, 0, &cloned
, &signalled
);
1259 lp
= add_lwp (ptid
);
1261 lp
->cloned
= cloned
;
1262 lp
->signalled
= signalled
;
/* A non-SIGSTOP stop is kept in lp->status for later reporting.  */
1263 if (WSTOPSIG (status
) != SIGSTOP
)
1266 lp
->status
= status
;
1269 target_post_attach (GET_LWP (lp
->ptid
));
1271 if (debug_linux_nat
)
1273 fprintf_unfiltered (gdb_stdlog
,
1274 "LLAL: waitpid %s received %s\n",
1275 target_pid_to_str (ptid
),
1276 status_to_str (status
));
1281 /* We assume that the LWP representing the original process is
1282 already stopped. Mark it as stopped in the data structure
1283 that the GNU/linux ptrace layer uses to keep track of
1284 threads. Note that this won't have already been done since
1285 the main thread will have, we assume, been stopped by an
1286 attach from a different layer. */
1288 lp
= add_lwp (ptid
);
1292 linux_nat_async_events (async_events_original_state
);
/* Start a new inferior: mask async mode, reset signal handling,
   optionally disable address-space randomization, then delegate to
   the lower (ptrace) layer's to_create_inferior.
   NOTE(review): fragmentary extraction — the trailing parameter
   (presumably int from_tty), braces and errno = 0 resets around the
   personality calls are not visible in this view.  */
1297 linux_nat_create_inferior (char *exec_file
, char *allargs
, char **env
,
1300 int saved_async
= 0;
1301 #ifdef HAVE_PERSONALITY
1302 int personality_orig
= 0, personality_set
= 0;
1303 #endif /* HAVE_PERSONALITY */
1305 /* The fork_child mechanism is synchronous and calls target_wait, so
1306 we have to mask the async mode. */
1308 if (target_can_async_p ())
1309 /* Mask async mode. Creating a child requires a loop calling
1310 wait_for_inferior currently. */
1311 saved_async
= linux_nat_async_mask (0);
1314 /* Restore the original signal mask. */
1315 sigprocmask (SIG_SETMASK
, &normal_mask
, NULL
);
1316 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1317 suspend_mask
= normal_mask
;
1318 sigdelset (&suspend_mask
, SIGCHLD
);
1321 /* Set SIGCHLD to the default action, until after execing the child,
1322 since the inferior inherits the superior's signal mask. It will
1323 be blocked again in linux_nat_wait, which is only reached after
1324 the inferior execing. */
1325 linux_nat_async_events (sigchld_default
);
1327 #ifdef HAVE_PERSONALITY
1328 if (disable_randomization
)
/* Read current personality, then set ADDR_NO_RANDOMIZE if it is not
   already set; verify the change took effect and warn otherwise.  */
1331 personality_orig
= personality (0xffffffff);
1332 if (errno
== 0 && !(personality_orig
& ADDR_NO_RANDOMIZE
))
1334 personality_set
= 1;
1335 personality (personality_orig
| ADDR_NO_RANDOMIZE
);
1337 if (errno
!= 0 || (personality_set
1338 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE
)))
1339 warning (_("Error disabling address space randomization: %s"),
1340 safe_strerror (errno
));
1342 #endif /* HAVE_PERSONALITY */
/* Delegate actual process creation to the lower layer.  */
1344 linux_ops
->to_create_inferior (exec_file
, allargs
, env
, from_tty
);
1346 #ifdef HAVE_PERSONALITY
/* Undo the personality change in GDB's own process.  */
1347 if (personality_set
)
1350 personality (personality_orig
);
1352 warning (_("Error restoring address space randomization: %s"),
1353 safe_strerror (errno
));
1355 #endif /* HAVE_PERSONALITY */
1358 linux_nat_async_mask (saved_async
);
/* Attach to an existing process: delegate the attach to the lower
   layer, decorate the main thread with LWP info, and collect/cache
   its initial stop status (or push it to the async event pipe).
   NOTE(review): fragmentary extraction — declarations of ptid/status,
   braces, and the final &lp->signalled argument line are missing.  */
1362 linux_nat_attach (char *args
, int from_tty
)
1364 struct lwp_info
*lp
;
1368 /* FIXME: We should probably accept a list of process id's, and
1369 attach all of them. */
1370 linux_ops
->to_attach (args
, from_tty
);
1372 if (!target_can_async_p ())
1374 /* Restore the original signal mask. */
1375 sigprocmask (SIG_SETMASK
, &normal_mask
, NULL
);
1376 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1377 suspend_mask
= normal_mask
;
1378 sigdelset (&suspend_mask
, SIGCHLD
);
1381 /* The ptrace base target adds the main thread with (pid,0,0)
1382 format. Decorate it with lwp info. */
1383 ptid
= BUILD_LWP (GET_PID (inferior_ptid
), GET_PID (inferior_ptid
));
1384 thread_change_ptid (inferior_ptid
, ptid
);
1386 /* Add the initial process as the first LWP to the list. */
1387 lp
= add_lwp (ptid
);
1389 status
= linux_nat_post_attach_wait (lp
->ptid
, 1, &lp
->cloned
,
1393 /* Save the wait status to report later. */
1395 if (debug_linux_nat
)
1396 fprintf_unfiltered (gdb_stdlog
,
1397 "LNA: waitpid %ld, saving status %s\n",
1398 (long) GET_PID (lp
->ptid
), status_to_str (status
));
/* Sync mode: cache the status on the LWP for linux_nat_wait.  */
1400 if (!target_can_async_p ())
1401 lp
->status
= status
;
1404 /* We already waited for this LWP, so put the wait result on the
1405 pipe. The event loop will wake up and gets us to handling
1407 linux_nat_event_pipe_push (GET_PID (lp
->ptid
), status
,
1408 lp
->cloned
? __WCLONE
: 0);
1409 /* Register in the event loop. */
1410 target_async (inferior_event_handler
, 0);
1414 /* Get pending status of LP. */
/* NOTE(review): fragmentary extraction — the async/sync branch
   structure, *status = 0 initialization, return statements, and
   the condition guarding the non_stop block are not visible.  */
1416 get_pending_status (struct lwp_info
*lp
, int *status
)
1418 struct target_waitstatus last
;
1421 get_last_target_status (&last_ptid
, &last
);
1423 /* If this lwp is the ptid that GDB is processing an event from, the
1424 signal will be in stop_signal. Otherwise, in all-stop + sync
1425 mode, we may cache pending events in lp->status while trying to
1426 stop all threads (see stop_wait_callback). In async mode, the
1427 events are always cached in waitpid_queue. */
1433 enum target_signal signo
= TARGET_SIGNAL_0
;
1435 if (is_executing (lp
->ptid
))
1437 /* If the core thought this lwp was executing --- e.g., the
1438 executing property hasn't been updated yet, but the
1439 thread has been stopped with a stop_callback /
1440 stop_wait_callback sequence (see linux_nat_detach for
1441 example) --- we can only have pending events in the local
1443 if (queued_waitpid (GET_LWP (lp
->ptid
), status
, __WALL
) != -1)
1445 if (WIFSTOPPED (status
))
1446 signo
= target_signal_from_host (WSTOPSIG (status
));
1448 /* If not stopped, then the lwp is gone, no use in
1449 resending a signal. */
1454 /* If the core knows the thread is not executing, then we
1455 have the last signal recorded in
1456 thread_info->stop_signal. */
1458 struct thread_info
*tp
= find_thread_pid (lp
->ptid
);
1459 signo
= tp
->stop_signal
;
/* Drop signals the user has configured as "nopass".  */
1462 if (signo
!= TARGET_SIGNAL_0
1463 && !signal_pass_state (signo
))
1465 if (debug_linux_nat
)
1466 fprintf_unfiltered (gdb_stdlog
, "\
1467 GPT: lwp %s had signal %s, but it is in no pass state\n",
1468 target_pid_to_str (lp
->ptid
),
1469 target_signal_to_string (signo
));
/* Re-encode the signal as a wait status for the caller.  */
1473 if (signo
!= TARGET_SIGNAL_0
)
1474 *status
= W_STOPCODE (target_signal_to_host (signo
));
1476 if (debug_linux_nat
)
1477 fprintf_unfiltered (gdb_stdlog
,
1478 "GPT: lwp %s as pending signal %s\n",
1479 target_pid_to_str (lp
->ptid
),
1480 target_signal_to_string (signo
));
/* Below: all-stop path.  The last-event thread's stop_signal is
   used if passable; otherwise consult the queue or lp->status.  */
1485 if (GET_LWP (lp
->ptid
) == GET_LWP (last_ptid
))
1487 struct thread_info
*tp
= find_thread_pid (lp
->ptid
);
1488 if (tp
->stop_signal
!= TARGET_SIGNAL_0
1489 && signal_pass_state (tp
->stop_signal
))
1490 *status
= W_STOPCODE (target_signal_to_host (tp
->stop_signal
));
1492 else if (target_can_async_p ())
1493 queued_waitpid (GET_LWP (lp
->ptid
), status
, __WALL
);
1495 *status
= lp
->status
;
/* iterate_over_lwps callback: detach one non-main LWP, forwarding any
   pending signal via PTRACE_DETACH's signal argument.
   NOTE(review): fragmentary extraction — the lp->signalled guard for
   the SIGCONT block, status declaration/initialization, and the
   return value are not visible in this view.  */
1502 detach_callback (struct lwp_info
*lp
, void *data
)
1504 gdb_assert (lp
->status
== 0 || WIFSTOPPED (lp
->status
));
1506 if (debug_linux_nat
&& lp
->status
)
1507 fprintf_unfiltered (gdb_stdlog
, "DC: Pending %s for %s on detach.\n",
1508 strsignal (WSTOPSIG (lp
->status
)),
1509 target_pid_to_str (lp
->ptid
));
1511 /* If there is a pending SIGSTOP, get rid of it. */
1514 if (debug_linux_nat
)
1515 fprintf_unfiltered (gdb_stdlog
,
1516 "DC: Sending SIGCONT to %s\n",
1517 target_pid_to_str (lp
->ptid
));
/* SIGCONT cancels the queued SIGSTOP so it does not hit the
   detached process later.  */
1519 kill_lwp (GET_LWP (lp
->ptid
), SIGCONT
);
1523 /* We don't actually detach from the LWP that has an id equal to the
1524 overall process id just yet. */
1525 if (GET_LWP (lp
->ptid
) != GET_PID (lp
->ptid
))
1529 /* Pass on any pending signal for this LWP. */
1530 get_pending_status (lp
, &status
);
1533 if (ptrace (PTRACE_DETACH
, GET_LWP (lp
->ptid
), 0,
1534 WSTOPSIG (status
)) < 0)
1535 error (_("Can't detach %s: %s"), target_pid_to_str (lp
->ptid
),
1536 safe_strerror (errno
));
1538 if (debug_linux_nat
)
1539 fprintf_unfiltered (gdb_stdlog
,
1540 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1541 target_pid_to_str (lp
->ptid
),
1542 strsignal (WSTOPSIG (lp
->status
)));
1544 delete_lwp (lp
->ptid
);
/* Target detach method: stop every LWP, detach the non-main ones,
   forward any pending signal for the main LWP via ARGS, then let the
   lower layer detach the process itself.
   NOTE(review): fragmentary extraction — pid/status declarations, the
   trap_ptid/init_lwp_list cleanup, and braces are not visible.  */
1551 linux_nat_detach (char *args
, int from_tty
)
1555 enum target_signal sig
;
1557 if (target_can_async_p ())
1558 linux_nat_async (NULL
, 0);
1560 /* Stop all threads before detaching. ptrace requires that the
1561 thread is stopped to sucessfully detach. */
1562 iterate_over_lwps (stop_callback
, NULL
);
1563 /* ... and wait until all of them have reported back that
1564 they're no longer running. */
1565 iterate_over_lwps (stop_wait_callback
, NULL
);
1567 iterate_over_lwps (detach_callback
, NULL
);
1569 /* Only the initial process should be left right now. */
1570 gdb_assert (num_lwps
== 1);
1572 /* Pass on any pending signal for the last LWP. */
1573 if ((args
== NULL
|| *args
== '\0')
1574 && get_pending_status (lwp_list
, &status
) != -1
1575 && WIFSTOPPED (status
))
1577 /* Put the signal number in ARGS so that inf_ptrace_detach will
1578 pass it along with PTRACE_DETACH. */
/* NOTE(review): sprintf into ARGS — ARGS must point at writable
   storage here; allocation not visible in this view.  */
1580 sprintf (args
, "%d", (int) WSTOPSIG (status
));
1581 fprintf_unfiltered (gdb_stdlog
,
1582 "LND: Sending signal %s to %s\n",
1584 target_pid_to_str (lwp_list
->ptid
));
1587 /* Destroy LWP info; it's no longer valid. */
/* Collapse inferior_ptid back to a plain-pid ptid before handing
   off to the lower layer.  */
1590 pid
= GET_PID (inferior_ptid
);
1591 inferior_ptid
= pid_to_ptid (pid
);
1592 linux_ops
->to_detach (args
, from_tty
);
1594 if (target_can_async_p ())
1595 drain_queued_events (pid
);
/* iterate_over_lwps callback: resume a sibling LWP that is stopped
   and has no pending event; skip it otherwise (with debug trace).
   NOTE(review): fragmentary extraction — lp->stopped/lp->step
   clearing and the return value are not visible in this view.  */
1601 resume_callback (struct lwp_info
*lp
, void *data
)
1603 if (lp
->stopped
&& lp
->status
== 0)
1605 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
1606 0, TARGET_SIGNAL_0
);
1607 if (debug_linux_nat
)
1608 fprintf_unfiltered (gdb_stdlog
,
1609 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1610 target_pid_to_str (lp
->ptid
));
/* The cached siginfo is only valid while stopped; clear it.  */
1613 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1615 else if (lp
->stopped
&& debug_linux_nat
)
1616 fprintf_unfiltered (gdb_stdlog
, "RC: Not resuming sibling %s (has pending)\n",
1617 target_pid_to_str (lp
->ptid
));
1618 else if (debug_linux_nat
)
1619 fprintf_unfiltered (gdb_stdlog
, "RC: Not resuming sibling %s (not stopped)\n",
1620 target_pid_to_str (lp
->ptid
));
/* iterate_over_lwps callback — body not visible in this view;
   presumably clears lp->resumed (name suggests it; confirm).  */
1626 resume_clear_callback (struct lwp_info
*lp
, void *data
)
/* iterate_over_lwps callback — body not visible in this view;
   presumably sets lp->resumed (name suggests it; confirm).  */
1633 resume_set_callback (struct lwp_info
*lp
, void *data
)
/* Target resume method: mark the requested LWP(s) resumed, short-
   circuit if the event thread already has a pending status, otherwise
   resume siblings (all-stop) and the event thread via the lower layer.
   NOTE(review): fragmentary extraction — resume_all declaration,
   lp->step/lp->stopped updates, the early-return path for a pending
   status, and several braces are not visible in this view.  */
1640 linux_nat_resume (ptid_t ptid
, int step
, enum target_signal signo
)
1642 struct lwp_info
*lp
;
1645 if (debug_linux_nat
)
1646 fprintf_unfiltered (gdb_stdlog
,
1647 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1648 step
? "step" : "resume",
1649 target_pid_to_str (ptid
),
1650 signo
? strsignal (signo
) : "0",
1651 target_pid_to_str (inferior_ptid
));
1653 if (target_can_async_p ())
1654 /* Block events while we're here. */
1655 linux_nat_async_events (sigchld_sync
);
1657 /* A specific PTID means `step only this process id'. */
1658 resume_all
= (PIDGET (ptid
) == -1);
1660 if (non_stop
&& resume_all
)
1661 internal_error (__FILE__
, __LINE__
,
1662 "can't resume all in non-stop mode");
/* Mark every LWP resumed, or just the requested set — the
   resume_all branch structure is not visible here.  */
1667 iterate_over_lwps (resume_set_callback
, NULL
);
1669 iterate_over_lwps (resume_clear_callback
, NULL
);
1672 /* If PID is -1, it's the current inferior that should be
1673 handled specially. */
1674 if (PIDGET (ptid
) == -1)
1675 ptid
= inferior_ptid
;
1677 lp
= find_lwp_pid (ptid
);
1678 gdb_assert (lp
!= NULL
);
1680 /* Convert to something the lower layer understands. */
1681 ptid
= pid_to_ptid (GET_LWP (lp
->ptid
));
1683 /* Remember if we're stepping. */
1686 /* Mark this LWP as resumed. */
1689 /* If we have a pending wait status for this thread, there is no
1690 point in resuming the process. But first make sure that
1691 linux_nat_wait won't preemptively handle the event - we
1692 should never take this short-circuit if we are going to
1693 leave LP running, since we have skipped resuming all the
1694 other threads. This bit of code needs to be synchronized
1695 with linux_nat_wait. */
1697 /* In async mode, we never have pending wait status. */
1698 if (target_can_async_p () && lp
->status
)
1699 internal_error (__FILE__
, __LINE__
, "Pending status in async mode");
1701 if (lp
->status
&& WIFSTOPPED (lp
->status
))
1703 int saved_signo
= target_signal_from_host (WSTOPSIG (lp
->status
));
/* A pending signal the user neither stops on, prints, nor blocks
   does not justify the short-circuit; carry it as SIGNO instead.  */
1705 if (signal_stop_state (saved_signo
) == 0
1706 && signal_print_state (saved_signo
) == 0
1707 && signal_pass_state (saved_signo
) == 1)
1709 if (debug_linux_nat
)
1710 fprintf_unfiltered (gdb_stdlog
,
1711 "LLR: Not short circuiting for ignored "
1712 "status 0x%x\n", lp
->status
);
1714 /* FIXME: What should we do if we are supposed to continue
1715 this thread with a signal? */
1716 gdb_assert (signo
== TARGET_SIGNAL_0
);
1717 signo
= saved_signo
;
1724 /* FIXME: What should we do if we are supposed to continue
1725 this thread with a signal? */
1726 gdb_assert (signo
== TARGET_SIGNAL_0
);
1728 if (debug_linux_nat
)
1729 fprintf_unfiltered (gdb_stdlog
,
1730 "LLR: Short circuiting for status 0x%x\n",
1736 /* Mark LWP as not stopped to prevent it from being continued by
1741 iterate_over_lwps (resume_callback
, NULL
);
1743 linux_ops
->to_resume (ptid
, step
, signo
);
1744 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1746 if (debug_linux_nat
)
1747 fprintf_unfiltered (gdb_stdlog
,
1748 "LLR: %s %s, %s (resume event thread)\n",
1749 step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1750 target_pid_to_str (ptid
),
1751 signo
? strsignal (signo
) : "0");
1753 if (target_can_async_p ())
1754 target_async (inferior_event_handler
, 0);
1757 /* Issue kill to specified lwp. */
/* Set once tkill has been observed to be unavailable, so later calls
   skip the syscall attempt (assignment line not visible here).  */
1759 static int tkill_failed
;
/* NOTE(review): fragmentary extraction — errno reset, the
   !tkill_failed guard, the return of ret, and #endif are missing.  */
1762 kill_lwp (int lwpid
, int signo
)
1766 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1767 fails, then we are not using nptl threads and we should be using kill. */
1769 #ifdef HAVE_TKILL_SYSCALL
1772 int ret
= syscall (__NR_tkill
, lwpid
, signo
)
;
1773 if (errno
!= ENOSYS
)
/* Fallback for kernels/libcs without tkill: plain kill(2).  */
1780 return kill (lwpid
, signo
);
1783 /* Handle a GNU/Linux extended wait response. If we see a clone
1784 event, we need to add the new LWP to our list (and not report the
1785 trap to higher layers). This function returns non-zero if the
1786 event should be ignored and we should wait again. If STOPPING is
1787 true, the new LWP remains stopped, otherwise it is continued. */
/* NOTE(review): fragmentary extraction — the STOPPING parameter, ret
   declaration, return statements, cleanup-chain handling and many
   braces are not visible in this view.  */
1790 linux_handle_extended_wait (struct lwp_info
*lp
, int status
,
1793 int pid
= GET_LWP (lp
->ptid
);
1794 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
1795 struct lwp_info
*new_lp
= NULL
;
/* The ptrace event code is carried in the high bits of STATUS.  */
1796 int event
= status
>> 16;
1798 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
1799 || event
== PTRACE_EVENT_CLONE
)
1801 unsigned long new_pid
;
/* Fetch the new child's pid from the ptrace event message.  */
1804 ptrace (PTRACE_GETEVENTMSG
, pid
, 0, &new_pid
);
1806 /* If we haven't already seen the new PID stop, wait for it now. */
1807 if (! pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
1809 /* The new child has a pending SIGSTOP. We can't affect it until it
1810 hits the SIGSTOP, but we're already attached. */
1811 ret
= my_waitpid (new_pid
, &status
,
1812 (event
== PTRACE_EVENT_CLONE
) ? __WCLONE
: 0);
1814 perror_with_name (_("waiting for new child"));
1815 else if (ret
!= new_pid
)
1816 internal_error (__FILE__
, __LINE__
,
1817 _("wait returned unexpected PID %d"), ret
);
1818 else if (!WIFSTOPPED (status
))
1819 internal_error (__FILE__
, __LINE__
,
1820 _("wait returned unexpected status 0x%x"), status
);
1823 ourstatus
->value
.related_pid
= ptid_build (new_pid
, new_pid
, 0);
/* fork/vfork are reported to the core; clone is consumed here.  */
1825 if (event
== PTRACE_EVENT_FORK
)
1826 ourstatus
->kind
= TARGET_WAITKIND_FORKED
;
1827 else if (event
== PTRACE_EVENT_VFORK
)
1828 ourstatus
->kind
= TARGET_WAITKIND_VFORKED
;
1831 struct cleanup
*old_chain
;
1833 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
1834 new_lp
= add_lwp (BUILD_LWP (new_pid
, GET_PID (inferior_ptid
)));
1836 new_lp
->stopped
= 1;
1838 if (WSTOPSIG (status
) != SIGSTOP
)
1840 /* This can happen if someone starts sending signals to
1841 the new thread before it gets a chance to run, which
1842 have a lower number than SIGSTOP (e.g. SIGUSR1).
1843 This is an unlikely case, and harder to handle for
1844 fork / vfork than for clone, so we do not try - but
1845 we handle it for clone events here. We'll send
1846 the other signal on to the thread below. */
1848 new_lp
->signalled
= 1;
1855 /* Add the new thread to GDB's lists as soon as possible
1858 1) the frontend doesn't have to wait for a stop to
1861 2) we tag it with the correct running state. */
1863 /* If the thread_db layer is active, let it know about
1864 this new thread, and add it to GDB's list. */
1865 if (!thread_db_attach_lwp (new_lp
->ptid
))
1867 /* We're not using thread_db. Add it to GDB's
1869 target_post_attach (GET_LWP (new_lp
->ptid
));
1870 add_thread (new_lp
->ptid
);
1875 set_running (new_lp
->ptid
, 1);
1876 set_executing (new_lp
->ptid
, 1);
/* Not stopping: let the new clone run, re-delivering any signal
   that arrived before its initial SIGSTOP.  */
1882 new_lp
->stopped
= 0;
1883 new_lp
->resumed
= 1;
1884 ptrace (PTRACE_CONT
, new_pid
, 0,
1885 status
? WSTOPSIG (status
) : 0);
1888 if (debug_linux_nat
)
1889 fprintf_unfiltered (gdb_stdlog
,
1890 "LHEW: Got clone event from LWP %ld, resuming\n",
1891 GET_LWP (lp
->ptid
));
1892 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
1900 if (event
== PTRACE_EVENT_EXEC
)
1902 ourstatus
->kind
= TARGET_WAITKIND_EXECD
;
1903 ourstatus
->value
.execd_pathname
1904 = xstrdup (linux_child_pid_to_exec_file (pid
));
/* A vfork parent left detached earlier is released here.  */
1906 if (linux_parent_pid
)
1908 detach_breakpoints (linux_parent_pid
);
1909 ptrace (PTRACE_DETACH
, linux_parent_pid
, 0, 0);
1911 linux_parent_pid
= 0;
1914 /* At this point, all inserted breakpoints are gone. Doing this
1915 as soon as we detect an exec prevents the badness of deleting
1916 a breakpoint writing the current "shadow contents" to lift
1917 the bp. That shadow is NOT valid after an exec.
1919 Note that we have to do this after the detach_breakpoints
1920 call above, otherwise breakpoints wouldn't be lifted from the
1921 parent on a vfork, because detach_breakpoints would think
1922 that breakpoints are not inserted. */
1923 mark_breakpoints_out ();
1927 internal_error (__FILE__
, __LINE__
,
1928 _("unknown ptrace event %d"), event
);
1931 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
/* NOTE(review): fragmentary extraction — status declaration, the
   thread_dead handling (exit_lwp calls and returns), lp->stopped
   update and the final return are not visible in this view.  */
1935 wait_lwp (struct lwp_info
*lp
)
1939 int thread_dead
= 0;
1941 gdb_assert (!lp
->stopped
);
1942 gdb_assert (lp
->status
== 0);
1944 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, 0);
/* ECHILD: retry with __WCLONE in case LP is a cloned thread.  */
1945 if (pid
== -1 && errno
== ECHILD
)
1947 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, __WCLONE
);
1948 if (pid
== -1 && errno
== ECHILD
)
1950 /* The thread has previously exited. We need to delete it
1951 now because, for some vendor 2.4 kernels with NPTL
1952 support backported, there won't be an exit event unless
1953 it is the main thread. 2.6 kernels will report an exit
1954 event for each thread that exits, as expected. */
1956 if (debug_linux_nat
)
1957 fprintf_unfiltered (gdb_stdlog
, "WL: %s vanished.\n",
1958 target_pid_to_str (lp
->ptid
));
1964 gdb_assert (pid
== GET_LWP (lp
->ptid
));
1966 if (debug_linux_nat
)
1968 fprintf_unfiltered (gdb_stdlog
,
1969 "WL: waitpid %s received %s\n",
1970 target_pid_to_str (lp
->ptid
),
1971 status_to_str (status
));
1975 /* Check if the thread has exited. */
1976 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1979 if (debug_linux_nat
)
1980 fprintf_unfiltered (gdb_stdlog
, "WL: %s exited.\n",
1981 target_pid_to_str (lp
->ptid
));
1990 gdb_assert (WIFSTOPPED (status
));
1992 /* Handle GNU/Linux's extended waitstatus for trace events. */
1993 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
1995 if (debug_linux_nat
)
1996 fprintf_unfiltered (gdb_stdlog
,
1997 "WL: Handling extended status 0x%06x\n",
/* Recurse when the extended event was consumed (e.g. a clone).  */
1999 if (linux_handle_extended_wait (lp
, status
, 1))
2000 return wait_lwp (lp
);
2006 /* Save the most recent siginfo for LP. This is currently only called
2007 for SIGTRAP; some ports use the si_addr field for
2008 target_stopped_data_address. In the future, it may also be used to
2009 restore the siginfo of requeued signals. */
/* NOTE(review): fragmentary extraction — the errno check that selects
   between keeping and clearing lp->siginfo is not visible here.  */
2012 save_siginfo (struct lwp_info
*lp
)
2015 ptrace (PTRACE_GETSIGINFO
, GET_LWP (lp
->ptid
),
2016 (PTRACE_TYPE_ARG3
) 0, &lp
->siginfo
);
/* On failure the stale siginfo is cleared instead.  */
2019 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
2022 /* Send a SIGSTOP to LP. */
/* NOTE(review): fragmentary extraction — ret declaration, the
   lp->signalled/lp->stopped bookkeeping after the kill, and the
   return value are not visible in this view.  */
2025 stop_callback (struct lwp_info
*lp
, void *data
)
/* Only signal LWPs that are running and not already signalled.  */
2027 if (!lp
->stopped
&& !lp
->signalled
)
2031 if (debug_linux_nat
)
2033 fprintf_unfiltered (gdb_stdlog
,
2034 "SC: kill %s **<SIGSTOP>**\n",
2035 target_pid_to_str (lp
->ptid
));
2038 ret
= kill_lwp (GET_LWP (lp
->ptid
), SIGSTOP
);
2039 if (debug_linux_nat
)
2041 fprintf_unfiltered (gdb_stdlog
,
2042 "SC: lwp kill %d %s\n",
2044 errno
? safe_strerror (errno
) : "ERRNO-OK");
2048 gdb_assert (lp
->status
== 0);
2054 /* Return non-zero if LWP PID has a pending SIGINT. */
/* NOTE(review): fragmentary extraction — the return statements
   following the condition are not visible in this view.  */
2057 linux_nat_has_pending_sigint (int pid
)
2059 sigset_t pending
, blocked
, ignored
;
/* Read the pending/blocked/ignored sets from /proc for PID.  */
2062 linux_proc_pending_signals (pid
, &pending
, &blocked
, &ignored
);
/* A pending-but-ignored SIGINT does not count.  */
2064 if (sigismember (&pending
, SIGINT
)
2065 && !sigismember (&ignored
, SIGINT
))
2071 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
/* NOTE(review): fragmentary extraction — the branch that consumes a
   pending SIGINT (clearing lp->status) and the return value are not
   visible in this view.  */
2074 set_ignore_sigint (struct lwp_info
*lp
, void *data
)
2076 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2077 flag to consume the next one. */
2078 if (lp
->stopped
&& lp
->status
!= 0 && WIFSTOPPED (lp
->status
)
2079 && WSTOPSIG (lp
->status
) == SIGINT
)
2082 lp
->ignore_sigint
= 1;
2087 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2088 This function is called after we know the LWP has stopped; if the LWP
2089 stopped before the expected SIGINT was delivered, then it will never have
2090 arrived. Also, if the signal was delivered to a shared queue and consumed
2091 by a different thread, it will never be delivered to this LWP. */
/* NOTE(review): fragmentary extraction — the early return after the
   !ignore_sigint guard is not visible in this view.  */
2094 maybe_clear_ignore_sigint (struct lwp_info
*lp
)
2096 if (!lp
->ignore_sigint
)
2099 if (!linux_nat_has_pending_sigint (GET_LWP (lp
->ptid
)))
2101 if (debug_linux_nat
)
2102 fprintf_unfiltered (gdb_stdlog
,
2103 "MCIS: Clearing bogus flag for %s\n",
2104 target_pid_to_str (lp
->ptid
));
2105 lp
->ignore_sigint
= 0;
2109 /* Wait until LP is stopped. */
/* NOTE(review): fragmentary extraction — the !lp->stopped guard, the
   status == 0 early return, SIGSTOP bookkeeping (lp->stopped = 1,
   lp->signalled = 0), errno resets, save_siginfo call site and many
   braces/returns are not visible in this view.  */
2112 stop_wait_callback (struct lwp_info
*lp
, void *data
)
2118 status
= wait_lwp (lp
);
/* Expected SIGINT to ignore: discard it and keep waiting.  */
2122 if (lp
->ignore_sigint
&& WIFSTOPPED (status
)
2123 && WSTOPSIG (status
) == SIGINT
)
2125 lp
->ignore_sigint
= 0;
2128 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2129 if (debug_linux_nat
)
2130 fprintf_unfiltered (gdb_stdlog
,
2131 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
2132 target_pid_to_str (lp
->ptid
),
2133 errno
? safe_strerror (errno
) : "OK");
2135 return stop_wait_callback (lp
, NULL
);
2138 maybe_clear_ignore_sigint (lp
);
2140 if (WSTOPSIG (status
) != SIGSTOP
)
2142 if (WSTOPSIG (status
) == SIGTRAP
)
2144 /* If a LWP other than the LWP that we're reporting an
2145 event for has hit a GDB breakpoint (as opposed to
2146 some random trap signal), then just arrange for it to
2147 hit it again later. We don't keep the SIGTRAP status
2148 and don't forward the SIGTRAP signal to the LWP. We
2149 will handle the current event, eventually we will
2150 resume all LWPs, and this one will get its breakpoint
2153 If we do not do this, then we run the risk that the
2154 user will delete or disable the breakpoint, but the
2155 thread will have already tripped on it. */
2157 /* Save the trap's siginfo in case we need it later. */
2160 /* Now resume this LWP and get the SIGSTOP event. */
2162 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2163 if (debug_linux_nat
)
2165 fprintf_unfiltered (gdb_stdlog
,
2166 "PTRACE_CONT %s, 0, 0 (%s)\n",
2167 target_pid_to_str (lp
->ptid
),
2168 errno
? safe_strerror (errno
) : "OK");
2170 fprintf_unfiltered (gdb_stdlog
,
2171 "SWC: Candidate SIGTRAP event in %s\n",
2172 target_pid_to_str (lp
->ptid
));
2174 /* Hold this event/waitstatus while we check to see if
2175 there are any more (we still want to get that SIGSTOP). */
2176 stop_wait_callback (lp
, NULL
);
2178 if (target_can_async_p ())
2180 /* Don't leave a pending wait status in async mode.
2181 Retrigger the breakpoint. */
2182 if (!cancel_breakpoint (lp
))
2184 /* There was no gdb breakpoint set at pc. Put
2185 the event back in the queue. */
2186 if (debug_linux_nat
)
2187 fprintf_unfiltered (gdb_stdlog
,
2188 "SWC: kill %s, %s\n",
2189 target_pid_to_str (lp
->ptid
),
2190 status_to_str ((int) status
));
2191 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (status
));
2196 /* Hold the SIGTRAP for handling by
2198 /* If there's another event, throw it back into the
2202 if (debug_linux_nat
)
2203 fprintf_unfiltered (gdb_stdlog
,
2204 "SWC: kill %s, %s\n",
2205 target_pid_to_str (lp
->ptid
),
2206 status_to_str ((int) status
));
2207 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (lp
->status
));
2209 /* Save the sigtrap event. */
2210 lp
->status
= status
;
2216 /* The thread was stopped with a signal other than
2217 SIGSTOP, and didn't accidentally trip a breakpoint. */
2219 if (debug_linux_nat
)
2221 fprintf_unfiltered (gdb_stdlog
,
2222 "SWC: Pending event %s in %s\n",
2223 status_to_str ((int) status
),
2224 target_pid_to_str (lp
->ptid
));
2226 /* Now resume this LWP and get the SIGSTOP event. */
2228 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2229 if (debug_linux_nat
)
2230 fprintf_unfiltered (gdb_stdlog
,
2231 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2232 target_pid_to_str (lp
->ptid
),
2233 errno
? safe_strerror (errno
) : "OK");
2235 /* Hold this event/waitstatus while we check to see if
2236 there are any more (we still want to get that SIGSTOP). */
2237 stop_wait_callback (lp
, NULL
);
2239 /* If the lp->status field is still empty, use it to
2240 hold this event. If not, then this event must be
2241 returned to the event queue of the LWP. */
2242 if (lp
->status
|| target_can_async_p ())
2244 if (debug_linux_nat
)
2246 fprintf_unfiltered (gdb_stdlog
,
2247 "SWC: kill %s, %s\n",
2248 target_pid_to_str (lp
->ptid
),
2249 status_to_str ((int) status
));
2251 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (status
));
2254 lp
->status
= status
;
2260 /* We caught the SIGSTOP that we intended to catch, so
2261 there's no SIGSTOP pending. */
2270 /* Return non-zero if LP has a wait status pending. */
2273 status_callback (struct lwp_info
*lp
, void *data
)
2275 /* Only report a pending wait status if we pretend that this has
2276 indeed been resumed. */
2277 return (lp
->status
!= 0 && lp
->resumed
);
2280 /* Return non-zero if LP isn't stopped. */
/* An LWP with a cached, resumed wait status also counts as running
   for the purposes of iterate_over_lwps callers.  */
2283 running_callback (struct lwp_info
*lp
, void *data
)
2285 return (lp
->stopped
== 0 || (lp
->status
!= 0 && lp
->resumed
));
2288 /* Count the LWP's that have had events. */
/* DATA is an int* accumulator; the increment and return lines are
   not visible in this fragmentary view.  */
2291 count_events_callback (struct lwp_info
*lp
, void *data
)
2295 gdb_assert (count
!= NULL
);
2297 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2298 if (lp
->status
!= 0 && lp
->resumed
2299 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
)
2305 /* Select the LWP (if any) that is currently being single-stepped. */
/* Returns non-zero for a stepping LWP with a pending status; the
   return lines are not visible in this fragmentary view.  */
2308 select_singlestep_lwp_callback (struct lwp_info
*lp
, void *data
)
2310 if (lp
->step
&& lp
->status
!= 0)
2316 /* Select the Nth LWP that has had a SIGTRAP event. */
/* SELECTOR counts down across calls; the LWP on which it reaches
   zero is selected (return lines not visible in this view).  */
2319 select_event_lwp_callback (struct lwp_info
*lp
, void *data
)
2321 int *selector
= data
;
2323 gdb_assert (selector
!= NULL
);
2325 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2326 if (lp
->status
!= 0 && lp
->resumed
2327 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
)
2328 if ((*selector
)-- == 0)
/* If LP stopped at a GDB breakpoint, arrange for the breakpoint to be
   hit again later: rewind the PC past the trap instruction and drop
   the SIGTRAP.  Returns non-zero when a breakpoint was cancelled
   (return lines not visible in this fragmentary view).  */
2335 cancel_breakpoint (struct lwp_info
*lp
)
2337 /* Arrange for a breakpoint to be hit again later. We don't keep
2338 the SIGTRAP status and don't forward the SIGTRAP signal to the
2339 LWP. We will handle the current event, eventually we will resume
2340 this LWP, and this breakpoint will trap again.
2342 If we do not do this, then we run the risk that the user will
2343 delete or disable the breakpoint, but the LWP will have already
2346 struct regcache
*regcache
= get_thread_regcache (lp
->ptid
);
2347 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
/* Compute where the breakpoint instruction itself lives, accounting
   for architectures whose PC advances past the trap.  */
2350 pc
= regcache_read_pc (regcache
) - gdbarch_decr_pc_after_break (gdbarch
);
2351 if (breakpoint_inserted_here_p (pc
))
2353 if (debug_linux_nat
)
2354 fprintf_unfiltered (gdb_stdlog
,
2355 "CB: Push back breakpoint for %s\n",
2356 target_pid_to_str (lp
->ptid
));
2358 /* Back up the PC if necessary. */
2359 if (gdbarch_decr_pc_after_break (gdbarch
))
2360 regcache_write_pc (regcache
, pc
);
/* iterate_over_lwps callback: cancel a pending breakpoint SIGTRAP in
   every LWP except the elected event LWP (DATA).
   NOTE(review): fragmentary extraction — the early return for the
   event LWP, part of the compound condition (presumably lp != event_lp
   && lp->status != 0), the lp->status clearing and the return value
   are not visible in this view.  */
2368 cancel_breakpoints_callback (struct lwp_info
*lp
, void *data
)
2370 struct lwp_info
*event_lp
= data
;
2372 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2376 /* If a LWP other than the LWP that we're reporting an event for has
2377 hit a GDB breakpoint (as opposed to some random trap signal),
2378 then just arrange for it to hit it again later. We don't keep
2379 the SIGTRAP status and don't forward the SIGTRAP signal to the
2380 LWP. We will handle the current event, eventually we will resume
2381 all LWPs, and this one will get its breakpoint trap again.
2383 If we do not do this, then we run the risk that the user will
2384 delete or disable the breakpoint, but the LWP will have already
2388 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
2389 && cancel_breakpoint (lp
))
2390 /* Throw away the SIGTRAP. */
2396 /* Select one LWP out of those that have events pending. */
2399 select_event_lwp (struct lwp_info
**orig_lp
, int *status
)
2402 int random_selector
;
2403 struct lwp_info
*event_lp
;
2405 /* Record the wait status for the original LWP. */
2406 (*orig_lp
)->status
= *status
;
2408 /* Give preference to any LWP that is being single-stepped. */
2409 event_lp
= iterate_over_lwps (select_singlestep_lwp_callback
, NULL
);
2410 if (event_lp
!= NULL
)
2412 if (debug_linux_nat
)
2413 fprintf_unfiltered (gdb_stdlog
,
2414 "SEL: Select single-step %s\n",
2415 target_pid_to_str (event_lp
->ptid
));
2419 /* No single-stepping LWP. Select one at random, out of those
2420 which have had SIGTRAP events. */
2422 /* First see how many SIGTRAP events we have. */
2423 iterate_over_lwps (count_events_callback
, &num_events
);
2425 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2426 random_selector
= (int)
2427 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2429 if (debug_linux_nat
&& num_events
> 1)
2430 fprintf_unfiltered (gdb_stdlog
,
2431 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2432 num_events
, random_selector
);
2434 event_lp
= iterate_over_lwps (select_event_lwp_callback
,
2438 if (event_lp
!= NULL
)
2440 /* Switch the event LWP. */
2441 *orig_lp
= event_lp
;
2442 *status
= event_lp
->status
;
2445 /* Flush the wait status for the event LWP. */
2446 (*orig_lp
)->status
= 0;
2449 /* Return non-zero if LP has been resumed. */
2452 resumed_callback (struct lwp_info
*lp
, void *data
)
2457 /* Stop an active thread, verify it still exists, then resume it. */
2460 stop_and_resume_callback (struct lwp_info
*lp
, void *data
)
2462 struct lwp_info
*ptr
;
2464 if (!lp
->stopped
&& !lp
->signalled
)
2466 stop_callback (lp
, NULL
);
2467 stop_wait_callback (lp
, NULL
);
2468 /* Resume if the lwp still exists. */
2469 for (ptr
= lwp_list
; ptr
; ptr
= ptr
->next
)
2472 resume_callback (lp
, NULL
);
2473 resume_set_callback (lp
, NULL
);
2479 /* Check if we should go on and pass this event to common code.
2480 Return the affected lwp if we are, or NULL otherwise. */
2481 static struct lwp_info
*
2482 linux_nat_filter_event (int lwpid
, int status
, int options
)
2484 struct lwp_info
*lp
;
2486 lp
= find_lwp_pid (pid_to_ptid (lwpid
));
2488 /* Check for stop events reported by a process we didn't already
2489 know about - anything not already in our LWP list.
2491 If we're expecting to receive stopped processes after
2492 fork, vfork, and clone events, then we'll just add the
2493 new one to our list and go back to waiting for the event
2494 to be reported - the stopped process might be returned
2495 from waitpid before or after the event is. */
2496 if (WIFSTOPPED (status
) && !lp
)
2498 linux_record_stopped_pid (lwpid
, status
);
2502 /* Make sure we don't report an event for the exit of an LWP not in
2503 our list, i.e. not part of the current process. This can happen
2504 if we detach from a program we original forked and then it
2506 if (!WIFSTOPPED (status
) && !lp
)
2509 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2510 CLONE_PTRACE processes which do not use the thread library -
2511 otherwise we wouldn't find the new LWP this way. That doesn't
2512 currently work, and the following code is currently unreachable
2513 due to the two blocks above. If it's fixed some day, this code
2514 should be broken out into a function so that we can also pick up
2515 LWPs from the new interface. */
2518 lp
= add_lwp (BUILD_LWP (lwpid
, GET_PID (inferior_ptid
)));
2519 if (options
& __WCLONE
)
2522 gdb_assert (WIFSTOPPED (status
)
2523 && WSTOPSIG (status
) == SIGSTOP
);
2526 if (!in_thread_list (inferior_ptid
))
2528 inferior_ptid
= BUILD_LWP (GET_PID (inferior_ptid
),
2529 GET_PID (inferior_ptid
));
2530 add_thread (inferior_ptid
);
2533 add_thread (lp
->ptid
);
2536 /* Save the trap's siginfo in case we need it later. */
2537 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
)
2540 /* Handle GNU/Linux's extended waitstatus for trace events. */
2541 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
2543 if (debug_linux_nat
)
2544 fprintf_unfiltered (gdb_stdlog
,
2545 "LLW: Handling extended status 0x%06x\n",
2547 if (linux_handle_extended_wait (lp
, status
, 0))
2551 /* Check if the thread has exited. */
2552 if ((WIFEXITED (status
) || WIFSIGNALED (status
)) && num_lwps
> 1)
2554 /* If this is the main thread, we must stop all threads and
2555 verify if they are still alive. This is because in the nptl
2556 thread model, there is no signal issued for exiting LWPs
2557 other than the main thread. We only get the main thread exit
2558 signal once all child threads have already exited. If we
2559 stop all the threads and use the stop_wait_callback to check
2560 if they have exited we can determine whether this signal
2561 should be ignored or whether it means the end of the debugged
2562 application, regardless of which threading model is being
2564 if (GET_PID (lp
->ptid
) == GET_LWP (lp
->ptid
))
2567 iterate_over_lwps (stop_and_resume_callback
, NULL
);
2570 if (debug_linux_nat
)
2571 fprintf_unfiltered (gdb_stdlog
,
2572 "LLW: %s exited.\n",
2573 target_pid_to_str (lp
->ptid
));
2577 /* If there is at least one more LWP, then the exit signal was
2578 not the end of the debugged application and should be
2584 /* Check if the current LWP has previously exited. In the nptl
2585 thread model, LWPs other than the main thread do not issue
2586 signals when they exit so we must check whenever the thread has
2587 stopped. A similar check is made in stop_wait_callback(). */
2588 if (num_lwps
> 1 && !linux_nat_thread_alive (lp
->ptid
))
2590 if (debug_linux_nat
)
2591 fprintf_unfiltered (gdb_stdlog
,
2592 "LLW: %s exited.\n",
2593 target_pid_to_str (lp
->ptid
));
2597 /* Make sure there is at least one thread running. */
2598 gdb_assert (iterate_over_lwps (running_callback
, NULL
));
2600 /* Discard the event. */
2604 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2605 an attempt to stop an LWP. */
2607 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGSTOP
)
2609 if (debug_linux_nat
)
2610 fprintf_unfiltered (gdb_stdlog
,
2611 "LLW: Delayed SIGSTOP caught for %s.\n",
2612 target_pid_to_str (lp
->ptid
));
2614 /* This is a delayed SIGSTOP. */
2617 registers_changed ();
2619 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2620 lp
->step
, TARGET_SIGNAL_0
);
2621 if (debug_linux_nat
)
2622 fprintf_unfiltered (gdb_stdlog
,
2623 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2625 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2626 target_pid_to_str (lp
->ptid
));
2629 gdb_assert (lp
->resumed
);
2631 /* Discard the event. */
2635 /* Make sure we don't report a SIGINT that we have already displayed
2636 for another thread. */
2637 if (lp
->ignore_sigint
2638 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGINT
)
2640 if (debug_linux_nat
)
2641 fprintf_unfiltered (gdb_stdlog
,
2642 "LLW: Delayed SIGINT caught for %s.\n",
2643 target_pid_to_str (lp
->ptid
));
2645 /* This is a delayed SIGINT. */
2646 lp
->ignore_sigint
= 0;
2648 registers_changed ();
2649 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2650 lp
->step
, TARGET_SIGNAL_0
);
2651 if (debug_linux_nat
)
2652 fprintf_unfiltered (gdb_stdlog
,
2653 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2655 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2656 target_pid_to_str (lp
->ptid
));
2659 gdb_assert (lp
->resumed
);
2661 /* Discard the event. */
2665 /* An interesting event. */
2670 /* Get the events stored in the pipe into the local queue, so they are
2671 accessible to queued_waitpid. We need to do this, since it is not
2672 always the case that the event at the head of the pipe is the event
2676 pipe_to_local_event_queue (void)
2678 if (debug_linux_nat_async
)
2679 fprintf_unfiltered (gdb_stdlog
,
2680 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2681 linux_nat_num_queued_events
);
2682 while (linux_nat_num_queued_events
)
2684 int lwpid
, status
, options
;
2685 lwpid
= linux_nat_event_pipe_pop (&status
, &options
);
2686 gdb_assert (lwpid
> 0);
2687 push_waitpid (lwpid
, status
, options
);
2691 /* Get the unprocessed events stored in the local queue back into the
2692 pipe, so the event loop realizes there's something else to
2696 local_event_queue_to_pipe (void)
2698 struct waitpid_result
*w
= waitpid_queue
;
2701 struct waitpid_result
*next
= w
->next
;
2702 linux_nat_event_pipe_push (w
->pid
,
2708 waitpid_queue
= NULL
;
2710 if (debug_linux_nat_async
)
2711 fprintf_unfiltered (gdb_stdlog
,
2712 "LEQTP: linux_nat_num_queued_events(%d)\n",
2713 linux_nat_num_queued_events
);
2717 linux_nat_wait (ptid_t ptid
, struct target_waitstatus
*ourstatus
)
2719 struct lwp_info
*lp
= NULL
;
2722 pid_t pid
= PIDGET (ptid
);
2724 if (debug_linux_nat_async
)
2725 fprintf_unfiltered (gdb_stdlog
, "LLW: enter\n");
2727 /* The first time we get here after starting a new inferior, we may
2728 not have added it to the LWP list yet - this is the earliest
2729 moment at which we know its PID. */
2732 gdb_assert (!is_lwp (inferior_ptid
));
2734 /* Upgrade the main thread's ptid. */
2735 thread_change_ptid (inferior_ptid
,
2736 BUILD_LWP (GET_PID (inferior_ptid
),
2737 GET_PID (inferior_ptid
)));
2739 lp
= add_lwp (inferior_ptid
);
2743 /* Block events while we're here. */
2744 linux_nat_async_events (sigchld_sync
);
2748 /* Make sure there is at least one LWP that has been resumed. */
2749 gdb_assert (iterate_over_lwps (resumed_callback
, NULL
));
2751 /* First check if there is a LWP with a wait status pending. */
2754 /* Any LWP that's been resumed will do. */
2755 lp
= iterate_over_lwps (status_callback
, NULL
);
2758 if (target_can_async_p ())
2759 internal_error (__FILE__
, __LINE__
,
2760 "Found an LWP with a pending status in async mode.");
2762 status
= lp
->status
;
2765 if (debug_linux_nat
&& status
)
2766 fprintf_unfiltered (gdb_stdlog
,
2767 "LLW: Using pending wait status %s for %s.\n",
2768 status_to_str (status
),
2769 target_pid_to_str (lp
->ptid
));
2772 /* But if we don't find one, we'll have to wait, and check both
2773 cloned and uncloned processes. We start with the cloned
2775 options
= __WCLONE
| WNOHANG
;
2777 else if (is_lwp (ptid
))
2779 if (debug_linux_nat
)
2780 fprintf_unfiltered (gdb_stdlog
,
2781 "LLW: Waiting for specific LWP %s.\n",
2782 target_pid_to_str (ptid
));
2784 /* We have a specific LWP to check. */
2785 lp
= find_lwp_pid (ptid
);
2787 status
= lp
->status
;
2790 if (debug_linux_nat
&& status
)
2791 fprintf_unfiltered (gdb_stdlog
,
2792 "LLW: Using pending wait status %s for %s.\n",
2793 status_to_str (status
),
2794 target_pid_to_str (lp
->ptid
));
2796 /* If we have to wait, take into account whether PID is a cloned
2797 process or not. And we have to convert it to something that
2798 the layer beneath us can understand. */
2799 options
= lp
->cloned
? __WCLONE
: 0;
2800 pid
= GET_LWP (ptid
);
2803 if (status
&& lp
->signalled
)
2805 /* A pending SIGSTOP may interfere with the normal stream of
2806 events. In a typical case where interference is a problem,
2807 we have a SIGSTOP signal pending for LWP A while
2808 single-stepping it, encounter an event in LWP B, and take the
2809 pending SIGSTOP while trying to stop LWP A. After processing
2810 the event in LWP B, LWP A is continued, and we'll never see
2811 the SIGTRAP associated with the last time we were
2812 single-stepping LWP A. */
2814 /* Resume the thread. It should halt immediately returning the
2816 registers_changed ();
2817 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2818 lp
->step
, TARGET_SIGNAL_0
);
2819 if (debug_linux_nat
)
2820 fprintf_unfiltered (gdb_stdlog
,
2821 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2822 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2823 target_pid_to_str (lp
->ptid
));
2825 gdb_assert (lp
->resumed
);
2827 /* This should catch the pending SIGSTOP. */
2828 stop_wait_callback (lp
, NULL
);
2831 if (!target_can_async_p ())
2833 /* Causes SIGINT to be passed on to the attached process. */
2842 if (target_can_async_p ())
2843 /* In async mode, don't ever block. Only look at the locally
2845 lwpid
= queued_waitpid (pid
, &status
, options
);
2847 lwpid
= my_waitpid (pid
, &status
, options
);
2851 gdb_assert (pid
== -1 || lwpid
== pid
);
2853 if (debug_linux_nat
)
2855 fprintf_unfiltered (gdb_stdlog
,
2856 "LLW: waitpid %ld received %s\n",
2857 (long) lwpid
, status_to_str (status
));
2860 lp
= linux_nat_filter_event (lwpid
, status
, options
);
2863 /* A discarded event. */
2873 /* Alternate between checking cloned and uncloned processes. */
2874 options
^= __WCLONE
;
2876 /* And every time we have checked both:
2877 In async mode, return to event loop;
2878 In sync mode, suspend waiting for a SIGCHLD signal. */
2879 if (options
& __WCLONE
)
2881 if (target_can_async_p ())
2883 /* No interesting event. */
2884 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2886 /* Get ready for the next event. */
2887 target_async (inferior_event_handler
, 0);
2889 if (debug_linux_nat_async
)
2890 fprintf_unfiltered (gdb_stdlog
, "LLW: exit (ignore)\n");
2892 return minus_one_ptid
;
2895 sigsuspend (&suspend_mask
);
2899 /* We shouldn't end up here unless we want to try again. */
2900 gdb_assert (status
== 0);
2903 if (!target_can_async_p ())
2905 clear_sigio_trap ();
2906 clear_sigint_trap ();
2911 /* Don't report signals that GDB isn't interested in, such as
2912 signals that are neither printed nor stopped upon. Stopping all
2913 threads can be a bit time-consuming so if we want decent
2914 performance with heavily multi-threaded programs, especially when
2915 they're using a high frequency timer, we'd better avoid it if we
2918 if (WIFSTOPPED (status
))
2920 int signo
= target_signal_from_host (WSTOPSIG (status
));
2922 /* If we get a signal while single-stepping, we may need special
2923 care, e.g. to skip the signal handler. Defer to common code. */
2925 && signal_stop_state (signo
) == 0
2926 && signal_print_state (signo
) == 0
2927 && signal_pass_state (signo
) == 1)
2929 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
2930 here? It is not clear we should. GDB may not expect
2931 other threads to run. On the other hand, not resuming
2932 newly attached threads may cause an unwanted delay in
2933 getting them running. */
2934 registers_changed ();
2935 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2937 if (debug_linux_nat
)
2938 fprintf_unfiltered (gdb_stdlog
,
2939 "LLW: %s %s, %s (preempt 'handle')\n",
2941 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2942 target_pid_to_str (lp
->ptid
),
2943 signo
? strsignal (signo
) : "0");
2949 if (signo
== TARGET_SIGNAL_INT
&& signal_pass_state (signo
) == 0)
2951 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2952 forwarded to the entire process group, that is, all LWPs
2953 will receive it - unless they're using CLONE_THREAD to
2954 share signals. Since we only want to report it once, we
2955 mark it as ignored for all LWPs except this one. */
2956 iterate_over_lwps (set_ignore_sigint
, NULL
);
2957 lp
->ignore_sigint
= 0;
2960 maybe_clear_ignore_sigint (lp
);
2963 /* This LWP is stopped now. */
2966 if (debug_linux_nat
)
2967 fprintf_unfiltered (gdb_stdlog
, "LLW: Candidate event %s in %s.\n",
2968 status_to_str (status
), target_pid_to_str (lp
->ptid
));
2972 /* Now stop all other LWP's ... */
2973 iterate_over_lwps (stop_callback
, NULL
);
2975 /* ... and wait until all of them have reported back that
2976 they're no longer running. */
2977 iterate_over_lwps (stop_wait_callback
, NULL
);
2979 /* If we're not waiting for a specific LWP, choose an event LWP
2980 from among those that have had events. Giving equal priority
2981 to all LWPs that have had events helps prevent
2984 select_event_lwp (&lp
, &status
);
2987 /* Now that we've selected our final event LWP, cancel any
2988 breakpoints in other LWPs that have hit a GDB breakpoint. See
2989 the comment in cancel_breakpoints_callback to find out why. */
2990 iterate_over_lwps (cancel_breakpoints_callback
, lp
);
2992 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
)
2994 if (debug_linux_nat
)
2995 fprintf_unfiltered (gdb_stdlog
,
2996 "LLW: trap ptid is %s.\n",
2997 target_pid_to_str (lp
->ptid
));
3000 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3002 *ourstatus
= lp
->waitstatus
;
3003 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3006 store_waitstatus (ourstatus
, status
);
3008 /* Get ready for the next event. */
3009 if (target_can_async_p ())
3010 target_async (inferior_event_handler
, 0);
3012 if (debug_linux_nat_async
)
3013 fprintf_unfiltered (gdb_stdlog
, "LLW: exit\n");
3019 kill_callback (struct lwp_info
*lp
, void *data
)
3022 ptrace (PTRACE_KILL
, GET_LWP (lp
->ptid
), 0, 0);
3023 if (debug_linux_nat
)
3024 fprintf_unfiltered (gdb_stdlog
,
3025 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3026 target_pid_to_str (lp
->ptid
),
3027 errno
? safe_strerror (errno
) : "OK");
3033 kill_wait_callback (struct lwp_info
*lp
, void *data
)
3037 /* We must make sure that there are no pending events (delayed
3038 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3039 program doesn't interfere with any following debugging session. */
3041 /* For cloned processes we must check both with __WCLONE and
3042 without, since the exit status of a cloned process isn't reported
3048 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, __WCLONE
);
3049 if (pid
!= (pid_t
) -1)
3051 if (debug_linux_nat
)
3052 fprintf_unfiltered (gdb_stdlog
,
3053 "KWC: wait %s received unknown.\n",
3054 target_pid_to_str (lp
->ptid
));
3055 /* The Linux kernel sometimes fails to kill a thread
3056 completely after PTRACE_KILL; that goes from the stop
3057 point in do_fork out to the one in
3058 get_signal_to_deliever and waits again. So kill it
3060 kill_callback (lp
, NULL
);
3063 while (pid
== GET_LWP (lp
->ptid
));
3065 gdb_assert (pid
== -1 && errno
== ECHILD
);
3070 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, 0);
3071 if (pid
!= (pid_t
) -1)
3073 if (debug_linux_nat
)
3074 fprintf_unfiltered (gdb_stdlog
,
3075 "KWC: wait %s received unk.\n",
3076 target_pid_to_str (lp
->ptid
));
3077 /* See the call to kill_callback above. */
3078 kill_callback (lp
, NULL
);
3081 while (pid
== GET_LWP (lp
->ptid
));
3083 gdb_assert (pid
== -1 && errno
== ECHILD
);
3088 linux_nat_kill (void)
3090 struct target_waitstatus last
;
3094 if (target_can_async_p ())
3095 target_async (NULL
, 0);
3097 /* If we're stopped while forking and we haven't followed yet,
3098 kill the other task. We need to do this first because the
3099 parent will be sleeping if this is a vfork. */
3101 get_last_target_status (&last_ptid
, &last
);
3103 if (last
.kind
== TARGET_WAITKIND_FORKED
3104 || last
.kind
== TARGET_WAITKIND_VFORKED
)
3106 ptrace (PT_KILL
, PIDGET (last
.value
.related_pid
), 0, 0);
3110 if (forks_exist_p ())
3112 linux_fork_killall ();
3113 drain_queued_events (-1);
3117 /* Stop all threads before killing them, since ptrace requires
3118 that the thread is stopped to sucessfully PTRACE_KILL. */
3119 iterate_over_lwps (stop_callback
, NULL
);
3120 /* ... and wait until all of them have reported back that
3121 they're no longer running. */
3122 iterate_over_lwps (stop_wait_callback
, NULL
);
3124 /* Kill all LWP's ... */
3125 iterate_over_lwps (kill_callback
, NULL
);
3127 /* ... and wait until we've flushed all events. */
3128 iterate_over_lwps (kill_wait_callback
, NULL
);
3131 target_mourn_inferior ();
3135 linux_nat_mourn_inferior (void)
3137 /* Destroy LWP info; it's no longer valid. */
3140 if (! forks_exist_p ())
3142 /* Normal case, no other forks available. */
3143 if (target_can_async_p ())
3144 linux_nat_async (NULL
, 0);
3145 linux_ops
->to_mourn_inferior ();
3148 /* Multi-fork case. The current inferior_ptid has exited, but
3149 there are other viable forks to debug. Delete the exiting
3150 one and context-switch to the first available. */
3151 linux_fork_mourn_inferior ();
3155 linux_nat_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3156 const char *annex
, gdb_byte
*readbuf
,
3157 const gdb_byte
*writebuf
,
3158 ULONGEST offset
, LONGEST len
)
3160 struct cleanup
*old_chain
= save_inferior_ptid ();
3163 if (is_lwp (inferior_ptid
))
3164 inferior_ptid
= pid_to_ptid (GET_LWP (inferior_ptid
));
3166 xfer
= linux_ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
3169 do_cleanups (old_chain
);
3174 linux_nat_thread_alive (ptid_t ptid
)
3178 gdb_assert (is_lwp (ptid
));
3180 /* Send signal 0 instead of anything ptrace, because ptracing a
3181 running thread errors out claiming that the thread doesn't
3183 err
= kill_lwp (GET_LWP (ptid
), 0);
3185 if (debug_linux_nat
)
3186 fprintf_unfiltered (gdb_stdlog
,
3187 "LLTA: KILL(SIG0) %s (%s)\n",
3188 target_pid_to_str (ptid
),
3189 err
? safe_strerror (err
) : "OK");
3198 linux_nat_pid_to_str (ptid_t ptid
)
3200 static char buf
[64];
3203 && ((lwp_list
&& lwp_list
->next
)
3204 || GET_PID (ptid
) != GET_LWP (ptid
)))
3206 snprintf (buf
, sizeof (buf
), "LWP %ld", GET_LWP (ptid
));
3210 return normal_pid_to_str (ptid
);
3214 sigchld_handler (int signo
)
3216 if (target_async_permitted
3217 && linux_nat_async_events_state
!= sigchld_sync
3218 && signo
== SIGCHLD
)
3219 /* It is *always* a bug to hit this. */
3220 internal_error (__FILE__
, __LINE__
,
3221 "sigchld_handler called when async events are enabled");
3223 /* Do nothing. The only reason for this handler is that it allows
3224 us to use sigsuspend in linux_nat_wait above to wait for the
3225 arrival of a SIGCHLD. */
3228 /* Accepts an integer PID; Returns a string representing a file that
3229 can be opened to get the symbols for the child process. */
3232 linux_child_pid_to_exec_file (int pid
)
3234 char *name1
, *name2
;
3236 name1
= xmalloc (MAXPATHLEN
);
3237 name2
= xmalloc (MAXPATHLEN
);
3238 make_cleanup (xfree
, name1
);
3239 make_cleanup (xfree
, name2
);
3240 memset (name2
, 0, MAXPATHLEN
);
3242 sprintf (name1
, "/proc/%d/exe", pid
);
3243 if (readlink (name1
, name2
, MAXPATHLEN
) > 0)
/* Service function for corefiles and info proc.  Parses one line of a
   /proc/PID/maps file into its fields.  Returns non-zero if a mapping
   was read, zero at end of input.  FILENAME may be left empty for
   anonymous mappings.  */
static int
read_mapping (FILE *mapfile,
	      long long *addr,
	      long long *endaddr,
	      char *permissions,
	      long long *offset,
	      char *device, long long *inode, char *filename)
{
  int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
		    addr, endaddr, permissions, offset, device, inode);

  filename[0] = '\0';
  if (ret > 0 && ret != EOF)
    {
      /* Eat everything up to EOL for the filename.  This will prevent
	 weird filenames (such as one with embedded whitespace) from
	 confusing this code.  It also makes this code more robust in
	 respect to annotations the kernel may add after the filename.

	 Note the filename is used for informational purposes
	 only.  */
      ret += fscanf (mapfile, "%[^\n]\n", filename);
    }

  return (ret != 0 && ret != EOF);
}
3278 /* Fills the "to_find_memory_regions" target vector. Lists the memory
3279 regions in the inferior for a corefile. */
3282 linux_nat_find_memory_regions (int (*func
) (CORE_ADDR
,
3284 int, int, int, void *), void *obfd
)
3286 long long pid
= PIDGET (inferior_ptid
);
3287 char mapsfilename
[MAXPATHLEN
];
3289 long long addr
, endaddr
, size
, offset
, inode
;
3290 char permissions
[8], device
[8], filename
[MAXPATHLEN
];
3291 int read
, write
, exec
;
3294 /* Compose the filename for the /proc memory map, and open it. */
3295 sprintf (mapsfilename
, "/proc/%lld/maps", pid
);
3296 if ((mapsfile
= fopen (mapsfilename
, "r")) == NULL
)
3297 error (_("Could not open %s."), mapsfilename
);
3300 fprintf_filtered (gdb_stdout
,
3301 "Reading memory regions from %s\n", mapsfilename
);
3303 /* Now iterate until end-of-file. */
3304 while (read_mapping (mapsfile
, &addr
, &endaddr
, &permissions
[0],
3305 &offset
, &device
[0], &inode
, &filename
[0]))
3307 size
= endaddr
- addr
;
3309 /* Get the segment's permissions. */
3310 read
= (strchr (permissions
, 'r') != 0);
3311 write
= (strchr (permissions
, 'w') != 0);
3312 exec
= (strchr (permissions
, 'x') != 0);
3316 fprintf_filtered (gdb_stdout
,
3317 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3318 size
, paddr_nz (addr
),
3320 write
? 'w' : ' ', exec
? 'x' : ' ');
3322 fprintf_filtered (gdb_stdout
, " for %s", filename
);
3323 fprintf_filtered (gdb_stdout
, "\n");
3326 /* Invoke the callback function to create the corefile
3328 func (addr
, size
, read
, write
, exec
, obfd
);
3335 find_signalled_thread (struct thread_info
*info
, void *data
)
3337 if (info
->stop_signal
!= TARGET_SIGNAL_0
3338 && ptid_get_pid (info
->ptid
) == ptid_get_pid (inferior_ptid
))
3344 static enum target_signal
3345 find_stop_signal (void)
3347 struct thread_info
*info
=
3348 iterate_over_threads (find_signalled_thread
, NULL
);
3351 return info
->stop_signal
;
3353 return TARGET_SIGNAL_0
;
3356 /* Records the thread's register state for the corefile note
3360 linux_nat_do_thread_registers (bfd
*obfd
, ptid_t ptid
,
3361 char *note_data
, int *note_size
,
3362 enum target_signal stop_signal
)
3364 gdb_gregset_t gregs
;
3365 gdb_fpregset_t fpregs
;
3366 unsigned long lwp
= ptid_get_lwp (ptid
);
3367 struct regcache
*regcache
= get_thread_regcache (ptid
);
3368 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3369 const struct regset
*regset
;
3371 struct cleanup
*old_chain
;
3372 struct core_regset_section
*sect_list
;
3375 old_chain
= save_inferior_ptid ();
3376 inferior_ptid
= ptid
;
3377 target_fetch_registers (regcache
, -1);
3378 do_cleanups (old_chain
);
3380 core_regset_p
= gdbarch_regset_from_core_section_p (gdbarch
);
3381 sect_list
= gdbarch_core_regset_sections (gdbarch
);
3384 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg",
3385 sizeof (gregs
))) != NULL
3386 && regset
->collect_regset
!= NULL
)
3387 regset
->collect_regset (regset
, regcache
, -1,
3388 &gregs
, sizeof (gregs
));
3390 fill_gregset (regcache
, &gregs
, -1);
3392 note_data
= (char *) elfcore_write_prstatus (obfd
,
3396 stop_signal
, &gregs
);
3398 /* The loop below uses the new struct core_regset_section, which stores
3399 the supported section names and sizes for the core file. Note that
3400 note PRSTATUS needs to be treated specially. But the other notes are
3401 structurally the same, so they can benefit from the new struct. */
3402 if (core_regset_p
&& sect_list
!= NULL
)
3403 while (sect_list
->sect_name
!= NULL
)
3405 /* .reg was already handled above. */
3406 if (strcmp (sect_list
->sect_name
, ".reg") == 0)
3411 regset
= gdbarch_regset_from_core_section (gdbarch
,
3412 sect_list
->sect_name
,
3414 gdb_assert (regset
&& regset
->collect_regset
);
3415 gdb_regset
= xmalloc (sect_list
->size
);
3416 regset
->collect_regset (regset
, regcache
, -1,
3417 gdb_regset
, sect_list
->size
);
3418 note_data
= (char *) elfcore_write_register_note (obfd
,
3421 sect_list
->sect_name
,
3428 /* For architectures that does not have the struct core_regset_section
3429 implemented, we use the old method. When all the architectures have
3430 the new support, the code below should be deleted. */
3434 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg2",
3435 sizeof (fpregs
))) != NULL
3436 && regset
->collect_regset
!= NULL
)
3437 regset
->collect_regset (regset
, regcache
, -1,
3438 &fpregs
, sizeof (fpregs
));
3440 fill_fpregset (regcache
, &fpregs
, -1);
3442 note_data
= (char *) elfcore_write_prfpreg (obfd
,
3445 &fpregs
, sizeof (fpregs
));
3451 struct linux_nat_corefile_thread_data
3457 enum target_signal stop_signal
;
3460 /* Called by gdbthread.c once per thread. Records the thread's
3461 register state for the corefile note section. */
3464 linux_nat_corefile_thread_callback (struct lwp_info
*ti
, void *data
)
3466 struct linux_nat_corefile_thread_data
*args
= data
;
3468 args
->note_data
= linux_nat_do_thread_registers (args
->obfd
,
3478 /* Fills the "to_make_corefile_note" target vector. Builds the note
3479 section for a corefile, and returns it in a malloc buffer. */
3482 linux_nat_make_corefile_notes (bfd
*obfd
, int *note_size
)
3484 struct linux_nat_corefile_thread_data thread_args
;
3485 struct cleanup
*old_chain
;
3486 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
3487 char fname
[16] = { '\0' };
3488 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
3489 char psargs
[80] = { '\0' };
3490 char *note_data
= NULL
;
3491 ptid_t current_ptid
= inferior_ptid
;
3495 if (get_exec_file (0))
3497 strncpy (fname
, strrchr (get_exec_file (0), '/') + 1, sizeof (fname
));
3498 strncpy (psargs
, get_exec_file (0), sizeof (psargs
));
3499 if (get_inferior_args ())
3502 char *psargs_end
= psargs
+ sizeof (psargs
);
3504 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3506 string_end
= memchr (psargs
, 0, sizeof (psargs
));
3507 if (string_end
!= NULL
)
3509 *string_end
++ = ' ';
3510 strncpy (string_end
, get_inferior_args (),
3511 psargs_end
- string_end
);
3514 note_data
= (char *) elfcore_write_prpsinfo (obfd
,
3516 note_size
, fname
, psargs
);
3519 /* Dump information for threads. */
3520 thread_args
.obfd
= obfd
;
3521 thread_args
.note_data
= note_data
;
3522 thread_args
.note_size
= note_size
;
3523 thread_args
.num_notes
= 0;
3524 thread_args
.stop_signal
= find_stop_signal ();
3525 iterate_over_lwps (linux_nat_corefile_thread_callback
, &thread_args
);
3526 gdb_assert (thread_args
.num_notes
!= 0);
3527 note_data
= thread_args
.note_data
;
3529 auxv_len
= target_read_alloc (¤t_target
, TARGET_OBJECT_AUXV
,
3533 note_data
= elfcore_write_note (obfd
, note_data
, note_size
,
3534 "CORE", NT_AUXV
, auxv
, auxv_len
);
3538 make_cleanup (xfree
, note_data
);
3542 /* Implement the "info proc" command. */
3545 linux_nat_info_proc_cmd (char *args
, int from_tty
)
3547 long long pid
= PIDGET (inferior_ptid
);
3550 char buffer
[MAXPATHLEN
];
3551 char fname1
[MAXPATHLEN
], fname2
[MAXPATHLEN
];
3564 /* Break up 'args' into an argv array. */
3565 if ((argv
= buildargv (args
)) == NULL
)
3568 make_cleanup_freeargv (argv
);
3570 while (argv
!= NULL
&& *argv
!= NULL
)
3572 if (isdigit (argv
[0][0]))
3574 pid
= strtoul (argv
[0], NULL
, 10);
3576 else if (strncmp (argv
[0], "mappings", strlen (argv
[0])) == 0)
3580 else if (strcmp (argv
[0], "status") == 0)
3584 else if (strcmp (argv
[0], "stat") == 0)
3588 else if (strcmp (argv
[0], "cmd") == 0)
3592 else if (strncmp (argv
[0], "exe", strlen (argv
[0])) == 0)
3596 else if (strcmp (argv
[0], "cwd") == 0)
3600 else if (strncmp (argv
[0], "all", strlen (argv
[0])) == 0)
3606 /* [...] (future options here) */
3611 error (_("No current process: you must name one."));
3613 sprintf (fname1
, "/proc/%lld", pid
);
3614 if (stat (fname1
, &dummy
) != 0)
3615 error (_("No /proc directory: '%s'"), fname1
);
3617 printf_filtered (_("process %lld\n"), pid
);
3618 if (cmdline_f
|| all
)
3620 sprintf (fname1
, "/proc/%lld/cmdline", pid
);
3621 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3623 fgets (buffer
, sizeof (buffer
), procfile
);
3624 printf_filtered ("cmdline = '%s'\n", buffer
);
3628 warning (_("unable to open /proc file '%s'"), fname1
);
3632 sprintf (fname1
, "/proc/%lld/cwd", pid
);
3633 memset (fname2
, 0, sizeof (fname2
));
3634 if (readlink (fname1
, fname2
, sizeof (fname2
)) > 0)
3635 printf_filtered ("cwd = '%s'\n", fname2
);
3637 warning (_("unable to read link '%s'"), fname1
);
3641 sprintf (fname1
, "/proc/%lld/exe", pid
);
3642 memset (fname2
, 0, sizeof (fname2
));
3643 if (readlink (fname1
, fname2
, sizeof (fname2
)) > 0)
3644 printf_filtered ("exe = '%s'\n", fname2
);
3646 warning (_("unable to read link '%s'"), fname1
);
3648 if (mappings_f
|| all
)
3650 sprintf (fname1
, "/proc/%lld/maps", pid
);
3651 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3653 long long addr
, endaddr
, size
, offset
, inode
;
3654 char permissions
[8], device
[8], filename
[MAXPATHLEN
];
3656 printf_filtered (_("Mapped address spaces:\n\n"));
3657 if (gdbarch_addr_bit (current_gdbarch
) == 32)
3659 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3662 " Size", " Offset", "objfile");
3666 printf_filtered (" %18s %18s %10s %10s %7s\n",
3669 " Size", " Offset", "objfile");
3672 while (read_mapping (procfile
, &addr
, &endaddr
, &permissions
[0],
3673 &offset
, &device
[0], &inode
, &filename
[0]))
3675 size
= endaddr
- addr
;
3677 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3678 calls here (and possibly above) should be abstracted
3679 out into their own functions? Andrew suggests using
3680 a generic local_address_string instead to print out
3681 the addresses; that makes sense to me, too. */
3683 if (gdbarch_addr_bit (current_gdbarch
) == 32)
3685 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3686 (unsigned long) addr
, /* FIXME: pr_addr */
3687 (unsigned long) endaddr
,
3689 (unsigned int) offset
,
3690 filename
[0] ? filename
: "");
3694 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3695 (unsigned long) addr
, /* FIXME: pr_addr */
3696 (unsigned long) endaddr
,
3698 (unsigned int) offset
,
3699 filename
[0] ? filename
: "");
3706 warning (_("unable to open /proc file '%s'"), fname1
);
3708 if (status_f
|| all
)
3710 sprintf (fname1
, "/proc/%lld/status", pid
);
3711 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3713 while (fgets (buffer
, sizeof (buffer
), procfile
) != NULL
)
3714 puts_filtered (buffer
);
3718 warning (_("unable to open /proc file '%s'"), fname1
);
3722 sprintf (fname1
, "/proc/%lld/stat", pid
);
3723 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3729 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3730 printf_filtered (_("Process: %d\n"), itmp
);
3731 if (fscanf (procfile
, "(%[^)]) ", &buffer
[0]) > 0)
3732 printf_filtered (_("Exec file: %s\n"), buffer
);
3733 if (fscanf (procfile
, "%c ", &ctmp
) > 0)
3734 printf_filtered (_("State: %c\n"), ctmp
);
3735 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3736 printf_filtered (_("Parent process: %d\n"), itmp
);
3737 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3738 printf_filtered (_("Process group: %d\n"), itmp
);
3739 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3740 printf_filtered (_("Session id: %d\n"), itmp
);
3741 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3742 printf_filtered (_("TTY: %d\n"), itmp
);
3743 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3744 printf_filtered (_("TTY owner process group: %d\n"), itmp
);
3745 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3746 printf_filtered (_("Flags: 0x%lx\n"), ltmp
);
3747 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3748 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3749 (unsigned long) ltmp
);
3750 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3751 printf_filtered (_("Minor faults, children: %lu\n"),
3752 (unsigned long) ltmp
);
3753 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3754 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3755 (unsigned long) ltmp
);
3756 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3757 printf_filtered (_("Major faults, children: %lu\n"),
3758 (unsigned long) ltmp
);
3759 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3760 printf_filtered (_("utime: %ld\n"), ltmp
);
3761 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3762 printf_filtered (_("stime: %ld\n"), ltmp
);
3763 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3764 printf_filtered (_("utime, children: %ld\n"), ltmp
);
3765 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3766 printf_filtered (_("stime, children: %ld\n"), ltmp
);
3767 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3768 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3770 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3771 printf_filtered (_("'nice' value: %ld\n"), ltmp
);
3772 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3773 printf_filtered (_("jiffies until next timeout: %lu\n"),
3774 (unsigned long) ltmp
);
3775 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3776 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3777 (unsigned long) ltmp
);
3778 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3779 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3781 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3782 printf_filtered (_("Virtual memory size: %lu\n"),
3783 (unsigned long) ltmp
);
3784 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3785 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp
);
3786 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3787 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp
);
3788 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3789 printf_filtered (_("Start of text: 0x%lx\n"), ltmp
);
3790 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3791 printf_filtered (_("End of text: 0x%lx\n"), ltmp
);
3792 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3793 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp
);
3794 #if 0 /* Don't know how architecture-dependent the rest is...
3795 Anyway the signal bitmap info is available from "status". */
3796 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3797 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp
);
3798 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3799 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp
);
3800 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3801 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp
);
3802 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3803 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp
);
3804 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3805 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp
);
3806 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3807 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp
);
3808 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3809 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp
);
3814 warning (_("unable to open /proc file '%s'"), fname1
);
3818 /* Implement the to_xfer_partial interface for memory reads using the /proc
3819 filesystem. Because we can use a single read() call for /proc, this
3820 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3821 but it doesn't support writes. */
3824 linux_proc_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3825 const char *annex
, gdb_byte
*readbuf
,
3826 const gdb_byte
*writebuf
,
3827 ULONGEST offset
, LONGEST len
)
3833 if (object
!= TARGET_OBJECT_MEMORY
|| !readbuf
)
3836 /* Don't bother for one word. */
3837 if (len
< 3 * sizeof (long))
3840 /* We could keep this file open and cache it - possibly one per
3841 thread. That requires some juggling, but is even faster. */
3842 sprintf (filename
, "/proc/%d/mem", PIDGET (inferior_ptid
));
3843 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
3847 /* If pread64 is available, use it. It's faster if the kernel
3848 supports it (only one syscall), and it's 64-bit safe even on
3849 32-bit platforms (for instance, SPARC debugging a SPARC64
3852 if (pread64 (fd
, readbuf
, len
, offset
) != len
)
3854 if (lseek (fd
, offset
, SEEK_SET
) == -1 || read (fd
, readbuf
, len
) != len
)
3864 /* Parse LINE as a signal set and add its set bits to SIGS. */
3867 add_line_to_sigset (const char *line
, sigset_t
*sigs
)
3869 int len
= strlen (line
) - 1;
3873 if (line
[len
] != '\n')
3874 error (_("Could not parse signal set: %s"), line
);
3882 if (*p
>= '0' && *p
<= '9')
3884 else if (*p
>= 'a' && *p
<= 'f')
3885 digit
= *p
- 'a' + 10;
3887 error (_("Could not parse signal set: %s"), line
);
3892 sigaddset (sigs
, signum
+ 1);
3894 sigaddset (sigs
, signum
+ 2);
3896 sigaddset (sigs
, signum
+ 3);
3898 sigaddset (sigs
, signum
+ 4);
3904 /* Find process PID's pending signals from /proc/pid/status and set
3908 linux_proc_pending_signals (int pid
, sigset_t
*pending
, sigset_t
*blocked
, sigset_t
*ignored
)
3911 char buffer
[MAXPATHLEN
], fname
[MAXPATHLEN
];
3914 sigemptyset (pending
);
3915 sigemptyset (blocked
);
3916 sigemptyset (ignored
);
3917 sprintf (fname
, "/proc/%d/status", pid
);
3918 procfile
= fopen (fname
, "r");
3919 if (procfile
== NULL
)
3920 error (_("Could not open %s"), fname
);
3922 while (fgets (buffer
, MAXPATHLEN
, procfile
) != NULL
)
3924 /* Normal queued signals are on the SigPnd line in the status
3925 file. However, 2.6 kernels also have a "shared" pending
3926 queue for delivering signals to a thread group, so check for
3929 Unfortunately some Red Hat kernels include the shared pending
3930 queue but not the ShdPnd status field. */
3932 if (strncmp (buffer
, "SigPnd:\t", 8) == 0)
3933 add_line_to_sigset (buffer
+ 8, pending
);
3934 else if (strncmp (buffer
, "ShdPnd:\t", 8) == 0)
3935 add_line_to_sigset (buffer
+ 8, pending
);
3936 else if (strncmp (buffer
, "SigBlk:\t", 8) == 0)
3937 add_line_to_sigset (buffer
+ 8, blocked
);
3938 else if (strncmp (buffer
, "SigIgn:\t", 8) == 0)
3939 add_line_to_sigset (buffer
+ 8, ignored
);
3946 linux_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3947 const char *annex
, gdb_byte
*readbuf
,
3948 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
3952 if (object
== TARGET_OBJECT_AUXV
)
3953 return procfs_xfer_auxv (ops
, object
, annex
, readbuf
, writebuf
,
3956 xfer
= linux_proc_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
3961 return super_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
3965 /* Create a prototype generic GNU/Linux target. The client can override
3966 it with local methods. */
3969 linux_target_install_ops (struct target_ops
*t
)
3971 t
->to_insert_fork_catchpoint
= linux_child_insert_fork_catchpoint
;
3972 t
->to_insert_vfork_catchpoint
= linux_child_insert_vfork_catchpoint
;
3973 t
->to_insert_exec_catchpoint
= linux_child_insert_exec_catchpoint
;
3974 t
->to_pid_to_exec_file
= linux_child_pid_to_exec_file
;
3975 t
->to_post_startup_inferior
= linux_child_post_startup_inferior
;
3976 t
->to_post_attach
= linux_child_post_attach
;
3977 t
->to_follow_fork
= linux_child_follow_fork
;
3978 t
->to_find_memory_regions
= linux_nat_find_memory_regions
;
3979 t
->to_make_corefile_notes
= linux_nat_make_corefile_notes
;
3981 super_xfer_partial
= t
->to_xfer_partial
;
3982 t
->to_xfer_partial
= linux_xfer_partial
;
/* Build the generic GNU/Linux ptrace target.  NOTE(review): the
   function header was elided in the extracted source; reconstructed
   as the standard linux_target entry point — confirm against
   upstream.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t;

  t = inf_ptrace_target ();
  linux_target_install_ops (t);

  return t;
}
3997 linux_trad_target (CORE_ADDR (*register_u_offset
)(struct gdbarch
*, int, int))
3999 struct target_ops
*t
;
4001 t
= inf_ptrace_trad_target (register_u_offset
);
4002 linux_target_install_ops (t
);
4007 /* target_is_async_p implementation. */
4010 linux_nat_is_async_p (void)
4012 /* NOTE: palves 2008-03-21: We're only async when the user requests
4013 it explicitly with the "maintenance set target-async" command.
4014 Someday, linux will always be async. */
4015 if (!target_async_permitted
)
4021 /* target_can_async_p implementation. */
4024 linux_nat_can_async_p (void)
4026 /* NOTE: palves 2008-03-21: We're only async when the user requests
4027 it explicitly with the "maintenance set target-async" command.
4028 Someday, linux will always be async. */
4029 if (!target_async_permitted
)
4032 /* See target.h/target_async_mask. */
4033 return linux_nat_async_mask_value
;
/* to_supports_non_stop implementation: this native target supports
   non-stop mode.  NOTE(review): body was elided in the extracted
   source; reconstructed — confirm against upstream.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
4042 /* target_async_mask implementation. */
4045 linux_nat_async_mask (int mask
)
4048 current_state
= linux_nat_async_mask_value
;
4050 if (current_state
!= mask
)
4054 linux_nat_async (NULL
, 0);
4055 linux_nat_async_mask_value
= mask
;
4059 linux_nat_async_mask_value
= mask
;
4060 linux_nat_async (inferior_event_handler
, 0);
4064 return current_state
;
4067 /* Pop an event from the event pipe. */
4070 linux_nat_event_pipe_pop (int* ptr_status
, int* ptr_options
)
4072 struct waitpid_result event
= {0};
4077 ret
= read (linux_nat_event_pipe
[0], &event
, sizeof (event
));
4079 while (ret
== -1 && errno
== EINTR
);
4081 gdb_assert (ret
== sizeof (event
));
4083 *ptr_status
= event
.status
;
4084 *ptr_options
= event
.options
;
4086 linux_nat_num_queued_events
--;
4091 /* Push an event into the event pipe. */
4094 linux_nat_event_pipe_push (int pid
, int status
, int options
)
4097 struct waitpid_result event
= {0};
4099 event
.status
= status
;
4100 event
.options
= options
;
4104 ret
= write (linux_nat_event_pipe
[1], &event
, sizeof (event
));
4105 gdb_assert ((ret
== -1 && errno
== EINTR
) || ret
== sizeof (event
));
4106 } while (ret
== -1 && errno
== EINTR
);
4108 linux_nat_num_queued_events
++;
4112 get_pending_events (void)
4114 int status
, options
, pid
;
4116 if (!target_async_permitted
4117 || linux_nat_async_events_state
!= sigchld_async
)
4118 internal_error (__FILE__
, __LINE__
,
4119 "get_pending_events called with async masked");
4124 options
= __WCLONE
| WNOHANG
;
4128 pid
= waitpid (-1, &status
, options
);
4130 while (pid
== -1 && errno
== EINTR
);
4137 pid
= waitpid (-1, &status
, options
);
4139 while (pid
== -1 && errno
== EINTR
);
4143 /* No more children reporting events. */
4146 if (debug_linux_nat_async
)
4147 fprintf_unfiltered (gdb_stdlog
, "\
4148 get_pending_events: pid(%d), status(%x), options (%x)\n",
4149 pid
, status
, options
);
4151 linux_nat_event_pipe_push (pid
, status
, options
);
4154 if (debug_linux_nat_async
)
4155 fprintf_unfiltered (gdb_stdlog
, "\
4156 get_pending_events: linux_nat_num_queued_events(%d)\n",
4157 linux_nat_num_queued_events
);
4160 /* SIGCHLD handler for async mode. */
4163 async_sigchld_handler (int signo
)
4165 if (debug_linux_nat_async
)
4166 fprintf_unfiltered (gdb_stdlog
, "async_sigchld_handler\n");
4168 get_pending_events ();
4171 /* Set SIGCHLD handling state to STATE. Returns previous state. */
4173 static enum sigchld_state
4174 linux_nat_async_events (enum sigchld_state state
)
4176 enum sigchld_state current_state
= linux_nat_async_events_state
;
4178 if (debug_linux_nat_async
)
4179 fprintf_unfiltered (gdb_stdlog
,
4180 "LNAE: state(%d): linux_nat_async_events_state(%d), "
4181 "linux_nat_num_queued_events(%d)\n",
4182 state
, linux_nat_async_events_state
,
4183 linux_nat_num_queued_events
);
4185 if (current_state
!= state
)
4188 sigemptyset (&mask
);
4189 sigaddset (&mask
, SIGCHLD
);
4191 /* Always block before changing state. */
4192 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4194 /* Set new state. */
4195 linux_nat_async_events_state
= state
;
4201 /* Block target events. */
4202 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4203 sigaction (SIGCHLD
, &sync_sigchld_action
, NULL
);
4204 /* Get events out of queue, and make them available to
4205 queued_waitpid / my_waitpid. */
4206 pipe_to_local_event_queue ();
4211 /* Unblock target events for async mode. */
4213 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4215 /* Put events we already waited on, in the pipe first, so
4217 local_event_queue_to_pipe ();
4218 /* While in masked async, we may have not collected all
4219 the pending events. Get them out now. */
4220 get_pending_events ();
4223 sigaction (SIGCHLD
, &async_sigchld_action
, NULL
);
4224 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
4227 case sigchld_default
:
4229 /* SIGCHLD default mode. */
4230 sigaction (SIGCHLD
, &sigchld_default_action
, NULL
);
4232 /* Get events out of queue, and make them available to
4233 queued_waitpid / my_waitpid. */
4234 pipe_to_local_event_queue ();
4236 /* Unblock SIGCHLD. */
4237 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
4243 return current_state
;
4246 static int async_terminal_is_ours
= 1;
4248 /* target_terminal_inferior implementation. */
4251 linux_nat_terminal_inferior (void)
4253 if (!target_is_async_p ())
4255 /* Async mode is disabled. */
4256 terminal_inferior ();
4260 /* GDB should never give the terminal to the inferior, if the
4261 inferior is running in the background (run&, continue&, etc.).
4262 This check can be removed when the common code is fixed. */
4263 if (!sync_execution
)
4266 terminal_inferior ();
4268 if (!async_terminal_is_ours
)
4271 delete_file_handler (input_fd
);
4272 async_terminal_is_ours
= 0;
4276 /* target_terminal_ours implementation. */
4279 linux_nat_terminal_ours (void)
4281 if (!target_is_async_p ())
4283 /* Async mode is disabled. */
4288 /* GDB should never give the terminal to the inferior if the
4289 inferior is running in the background (run&, continue&, etc.),
4290 but claiming it sure should. */
4293 if (!sync_execution
)
4296 if (async_terminal_is_ours
)
4299 clear_sigint_trap ();
4300 add_file_handler (input_fd
, stdin_event_handler
, 0);
4301 async_terminal_is_ours
= 1;
4304 static void (*async_client_callback
) (enum inferior_event_type event_type
,
4306 static void *async_client_context
;
4309 linux_nat_async_file_handler (int error
, gdb_client_data client_data
)
4311 async_client_callback (INF_REG_EVENT
, async_client_context
);
4314 /* target_async implementation. */
4317 linux_nat_async (void (*callback
) (enum inferior_event_type event_type
,
4318 void *context
), void *context
)
4320 if (linux_nat_async_mask_value
== 0 || !target_async_permitted
)
4321 internal_error (__FILE__
, __LINE__
,
4322 "Calling target_async when async is masked");
4324 if (callback
!= NULL
)
4326 async_client_callback
= callback
;
4327 async_client_context
= context
;
4328 add_file_handler (linux_nat_event_pipe
[0],
4329 linux_nat_async_file_handler
, NULL
);
4331 linux_nat_async_events (sigchld_async
);
4335 async_client_callback
= callback
;
4336 async_client_context
= context
;
4338 linux_nat_async_events (sigchld_sync
);
4339 delete_file_handler (linux_nat_event_pipe
[0]);
4345 send_sigint_callback (struct lwp_info
*lp
, void *data
)
4347 /* Use is_running instead of !lp->stopped, because the lwp may be
4348 stopped due to an internal event, and we want to interrupt it in
4349 that case too. What we want is to check if the thread is stopped
4350 from the point of view of the user. */
4351 if (is_running (lp
->ptid
))
4352 kill_lwp (GET_LWP (lp
->ptid
), SIGINT
);
4357 linux_nat_stop (ptid_t ptid
)
4361 if (ptid_equal (ptid
, minus_one_ptid
))
4362 iterate_over_lwps (send_sigint_callback
, &ptid
);
4365 struct lwp_info
*lp
= find_lwp_pid (ptid
);
4366 send_sigint_callback (lp
, NULL
);
4370 linux_ops
->to_stop (ptid
);
4374 linux_nat_add_target (struct target_ops
*t
)
4376 /* Save the provided single-threaded target. We save this in a separate
4377 variable because another target we've inherited from (e.g. inf-ptrace)
4378 may have saved a pointer to T; we want to use it for the final
4379 process stratum target. */
4380 linux_ops_saved
= *t
;
4381 linux_ops
= &linux_ops_saved
;
4383 /* Override some methods for multithreading. */
4384 t
->to_create_inferior
= linux_nat_create_inferior
;
4385 t
->to_attach
= linux_nat_attach
;
4386 t
->to_detach
= linux_nat_detach
;
4387 t
->to_resume
= linux_nat_resume
;
4388 t
->to_wait
= linux_nat_wait
;
4389 t
->to_xfer_partial
= linux_nat_xfer_partial
;
4390 t
->to_kill
= linux_nat_kill
;
4391 t
->to_mourn_inferior
= linux_nat_mourn_inferior
;
4392 t
->to_thread_alive
= linux_nat_thread_alive
;
4393 t
->to_pid_to_str
= linux_nat_pid_to_str
;
4394 t
->to_has_thread_control
= tc_schedlock
;
4396 t
->to_can_async_p
= linux_nat_can_async_p
;
4397 t
->to_is_async_p
= linux_nat_is_async_p
;
4398 t
->to_supports_non_stop
= linux_nat_supports_non_stop
;
4399 t
->to_async
= linux_nat_async
;
4400 t
->to_async_mask
= linux_nat_async_mask
;
4401 t
->to_terminal_inferior
= linux_nat_terminal_inferior
;
4402 t
->to_terminal_ours
= linux_nat_terminal_ours
;
4404 /* Methods for non-stop support. */
4405 t
->to_stop
= linux_nat_stop
;
4407 /* We don't change the stratum; this target will sit at
4408 process_stratum and thread_db will set at thread_stratum. This
4409 is a little strange, since this is a multi-threaded-capable
4410 target, but we want to be on the stack below thread_db, and we
4411 also want to be used for single-threaded processes. */
4415 /* TODO: Eliminate this and have libthread_db use
4416 find_target_beneath. */
4420 /* Register a method to call whenever a new thread is attached. */
4422 linux_nat_set_new_thread (struct target_ops
*t
, void (*new_thread
) (ptid_t
))
4424 /* Save the pointer. We only support a single registered instance
4425 of the GNU/Linux native target, so we do not need to map this to
4427 linux_nat_new_thread
= new_thread
;
4430 /* Return the saved siginfo associated with PTID. */
4432 linux_nat_get_siginfo (ptid_t ptid
)
4434 struct lwp_info
*lp
= find_lwp_pid (ptid
);
4436 gdb_assert (lp
!= NULL
);
4438 return &lp
->siginfo
;
4441 /* Enable/Disable async mode. */
4444 linux_nat_setup_async (void)
4446 if (pipe (linux_nat_event_pipe
) == -1)
4447 internal_error (__FILE__
, __LINE__
,
4448 "creating event pipe failed.");
4449 fcntl (linux_nat_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4450 fcntl (linux_nat_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4454 _initialize_linux_nat (void)
4458 add_info ("proc", linux_nat_info_proc_cmd
, _("\
4459 Show /proc process information about any running process.\n\
4460 Specify any process id, or use the program being debugged by default.\n\
4461 Specify any of the following keywords for detailed info:\n\
4462 mappings -- list of mapped memory regions.\n\
4463 stat -- list a bunch of random process info.\n\
4464 status -- list a different bunch of random process info.\n\
4465 all -- list all available /proc info."));
4467 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance
,
4468 &debug_linux_nat
, _("\
4469 Set debugging of GNU/Linux lwp module."), _("\
4470 Show debugging of GNU/Linux lwp module."), _("\
4471 Enables printf debugging output."),
4473 show_debug_linux_nat
,
4474 &setdebuglist
, &showdebuglist
);
4476 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance
,
4477 &debug_linux_nat_async
, _("\
4478 Set debugging of GNU/Linux async lwp module."), _("\
4479 Show debugging of GNU/Linux async lwp module."), _("\
4480 Enables printf debugging output."),
4482 show_debug_linux_nat_async
,
4483 &setdebuglist
, &showdebuglist
);
4485 /* Get the default SIGCHLD action. Used while forking an inferior
4486 (see linux_nat_create_inferior/linux_nat_async_events). */
4487 sigaction (SIGCHLD
, NULL
, &sigchld_default_action
);
4489 /* Block SIGCHLD by default. Doing this early prevents it getting
4490 unblocked if an exception is thrown due to an error while the
4491 inferior is starting (sigsetjmp/siglongjmp). */
4492 sigemptyset (&mask
);
4493 sigaddset (&mask
, SIGCHLD
);
4494 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4496 /* Save this mask as the default. */
4497 sigprocmask (SIG_SETMASK
, NULL
, &normal_mask
);
4499 /* The synchronous SIGCHLD handler. */
4500 sync_sigchld_action
.sa_handler
= sigchld_handler
;
4501 sigemptyset (&sync_sigchld_action
.sa_mask
);
4502 sync_sigchld_action
.sa_flags
= SA_RESTART
;
4504 /* Make it the default. */
4505 sigaction (SIGCHLD
, &sync_sigchld_action
, NULL
);
4507 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4508 sigprocmask (SIG_SETMASK
, NULL
, &suspend_mask
);
4509 sigdelset (&suspend_mask
, SIGCHLD
);
4511 /* SIGCHLD handler for async mode. */
4512 async_sigchld_action
.sa_handler
= async_sigchld_handler
;
4513 sigemptyset (&async_sigchld_action
.sa_mask
);
4514 async_sigchld_action
.sa_flags
= SA_RESTART
;
4516 linux_nat_setup_async ();
4518 add_setshow_boolean_cmd ("disable-randomization", class_support
,
4519 &disable_randomization
, _("\
4520 Set disabling of debuggee's virtual address space randomization."), _("\
4521 Show disabling of debuggee's virtual address space randomization."), _("\
4522 When this mode is on (which is the default), randomization of the virtual\n\
4523 address space is disabled. Standalone programs run with the randomization\n\
4524 enabled by default on some platforms."),
4525 &set_disable_randomization
,
4526 &show_disable_randomization
,
4527 &setlist
, &showlist
);
4531 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4532 the GNU/Linux Threads library and therefore doesn't really belong
4535 /* Read variable NAME in the target and return its value if found.
4536 Otherwise return zero. It is assumed that the type of the variable
4540 get_signo (const char *name
)
4542 struct minimal_symbol
*ms
;
4545 ms
= lookup_minimal_symbol (name
, NULL
, NULL
);
4549 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms
), (gdb_byte
*) &signo
,
4550 sizeof (signo
)) != 0)
4556 /* Return the set of signals used by the threads library in *SET. */
4559 lin_thread_get_thread_signals (sigset_t
*set
)
4561 struct sigaction action
;
4562 int restart
, cancel
;
4563 sigset_t blocked_mask
;
4565 sigemptyset (&blocked_mask
);
4568 restart
= get_signo ("__pthread_sig_restart");
4569 cancel
= get_signo ("__pthread_sig_cancel");
4571 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4572 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4573 not provide any way for the debugger to query the signal numbers -
4574 fortunately they don't change! */
4577 restart
= __SIGRTMIN
;
4580 cancel
= __SIGRTMIN
+ 1;
4582 sigaddset (set
, restart
);
4583 sigaddset (set
, cancel
);
4585 /* The GNU/Linux Threads library makes terminating threads send a
4586 special "cancel" signal instead of SIGCHLD. Make sure we catch
4587 those (to prevent them from terminating GDB itself, which is
4588 likely to be their default action) and treat them the same way as
4591 action
.sa_handler
= sigchld_handler
;
4592 sigemptyset (&action
.sa_mask
);
4593 action
.sa_flags
= SA_RESTART
;
4594 sigaction (cancel
, &action
, NULL
);
4596 /* We block the "cancel" signal throughout this code ... */
4597 sigaddset (&blocked_mask
, cancel
);
4598 sigprocmask (SIG_BLOCK
, &blocked_mask
, NULL
);
4600 /* ... except during a sigsuspend. */
4601 sigdelset (&suspend_mask
, cancel
);