Refactor native follow-fork.
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "nat/linux-ptrace.h"
34 #include "nat/linux-procfs.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
37 #include "gdbcmd.h"
38 #include "regcache.h"
39 #include "regset.h"
40 #include "inf-child.h"
41 #include "inf-ptrace.h"
42 #include "auxv.h"
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include <sys/stat.h> /* for struct stat */
49 #include <fcntl.h> /* for O_RDONLY */
50 #include "inf-loop.h"
51 #include "event-loop.h"
52 #include "event-top.h"
53 #include <pwd.h>
54 #include <sys/types.h>
55 #include <dirent.h>
56 #include "xml-support.h"
57 #include <sys/vfs.h>
58 #include "solib.h"
59 #include "nat/linux-osdata.h"
60 #include "linux-tdep.h"
61 #include "symfile.h"
62 #include "agent.h"
63 #include "tracepoint.h"
64 #include "exceptions.h"
65 #include "buffer.h"
66 #include "target-descriptions.h"
67 #include "filestuff.h"
68 #include "objfiles.h"
69
70 #ifndef SPUFS_MAGIC
71 #define SPUFS_MAGIC 0x23c9b64e
72 #endif
73
74 #ifdef HAVE_PERSONALITY
75 # include <sys/personality.h>
76 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
77 # define ADDR_NO_RANDOMIZE 0x0040000
78 # endif
79 #endif /* HAVE_PERSONALITY */
80
81 /* This comment documents the high-level logic of this file.
82
83 Waiting for events in sync mode
84 ===============================
85
86 When waiting for an event in a specific thread, we just use waitpid, passing
87 the specific pid, and not passing WNOHANG.
88
89 When waiting for an event in all threads, waitpid alone is not adequate.
90 Prior to version 2.4, Linux could wait for events either in the main
91 thread or in secondary threads, but not in both at once (2.4 added the
92 __WALL flag), so a blocking waitpid might miss an event. The solution
93 is to use non-blocking waitpid together with sigsuspend. First, we use
94 non-blocking waitpid to get an event in the main process, if any.
95 Second, we use non-blocking waitpid with the __WCLONE flag to check for
96 events in cloned processes. If nothing is found, we use sigsuspend to
97 wait for SIGCHLD -- it is delivered both for events in the main
98 debugged process and for events in cloned processes. As soon as we
99 know there's an event, we go back to calling non-blocking waitpid
100 with and without __WCLONE.
101
102 Note that SIGCHLD should be blocked between the waitpid and sigsuspend
103 calls, so that we don't miss a signal. If SIGCHLD arrives while it is
104 blocked, the signal becomes pending and sigsuspend immediately
105 notices it and returns.
106
107 Waiting for events in async mode
108 ================================
109
110 In async mode, GDB should always be ready to handle both user input
111 and target events, so neither blocking waitpid nor sigsuspend are
112 viable options. Instead, we should asynchronously notify the GDB main
113 event loop whenever there's an unprocessed event from the target. We
114 detect asynchronous target events by handling SIGCHLD signals. To
115 notify the event loop about target events, the self-pipe trick is used
116 --- a pipe is registered as waitable event source in the event loop,
117 the event loop select/poll's on the read end of this pipe (as well on
118 other event sources, e.g., stdin), and the SIGCHLD handler writes a
119 byte to this pipe. This is more portable than relying on
120 pselect/ppoll, since on kernels that lack those syscalls, libc
121 emulates them with select/poll+sigprocmask, and that is racy
122 (a.k.a. plain broken).
123
124 Obviously, failing to notify the event loop when there's a target
125 event is bad. OTOH, if we notify the event loop when there's no
126 event from the target, linux_nat_wait will detect that there's no real
127 event to report, and return an event of type TARGET_WAITKIND_IGNORE.
128 This is mostly harmless, but it wastes time and is better avoided.
129
130 The main design point is that every time GDB is outside linux-nat.c,
131 we have a SIGCHLD handler installed that is called when something
132 happens to the target and notifies the GDB event loop. Whenever GDB
133 core decides to handle the event, and calls into linux-nat.c, we
134 process things as in sync mode, except that we never block in
135 sigsuspend.
136
137 While processing an event, we may end up momentarily blocked in
138 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
139 return quickly. E.g., in all-stop mode, before reporting to the core
140 that an LWP hit a breakpoint, all LWPs are stopped by sending them
141 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
142 Note that this is different from blocking indefinitely waiting for the
143 next event --- here, we're already handling an event.
144
145 Use of signals
146 ==============
147
148 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
149 signal is not entirely significant; we just need a signal that is sure to be
150 delivered, so that we can intercept it. SIGSTOP's advantage is that it cannot
151 be blocked. A disadvantage is that it is not a real-time signal, so it can
152 only be queued once; we do not keep track of other sources of SIGSTOP.
153
154 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
155 use them, because they have special behavior when the signal is generated -
156 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
157 kills the entire thread group.
158
159 A delivered SIGSTOP would stop the entire thread group, not just the thread we
160 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
161 cancel it (by PTRACE_CONT without passing SIGSTOP).
162
163 We could use a real-time signal instead. This would solve those problems; we
164 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
165 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
166 generates it, and there are races with trying to find a signal that is not
167 blocked. */
168
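/* As an illustration of the self-pipe trick described above, here is
   a minimal standalone sketch (not part of this file; the handler
   name is hypothetical, and error handling is omitted):

     static int event_pipe[2];

     static void
     handle_sigchld (int signo)
     {
       int saved_errno = errno;

       // Async-signal-safe: just make the read end of the pipe
       // readable; select/poll in the event loop wakes up, and the
       // real waitpid work happens outside the signal handler.
       write (event_pipe[1], "+", 1);
       errno = saved_errno;
     }

   Setup consists of creating the pipe, marking both ends
   non-blocking, installing the handler with sigaction, and
   registering event_pipe[0] as an event source with the event loop.
   In this file, the pipe is linux_nat_event_pipe, written and
   drained by async_file_mark and async_file_flush below.  */
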
169 #ifndef O_LARGEFILE
170 #define O_LARGEFILE 0
171 #endif
172
173 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
174 the use of the multi-threaded target. */
175 static struct target_ops *linux_ops;
176 static struct target_ops linux_ops_saved;
177
178 /* The method to call, if any, when a new thread is attached. */
179 static void (*linux_nat_new_thread) (struct lwp_info *);
180
181 /* The method to call, if any, when a new fork is attached. */
182 static linux_nat_new_fork_ftype *linux_nat_new_fork;
183
184 /* The method to call, if any, when a process is no longer
185 attached. */
186 static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
187
188 /* Hook to call prior to resuming a thread. */
189 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
190
191 /* The method to call, if any, when the siginfo object needs to be
192 converted between the layout returned by ptrace, and the layout in
193 the architecture of the inferior. */
194 static int (*linux_nat_siginfo_fixup) (siginfo_t *,
195 gdb_byte *,
196 int);
197
198 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
199 Called by our to_xfer_partial. */
200 static target_xfer_partial_ftype *super_xfer_partial;
201
202 /* The saved to_close method, inherited from inf-ptrace.c.
203 Called by our to_close. */
204 static void (*super_close) (struct target_ops *);
205
206 static unsigned int debug_linux_nat;
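
/* "show" callback for the debug flag above (its registration, not
   shown in this excerpt, pairs it with a corresponding "set"
   command).  */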
207 static void
208 show_debug_linux_nat (struct ui_file *file, int from_tty,
209 struct cmd_list_element *c, const char *value)
210 {
211 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
212 value);
213 }
214
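/* An entry in a list of (PID, wait status) pairs. stopped_pids
   below records new children whose initial stop has already been
   collected with waitpid, before GDB is ready to process the
   corresponding ptrace event (see linux_handle_extended_wait and
   lin_lwp_attach_lwp).  */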
215 struct simple_pid_list
216 {
217 int pid;
218 int status;
219 struct simple_pid_list *next;
220 };
221 struct simple_pid_list *stopped_pids;
222
223 /* Async mode support. */
224
225 /* The read/write ends of the pipe registered as waitable file in the
226 event loop. */
227 static int linux_nat_event_pipe[2] = { -1, -1 };
228
229 /* Flush the event pipe. */
230
231 static void
232 async_file_flush (void)
233 {
234 int ret;
235 char buf;
236
237 do
238 {
239 ret = read (linux_nat_event_pipe[0], &buf, 1);
240 }
241 while (ret >= 0 || (ret == -1 && errno == EINTR));
242 }
243
244 /* Put something (anything, doesn't matter what, or how much) in the
245 event pipe, so that the select/poll in the event loop realizes we have
246 something to process. */
247
248 static void
249 async_file_mark (void)
250 {
251 int ret;
252
253 /* It doesn't really matter what the pipe contains, as long as we end
254 up with something in it. Might as well flush the previous
255 left-overs. */
256 async_file_flush ();
257
258 do
259 {
260 ret = write (linux_nat_event_pipe[1], "+", 1);
261 }
262 while (ret == -1 && errno == EINTR);
263
264 /* Ignore EAGAIN. If the pipe is full, the event loop will already
265 be awakened anyway. */
266 }
267
268 static int kill_lwp (int lwpid, int signo);
269
270 static int stop_callback (struct lwp_info *lp, void *data);
271
272 static void block_child_signals (sigset_t *prev_mask);
273 static void restore_child_signals_mask (sigset_t *prev_mask);
274
275 struct lwp_info;
276 static struct lwp_info *add_lwp (ptid_t ptid);
277 static void purge_lwp_list (int pid);
278 static void delete_lwp (ptid_t ptid);
279 static struct lwp_info *find_lwp_pid (ptid_t ptid);
280
281 \f
282 /* Trivial list manipulation functions to keep track of a list of
283 new stopped processes. */
284 static void
285 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
286 {
287 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
288
289 new_pid->pid = pid;
290 new_pid->status = status;
291 new_pid->next = *listp;
292 *listp = new_pid;
293 }
294
295 static int
296 in_pid_list_p (struct simple_pid_list *list, int pid)
297 {
298 struct simple_pid_list *p;
299
300 for (p = list; p != NULL; p = p->next)
301 if (p->pid == pid)
302 return 1;
303 return 0;
304 }
305
306 static int
307 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
308 {
309 struct simple_pid_list **p;
310
311 for (p = listp; *p != NULL; p = &(*p)->next)
312 if ((*p)->pid == pid)
313 {
314 struct simple_pid_list *next = (*p)->next;
315
316 *statusp = (*p)->status;
317 xfree (*p);
318 *p = next;
319 return 1;
320 }
321 return 0;
322 }
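
/* Typical usage of the list above (a sketch based on the callers in
   this file): when a new child's initial stop is collected early,
   record it with

     add_to_pid_list (&stopped_pids, lwpid, status);

   and later, when the PTRACE_EVENT_* for that child is processed,
   claim it with

     if (pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... use the cached STATUS ...  */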
323
324 /* Initialize ptrace warnings and check for supported ptrace
325 features given PID. */
326
327 static void
328 linux_init_ptrace (pid_t pid)
329 {
330 linux_enable_event_reporting (pid);
331 linux_ptrace_init_warnings ();
332 }
333
334 static void
335 linux_child_post_attach (struct target_ops *self, int pid)
336 {
337 linux_init_ptrace (pid);
338 }
339
340 static void
341 linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
342 {
343 linux_init_ptrace (ptid_get_pid (ptid));
344 }
345
346 /* Return the number of known LWPs in the tgid given by PID. */
347
348 static int
349 num_lwps (int pid)
350 {
351 int count = 0;
352 struct lwp_info *lp;
353
354 for (lp = lwp_list; lp; lp = lp->next)
355 if (ptid_get_pid (lp->ptid) == pid)
356 count++;
357
358 return count;
359 }
360
361 /* Call delete_lwp with a prototype compatible with make_cleanup. */
362
363 static void
364 delete_lwp_cleanup (void *lp_voidp)
365 {
366 struct lwp_info *lp = lp_voidp;
367
368 delete_lwp (lp->ptid);
369 }
370
371 /* Target hook for follow_fork. On entry inferior_ptid must be the
372 ptid of the followed inferior. At return, inferior_ptid will be
373 unchanged. */
374
375 static int
376 linux_child_follow_fork (struct target_ops *ops, int follow_child,
377 int detach_fork)
378 {
379 if (!follow_child)
380 {
381 struct lwp_info *child_lp = NULL;
382 int status = W_STOPCODE (0);
383 struct cleanup *old_chain;
384 int has_vforked;
385 int parent_pid, child_pid;
386
387 has_vforked = (inferior_thread ()->pending_follow.kind
388 == TARGET_WAITKIND_VFORKED);
389 parent_pid = ptid_get_lwp (inferior_ptid);
390 if (parent_pid == 0)
391 parent_pid = ptid_get_pid (inferior_ptid);
392 child_pid
393 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
394
395
396 /* We're already attached to the parent, by default. */
397 old_chain = save_inferior_ptid ();
398 inferior_ptid = ptid_build (child_pid, child_pid, 0);
399 child_lp = add_lwp (inferior_ptid);
400 child_lp->stopped = 1;
401 child_lp->last_resume_kind = resume_stop;
402
403 /* Detach new forked process? */
404 if (detach_fork)
405 {
406 make_cleanup (delete_lwp_cleanup, child_lp);
407
408 if (linux_nat_prepare_to_resume != NULL)
409 linux_nat_prepare_to_resume (child_lp);
410
411 /* When debugging an inferior in an architecture that supports
412 hardware single stepping on a kernel without commit
413 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
414 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
415 set if the parent process had them set.
416 To work around this, single step the child process
417 once before detaching to clear the flags. */
418
419 if (!gdbarch_software_single_step_p (target_thread_architecture
420 (child_lp->ptid)))
421 {
422 linux_disable_event_reporting (child_pid);
423 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
424 perror_with_name (_("Couldn't do single step"));
425 if (my_waitpid (child_pid, &status, 0) < 0)
426 perror_with_name (_("Couldn't wait vfork process"));
427 }
428
429 if (WIFSTOPPED (status))
430 {
431 int signo;
432
433 signo = WSTOPSIG (status);
434 if (signo != 0
435 && !signal_pass_state (gdb_signal_from_host (signo)))
436 signo = 0;
437 ptrace (PTRACE_DETACH, child_pid, 0, signo);
438 }
439
440 /* Deletes child_lp (via the cleanup) and resets inferior_ptid to the parent's ptid. */
441 do_cleanups (old_chain);
442 }
443 else
444 {
445 /* Let the thread_db layer learn about this new process. */
446 check_for_thread_db ();
447 }
448
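/* If we detached above, the cleanups (including the restore of
   inferior_ptid) have already run and this is a no-op; in the
   non-detach case this restores inferior_ptid to the parent.  */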
449 do_cleanups (old_chain);
450
451 if (has_vforked)
452 {
453 struct lwp_info *parent_lp;
454
455 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
456 gdb_assert (linux_supports_tracefork () >= 0);
457
458 if (linux_supports_tracevforkdone ())
459 {
460 if (debug_linux_nat)
461 fprintf_unfiltered (gdb_stdlog,
462 "LCFF: waiting for VFORK_DONE on %d\n",
463 parent_pid);
464 parent_lp->stopped = 1;
465
466 /* We'll handle the VFORK_DONE event like any other
467 event, in target_wait. */
468 }
469 else
470 {
471 /* We can't insert breakpoints until the child has
472 finished with the shared memory region. We need to
473 wait until that happens. Ideal would be to just
474 call:
475 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
476 - waitpid (parent_pid, &status, __WALL);
477 However, most architectures can't handle a syscall
478 being traced on the way out if it wasn't traced on
479 the way in.
480
481 We might also consider looping, continuing the child
482 until it exits or gets a SIGTRAP. One problem is
483 that the child might call ptrace with PTRACE_TRACEME.
484
485 There's no simple and reliable way to figure out when
486 the vforked child will be done with its copy of the
487 shared memory. We could step it out of the syscall,
488 two instructions, let it go, and then single-step the
489 parent once. When we have hardware single-step, this
490 would work; with software single-step it could still
491 be made to work but we'd have to be able to insert
492 single-step breakpoints in the child, and we'd have
493 to insert -just- the single-step breakpoint in the
494 parent. Very awkward.
495
496 In the end, the best we can do is to make sure it
497 runs for a little while. Hopefully it will be out of
498 range of any breakpoints we reinsert. Usually this
499 is only the single-step breakpoint at vfork's return
500 point. */
501
502 if (debug_linux_nat)
503 fprintf_unfiltered (gdb_stdlog,
504 "LCFF: no VFORK_DONE "
505 "support, sleeping a bit\n");
506
507 usleep (10000);
508
509 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
510 and leave it pending. The next linux_nat_resume call
511 will notice the pending event, and bypass actually
512 resuming the inferior. */
513 parent_lp->status = 0;
514 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
515 parent_lp->stopped = 1;
516
517 /* If we're in async mode, we need to tell the event loop
518 there's something here to process. */
519 if (target_can_async_p ())
520 async_file_mark ();
521 }
522 }
523 }
524 else
525 {
526 struct lwp_info *child_lp;
527
528 child_lp = add_lwp (inferior_ptid);
529 child_lp->stopped = 1;
530 child_lp->last_resume_kind = resume_stop;
531
532 /* Let the thread_db layer learn about this new process. */
533 check_for_thread_db ();
534 }
535
536 return 0;
537 }
538
539 \f
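/* The catchpoint hooks below follow the target convention of
   returning 0 on success and nonzero if the requested event type is
   not supported; on GNU/Linux, fork/vfork/exec catchpoints are
   available exactly when ptrace fork event reporting is (see
   linux_supports_tracefork).  */
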
540 static int
541 linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
542 {
543 return !linux_supports_tracefork ();
544 }
545
546 static int
547 linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
548 {
549 return 0;
550 }
551
552 static int
553 linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
554 {
555 return !linux_supports_tracefork ();
556 }
557
558 static int
559 linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
560 {
561 return 0;
562 }
563
564 static int
565 linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
566 {
567 return !linux_supports_tracefork ();
568 }
569
570 static int
571 linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
572 {
573 return 0;
574 }
575
576 static int
577 linux_child_set_syscall_catchpoint (struct target_ops *self,
578 int pid, int needed, int any_count,
579 int table_size, int *table)
580 {
581 if (!linux_supports_tracesysgood ())
582 return 1;
583
584 /* On GNU/Linux, we ignore the arguments. It means that we only
585 enable the syscall catchpoints, but do not disable them.
586
587 Also, we do not use the `table' information because we do not
588 filter system calls here. We let GDB do the logic for us. */
589 return 0;
590 }
591
592 /* On GNU/Linux there are no real LWPs. The closest things to LWPs
593 are processes sharing the same VM space. A multi-threaded process
594 is basically a group of such processes. However, such a grouping
595 is almost entirely a user-space issue; the kernel doesn't enforce
596 such a grouping at all (this might change in the future). In
597 general, we'll rely on the threads library (i.e. the GNU/Linux
598 Threads library) to provide such a grouping.
599
600 It is perfectly possible to write a multi-threaded application
601 without the assistance of a threads library, by using the clone
602 system call directly. This module should be able to give some
603 rudimentary support for debugging such applications if developers
604 specify the CLONE_PTRACE flag in the clone system call, and are
605 using the Linux kernel 2.4 or above.
606
607 Note that there are some peculiarities in GNU/Linux that affect
608 this code:
609
610 - In general one should specify the __WCLONE flag to waitpid in
611 order to make it report events for any of the cloned processes
612 (and leave it out for the initial process). However, if a cloned
613 process has exited the exit status is only reported if the
614 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
615 we cannot use it since GDB must work on older systems too.
616
617 - When a traced, cloned process exits and is waited for by the
618 debugger, the kernel reassigns it to the original parent and
619 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
620 library doesn't notice this, which leads to the "zombie problem":
621 When debugged, a multi-threaded process that spawns a lot of
622 threads will run out of processes, even if the threads exit,
623 because the "zombies" stay around. */
624
625 /* List of known LWPs. */
626 struct lwp_info *lwp_list;
627 \f
628
629 /* Original signal mask. */
630 static sigset_t normal_mask;
631
632 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
633 _initialize_linux_nat. */
634 static sigset_t suspend_mask;
635
636 /* Signals to block so that sigsuspend works. */
637 static sigset_t blocked_mask;
638
639 /* SIGCHLD action. */
640 struct sigaction sigchld_action;
641
642 /* Block child signals (SIGCHLD and linux threads signals), and store
643 the previous mask in PREV_MASK. */
644
645 static void
646 block_child_signals (sigset_t *prev_mask)
647 {
648 /* Make sure SIGCHLD is blocked. */
649 if (!sigismember (&blocked_mask, SIGCHLD))
650 sigaddset (&blocked_mask, SIGCHLD);
651
652 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
653 }
654
655 /* Restore child signals mask, previously returned by
656 block_child_signals. */
657
658 static void
659 restore_child_signals_mask (sigset_t *prev_mask)
660 {
661 sigprocmask (SIG_SETMASK, prev_mask, NULL);
662 }
663
664 /* Mask of signals to pass directly to the inferior. */
665 static sigset_t pass_mask;
666
667 /* Update signals to pass to the inferior. */
668 static void
669 linux_nat_pass_signals (struct target_ops *self,
670 int numsigs, unsigned char *pass_signals)
671 {
672 int signo;
673
674 sigemptyset (&pass_mask);
675
676 for (signo = 1; signo < NSIG; signo++)
677 {
678 int target_signo = gdb_signal_from_host (signo);
679 if (target_signo < numsigs && pass_signals[target_signo])
680 sigaddset (&pass_mask, signo);
681 }
682 }
683
684 \f
685
686 /* Prototypes for local functions. */
687 static int stop_wait_callback (struct lwp_info *lp, void *data);
688 static int linux_thread_alive (ptid_t ptid);
689 static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
690
691 \f
692
693 /* Destroy and free LP. */
694
695 static void
696 lwp_free (struct lwp_info *lp)
697 {
698 xfree (lp->arch_private);
699 xfree (lp);
700 }
701
702 /* Remove all LWPs belonging to PID from the lwp list. */
703
704 static void
705 purge_lwp_list (int pid)
706 {
707 struct lwp_info *lp, *lpprev, *lpnext;
708
709 lpprev = NULL;
710
711 for (lp = lwp_list; lp; lp = lpnext)
712 {
713 lpnext = lp->next;
714
715 if (ptid_get_pid (lp->ptid) == pid)
716 {
717 if (lp == lwp_list)
718 lwp_list = lp->next;
719 else
720 lpprev->next = lp->next;
721
722 lwp_free (lp);
723 }
724 else
725 lpprev = lp;
726 }
727 }
728
729 /* Add the LWP specified by PTID to the list. PTID is the first LWP
730 in the process. Return a pointer to the structure describing the
731 new LWP.
732
733 This differs from add_lwp in that we don't let the arch specific
734 bits know about this new thread. Current clients of this callback
735 take the opportunity to install watchpoints in the new thread, and
736 we shouldn't do that for the first thread. If we're spawning a
737 child ("run"), the thread executes the shell wrapper first, and we
738 shouldn't touch it until it execs the program we want to debug.
739 For "attach", it'd be okay to call the callback, but it's not
740 necessary, because watchpoints can't yet have been inserted into
741 the inferior. */
742
743 static struct lwp_info *
744 add_initial_lwp (ptid_t ptid)
745 {
746 struct lwp_info *lp;
747
748 gdb_assert (ptid_lwp_p (ptid));
749
750 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
751
752 memset (lp, 0, sizeof (struct lwp_info));
753
754 lp->last_resume_kind = resume_continue;
755 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
756
757 lp->ptid = ptid;
758 lp->core = -1;
759
760 lp->next = lwp_list;
761 lwp_list = lp;
762
763 return lp;
764 }
765
766 /* Add the LWP specified by PTID to the list. Return a pointer to the
767 structure describing the new LWP. The LWP should already be
768 stopped. */
769
770 static struct lwp_info *
771 add_lwp (ptid_t ptid)
772 {
773 struct lwp_info *lp;
774
775 lp = add_initial_lwp (ptid);
776
777 /* Let the arch specific bits know about this new thread. Current
778 clients of this callback take the opportunity to install
779 watchpoints in the new thread. We don't do this for the first
780 thread though. See add_initial_lwp. */
781 if (linux_nat_new_thread != NULL)
782 linux_nat_new_thread (lp);
783
784 return lp;
785 }
786
787 /* Remove the LWP specified by PTID from the list. */
788
789 static void
790 delete_lwp (ptid_t ptid)
791 {
792 struct lwp_info *lp, *lpprev;
793
794 lpprev = NULL;
795
796 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
797 if (ptid_equal (lp->ptid, ptid))
798 break;
799
800 if (!lp)
801 return;
802
803 if (lpprev)
804 lpprev->next = lp->next;
805 else
806 lwp_list = lp->next;
807
808 lwp_free (lp);
809 }
810
811 /* Return a pointer to the structure describing the LWP corresponding
812 to PTID. If no corresponding LWP could be found, return NULL. */
813
814 static struct lwp_info *
815 find_lwp_pid (ptid_t ptid)
816 {
817 struct lwp_info *lp;
818 int lwp;
819
820 if (ptid_lwp_p (ptid))
821 lwp = ptid_get_lwp (ptid);
822 else
823 lwp = ptid_get_pid (ptid);
824
825 for (lp = lwp_list; lp; lp = lp->next)
826 if (lwp == ptid_get_lwp (lp->ptid))
827 return lp;
828
829 return NULL;
830 }
831
832 /* Call CALLBACK with its second argument set to DATA for every LWP in
833 the list. If CALLBACK returns 1 for a particular LWP, return a
834 pointer to the structure describing that LWP immediately.
835 Otherwise return NULL. */
836
837 struct lwp_info *
838 iterate_over_lwps (ptid_t filter,
839 int (*callback) (struct lwp_info *, void *),
840 void *data)
841 {
842 struct lwp_info *lp, *lpnext;
843
844 for (lp = lwp_list; lp; lp = lpnext)
845 {
846 lpnext = lp->next;
847
848 if (ptid_match (lp->ptid, filter))
849 {
850 if ((*callback) (lp, data))
851 return lp;
852 }
853 }
854
855 return NULL;
856 }
857
858 /* Update our internal state when changing from one checkpoint to
859 another indicated by NEW_PTID. We can only switch single-threaded
860 applications, so we only create one new LWP, and the previous list
861 is discarded. */
862
863 void
864 linux_nat_switch_fork (ptid_t new_ptid)
865 {
866 struct lwp_info *lp;
867
868 purge_lwp_list (ptid_get_pid (inferior_ptid));
869
870 lp = add_lwp (new_ptid);
871 lp->stopped = 1;
872
873 /* This changes the thread's ptid while preserving the gdb thread
874 num. Also changes the inferior pid, while preserving the
875 inferior num. */
876 thread_change_ptid (inferior_ptid, new_ptid);
877
878 /* We've just told GDB core that the thread changed target id, but,
879 in fact, it really is a different thread, with different register
880 contents. */
881 registers_changed ();
882 }
883
884 /* Handle the exit of a single thread LP. */
885
886 static void
887 exit_lwp (struct lwp_info *lp)
888 {
889 struct thread_info *th = find_thread_ptid (lp->ptid);
890
891 if (th)
892 {
893 if (print_thread_events)
894 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
895
896 delete_thread (lp->ptid);
897 }
898
899 delete_lwp (lp->ptid);
900 }
901
902 /* Wait for the LWP specified by LP, which we have just attached to.
903 Returns a wait status for that LWP, to cache. */
904
905 static int
906 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
907 int *signalled)
908 {
909 pid_t new_pid, pid = ptid_get_lwp (ptid);
910 int status;
911
912 if (linux_proc_pid_is_stopped (pid))
913 {
914 if (debug_linux_nat)
915 fprintf_unfiltered (gdb_stdlog,
916 "LNPAW: Attaching to a stopped process\n");
917
918 /* The process is definitely stopped. It is in a job control
919 stop, unless the kernel predates the TASK_STOPPED /
920 TASK_TRACED distinction, in which case it might be in a
921 ptrace stop. Make sure it is in a ptrace stop; from there we
922 can kill it, signal it, et cetera.
923
924 First make sure there is a pending SIGSTOP. Since we are
925 already attached, the process can not transition from stopped
926 to running without a PTRACE_CONT; so we know this signal will
927 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
928 probably already in the queue (unless this kernel is old
929 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
930 is not an RT signal, it can only be queued once. */
931 kill_lwp (pid, SIGSTOP);
932
933 /* Finally, resume the stopped process. This will deliver the SIGSTOP
934 (or a higher priority signal, just like normal PTRACE_ATTACH). */
935 ptrace (PTRACE_CONT, pid, 0, 0);
936 }
937
938 /* Make sure the initial process is stopped. The user-level threads
939 layer might want to poke around in the inferior, and that won't
940 work if things haven't stabilized yet. */
941 new_pid = my_waitpid (pid, &status, 0);
942 if (new_pid == -1 && errno == ECHILD)
943 {
944 if (first)
945 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
946
947 /* Try again with __WCLONE to check cloned processes. */
948 new_pid = my_waitpid (pid, &status, __WCLONE);
949 *cloned = 1;
950 }
951
952 gdb_assert (pid == new_pid);
953
954 if (!WIFSTOPPED (status))
955 {
956 /* The pid we tried to attach has apparently just exited. */
957 if (debug_linux_nat)
958 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
959 pid, status_to_str (status));
960 return status;
961 }
962
963 if (WSTOPSIG (status) != SIGSTOP)
964 {
965 *signalled = 1;
966 if (debug_linux_nat)
967 fprintf_unfiltered (gdb_stdlog,
968 "LNPAW: Received %s after attaching\n",
969 status_to_str (status));
970 }
971
972 return status;
973 }
974
975 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
976 the new LWP could not be attached, or 1 if we're already auto
977 attached to this thread, but haven't processed the
978 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
979 its existence, without considering it an error. */
980
981 int
982 lin_lwp_attach_lwp (ptid_t ptid)
983 {
984 struct lwp_info *lp;
985 int lwpid;
986
987 gdb_assert (ptid_lwp_p (ptid));
988
989 lp = find_lwp_pid (ptid);
990 lwpid = ptid_get_lwp (ptid);
991
992 /* We assume that we're already attached to any LWP that has an id
993 equal to the overall process id, and to any LWP that is already
994 in our list of LWPs. If we're not seeing exit events from threads
995 and we've had PID wraparound since we last tried to stop all threads,
996 this assumption might be wrong; fortunately, this is very unlikely
997 to happen. */
998 if (lwpid != ptid_get_pid (ptid) && lp == NULL)
999 {
1000 int status, cloned = 0, signalled = 0;
1001
1002 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1003 {
1004 if (linux_supports_tracefork ())
1005 {
1006 /* If we haven't stopped all threads when we get here,
1007 we may have seen a thread listed in thread_db's list,
1008 but not processed the PTRACE_EVENT_CLONE yet. If
1009 that's the case, ignore this new thread, and let
1010 normal event handling discover it later. */
1011 if (in_pid_list_p (stopped_pids, lwpid))
1012 {
1013 /* We've already seen this thread stop, but we
1014 haven't seen the PTRACE_EVENT_CLONE extended
1015 event yet. */
1016 return 0;
1017 }
1018 else
1019 {
1020 int new_pid;
1021 int status;
1022
1023 /* See if we've got a stop for this new child
1024 pending. If so, we're already attached. */
1025 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1026 if (new_pid == -1 && errno == ECHILD)
1027 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1028 if (new_pid != -1)
1029 {
1030 if (WIFSTOPPED (status))
1031 add_to_pid_list (&stopped_pids, lwpid, status);
1032 return 1;
1033 }
1034 }
1035 }
1036
1037 /* If we fail to attach to the thread, issue a warning,
1038 but continue. One way this can happen is if thread
1039 creation is interrupted; as of Linux kernel 2.6.19, a
1040 bug may place threads in the thread list and then fail
1041 to create them. */
1042 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1043 safe_strerror (errno));
1044 return -1;
1045 }
1046
1047 if (debug_linux_nat)
1048 fprintf_unfiltered (gdb_stdlog,
1049 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1050 target_pid_to_str (ptid));
1051
1052 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1053 if (!WIFSTOPPED (status))
1054 return 1;
1055
1056 lp = add_lwp (ptid);
1057 lp->stopped = 1;
1058 lp->cloned = cloned;
1059 lp->signalled = signalled;
1060 if (WSTOPSIG (status) != SIGSTOP)
1061 {
1062 lp->resumed = 1;
1063 lp->status = status;
1064 }
1065
1066 target_post_attach (ptid_get_lwp (lp->ptid));
1067
1068 if (debug_linux_nat)
1069 {
1070 fprintf_unfiltered (gdb_stdlog,
1071 "LLAL: waitpid %s received %s\n",
1072 target_pid_to_str (ptid),
1073 status_to_str (status));
1074 }
1075 }
1076 else
1077 {
1078 /* We assume that the LWP representing the original process is
1079 already stopped. Mark it as stopped in the data structure
1080 that the GNU/Linux ptrace layer uses to keep track of
1081 threads. Note that this won't have been done already, since
1082 the main thread will have been stopped, we assume, by an
1083 attach from a different layer. */
1084 if (lp == NULL)
1085 lp = add_lwp (ptid);
1086 lp->stopped = 1;
1087 }
1088
1089 lp->last_resume_kind = resume_stop;
1090 return 0;
1091 }
1092
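/* Implement the to_create_inferior target_ops method. Wraps the
   lower layer's method, optionally disabling address space
   randomization around it.  */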
1093 static void
1094 linux_nat_create_inferior (struct target_ops *ops,
1095 char *exec_file, char *allargs, char **env,
1096 int from_tty)
1097 {
1098 #ifdef HAVE_PERSONALITY
1099 int personality_orig = 0, personality_set = 0;
1100 #endif /* HAVE_PERSONALITY */
1101
1102 /* The fork_child mechanism is synchronous and calls target_wait, so
1103 we have to mask the async mode. */
1104
1105 #ifdef HAVE_PERSONALITY
1106 if (disable_randomization)
1107 {
1108 errno = 0;
1109 personality_orig = personality (0xffffffff);
1110 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1111 {
1112 personality_set = 1;
1113 personality (personality_orig | ADDR_NO_RANDOMIZE);
1114 }
1115 if (errno != 0 || (personality_set
1116 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1117 warning (_("Error disabling address space randomization: %s"),
1118 safe_strerror (errno));
1119 }
1120 #endif /* HAVE_PERSONALITY */
1121
1122 /* Make sure we report all signals during startup. */
1123 linux_nat_pass_signals (ops, 0, NULL);
1124
1125 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1126
1127 #ifdef HAVE_PERSONALITY
1128 if (personality_set)
1129 {
1130 errno = 0;
1131 personality (personality_orig);
1132 if (errno != 0)
1133 warning (_("Error restoring address space randomization: %s"),
1134 safe_strerror (errno));
1135 }
1136 #endif /* HAVE_PERSONALITY */
1137 }
1138
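/* Implement the to_attach target_ops method. Attaches to the
   process, decorates the main thread's ptid with lwp info, and
   caches the initial wait status.  */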
1139 static void
1140 linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
1141 {
1142 struct lwp_info *lp;
1143 int status;
1144 ptid_t ptid;
1145 volatile struct gdb_exception ex;
1146
1147 /* Make sure we report all signals during attach. */
1148 linux_nat_pass_signals (ops, 0, NULL);
1149
1150 TRY_CATCH (ex, RETURN_MASK_ERROR)
1151 {
1152 linux_ops->to_attach (ops, args, from_tty);
1153 }
1154 if (ex.reason < 0)
1155 {
1156 pid_t pid = parse_pid_to_attach (args);
1157 struct buffer buffer;
1158 char *message, *buffer_s;
1159
1160 message = xstrdup (ex.message);
1161 make_cleanup (xfree, message);
1162
1163 buffer_init (&buffer);
1164 linux_ptrace_attach_fail_reason (pid, &buffer);
1165
1166 buffer_grow_str0 (&buffer, "");
1167 buffer_s = buffer_finish (&buffer);
1168 make_cleanup (xfree, buffer_s);
1169
1170 if (*buffer_s != '\0')
1171 throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1172 else
1173 throw_error (ex.error, "%s", message);
1174 }
1175
1176 /* The ptrace base target adds the main thread with (pid,0,0)
1177 format. Decorate it with lwp info. */
1178 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1179 ptid_get_pid (inferior_ptid),
1180 0);
1181 thread_change_ptid (inferior_ptid, ptid);
1182
1183 /* Add the initial process as the first LWP to the list. */
1184 lp = add_initial_lwp (ptid);
1185
1186 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1187 &lp->signalled);
1188 if (!WIFSTOPPED (status))
1189 {
1190 if (WIFEXITED (status))
1191 {
1192 int exit_code = WEXITSTATUS (status);
1193
1194 target_terminal_ours ();
1195 target_mourn_inferior ();
1196 if (exit_code == 0)
1197 error (_("Unable to attach: program exited normally."));
1198 else
1199 error (_("Unable to attach: program exited with code %d."),
1200 exit_code);
1201 }
1202 else if (WIFSIGNALED (status))
1203 {
1204 enum gdb_signal signo;
1205
1206 target_terminal_ours ();
1207 target_mourn_inferior ();
1208
1209 signo = gdb_signal_from_host (WTERMSIG (status));
1210 error (_("Unable to attach: program terminated with signal "
1211 "%s, %s."),
1212 gdb_signal_to_name (signo),
1213 gdb_signal_to_string (signo));
1214 }
1215
1216 internal_error (__FILE__, __LINE__,
1217 _("unexpected status %d for PID %ld"),
1218 status, (long) ptid_get_lwp (ptid));
1219 }
1220
1221 lp->stopped = 1;
1222
1223 /* Save the wait status to report later. */
1224 lp->resumed = 1;
1225 if (debug_linux_nat)
1226 fprintf_unfiltered (gdb_stdlog,
1227 "LNA: waitpid %ld, saving status %s\n",
1228 (long) ptid_get_pid (lp->ptid), status_to_str (status));
1229
1230 lp->status = status;
1231
1232 if (target_can_async_p ())
1233 target_async (inferior_event_handler, 0);
1234 }
1235
1236 /* Get pending status of LP. */
1237 static int
1238 get_pending_status (struct lwp_info *lp, int *status)
1239 {
1240 enum gdb_signal signo = GDB_SIGNAL_0;
1241
1242 /* If we paused threads momentarily, we may have stored pending
1243 events in lp->status or lp->waitstatus (see stop_wait_callback),
1244 and GDB core hasn't seen any signal for those threads.
1245 Otherwise, the last signal reported to the core is found in the
1246 thread object's stop_signal.
1247
1248 There's a corner case that isn't handled here at present. Only
1249 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1250 stop_signal make sense as a real signal to pass to the inferior.
1251 Some catchpoint related events, like
1252 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1253 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1254 those traps are debug API (ptrace in our case) related and
1255 induced; the inferior wouldn't see them if it wasn't being
1256 traced. Hence, we should never pass them to the inferior, even
1257 when set to pass state. Since this corner case isn't handled by
1258 infrun.c when proceeding with a signal, for consistency, neither
1259 do we handle it here (or elsewhere in the file we check for
1260 signal pass state). Normally SIGTRAP isn't set to pass state, so
1261 this is really a corner case. */
1262
1263 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1264 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1265 else if (lp->status)
1266 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1267 else if (non_stop && !is_executing (lp->ptid))
1268 {
1269 struct thread_info *tp = find_thread_ptid (lp->ptid);
1270
1271 signo = tp->suspend.stop_signal;
1272 }
1273 else if (!non_stop)
1274 {
1275 struct target_waitstatus last;
1276 ptid_t last_ptid;
1277
1278 get_last_target_status (&last_ptid, &last);
1279
1280 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
1281 {
1282 struct thread_info *tp = find_thread_ptid (lp->ptid);
1283
1284 signo = tp->suspend.stop_signal;
1285 }
1286 }
1287
1288 *status = 0;
1289
1290 if (signo == GDB_SIGNAL_0)
1291 {
1292 if (debug_linux_nat)
1293 fprintf_unfiltered (gdb_stdlog,
1294 "GPT: lwp %s has no pending signal\n",
1295 target_pid_to_str (lp->ptid));
1296 }
1297 else if (!signal_pass_state (signo))
1298 {
1299 if (debug_linux_nat)
1300 fprintf_unfiltered (gdb_stdlog,
1301 "GPT: lwp %s had signal %s, "
1302 "but it is in no pass state\n",
1303 target_pid_to_str (lp->ptid),
1304 gdb_signal_to_string (signo));
1305 }
1306 else
1307 {
1308 *status = W_STOPCODE (gdb_signal_to_host (signo));
1309
1310 if (debug_linux_nat)
1311 fprintf_unfiltered (gdb_stdlog,
1312 "GPT: lwp %s has pending signal %s\n",
1313 target_pid_to_str (lp->ptid),
1314 gdb_signal_to_string (signo));
1315 }
1316
1317 return 0;
1318 }
1319
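/* iterate_over_lwps callback: detach from LP (unless it is the main
   LWP, which is left for last), passing along any pending signal.  */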
1320 static int
1321 detach_callback (struct lwp_info *lp, void *data)
1322 {
1323 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1324
1325 if (debug_linux_nat && lp->status)
1326 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1327 strsignal (WSTOPSIG (lp->status)),
1328 target_pid_to_str (lp->ptid));
1329
1330 /* If there is a pending SIGSTOP, get rid of it. */
1331 if (lp->signalled)
1332 {
1333 if (debug_linux_nat)
1334 fprintf_unfiltered (gdb_stdlog,
1335 "DC: Sending SIGCONT to %s\n",
1336 target_pid_to_str (lp->ptid));
1337
1338 kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
1339 lp->signalled = 0;
1340 }
1341
1342 /* We don't actually detach from the LWP that has an id equal to the
1343 overall process id just yet. */
1344 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
1345 {
1346 int status = 0;
1347
1348 /* Pass on any pending signal for this LWP. */
1349 get_pending_status (lp, &status);
1350
1351 if (linux_nat_prepare_to_resume != NULL)
1352 linux_nat_prepare_to_resume (lp);
1353 errno = 0;
1354 if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
1355 WSTOPSIG (status)) < 0)
1356 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1357 safe_strerror (errno));
1358
1359 if (debug_linux_nat)
1360 fprintf_unfiltered (gdb_stdlog,
1361 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1362 target_pid_to_str (lp->ptid),
1363 strsignal (WSTOPSIG (status)));
1364
1365 delete_lwp (lp->ptid);
1366 }
1367
1368 return 0;
1369 }
1370
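/* Implement the to_detach target_ops method. Stops all LWPs, detaches
   from each of them, and finally detaches from (or, in the multi-fork
   case, switches away from) the main process.  */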
1371 static void
1372 linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
1373 {
1374 int pid;
1375 int status;
1376 struct lwp_info *main_lwp;
1377
1378 pid = ptid_get_pid (inferior_ptid);
1379
1380 /* Don't unregister from the event loop, as there may be other
1381 inferiors running. */
1382
1383 /* Stop all threads before detaching. ptrace requires that the
1384 thread is stopped to successfully detach. */
1385 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1386 /* ... and wait until all of them have reported back that
1387 they're no longer running. */
1388 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1389
1390 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1391
1392 /* Only the initial process should be left right now. */
1393 gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
1394
1395 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1396
1397 /* Pass on any pending signal for the last LWP. */
1398 if ((args == NULL || *args == '\0')
1399 && get_pending_status (main_lwp, &status) != -1
1400 && WIFSTOPPED (status))
1401 {
1402 char *tem;
1403
1404 /* Put the signal number in ARGS so that inf_ptrace_detach will
1405 pass it along with PTRACE_DETACH. */
1406 tem = alloca (8);
1407 xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
1408 args = tem;
1409 if (debug_linux_nat)
1410 fprintf_unfiltered (gdb_stdlog,
1411 "LND: Sending signal %s to %s\n",
1412 args,
1413 target_pid_to_str (main_lwp->ptid));
1414 }
1415
1416 if (linux_nat_prepare_to_resume != NULL)
1417 linux_nat_prepare_to_resume (main_lwp);
1418 delete_lwp (main_lwp->ptid);
1419
1420 if (forks_exist_p ())
1421 {
1422 /* Multi-fork case. The current inferior_ptid is being detached
1423 from, but there are other viable forks to debug. Detach from
1424 the current fork, and context-switch to the first
1425 available. */
1426 linux_fork_detach (args, from_tty);
1427 }
1428 else
1429 linux_ops->to_detach (ops, args, from_tty);
1430 }
1431
1432 /* Resume LP. */
1433
1434 static void
1435 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1436 {
1437 if (lp->stopped)
1438 {
1439 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
1440
1441 if (inf->vfork_child != NULL)
1442 {
1443 if (debug_linux_nat)
1444 fprintf_unfiltered (gdb_stdlog,
1445 "RC: Not resuming %s (vfork parent)\n",
1446 target_pid_to_str (lp->ptid));
1447 }
1448 else if (lp->status == 0
1449 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1450 {
1451 if (debug_linux_nat)
1452 fprintf_unfiltered (gdb_stdlog,
1453 "RC: Resuming sibling %s, %s, %s\n",
1454 target_pid_to_str (lp->ptid),
1455 (signo != GDB_SIGNAL_0
1456 ? strsignal (gdb_signal_to_host (signo))
1457 : "0"),
1458 step ? "step" : "resume");
1459
1460 if (linux_nat_prepare_to_resume != NULL)
1461 linux_nat_prepare_to_resume (lp);
1462 linux_ops->to_resume (linux_ops,
1463 pid_to_ptid (ptid_get_lwp (lp->ptid)),
1464 step, signo);
1465 lp->stopped = 0;
1466 lp->step = step;
1467 lp->stopped_by_watchpoint = 0;
1468 }
1469 else
1470 {
1471 if (debug_linux_nat)
1472 fprintf_unfiltered (gdb_stdlog,
1473 "RC: Not resuming sibling %s (has pending)\n",
1474 target_pid_to_str (lp->ptid));
1475 }
1476 }
1477 else
1478 {
1479 if (debug_linux_nat)
1480 fprintf_unfiltered (gdb_stdlog,
1481 "RC: Not resuming sibling %s (not stopped)\n",
1482 target_pid_to_str (lp->ptid));
1483 }
1484 }
1485
1486 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1487 Resume LWP with the last stop signal, if it is in pass state. */
1488
1489 static int
1490 linux_nat_resume_callback (struct lwp_info *lp, void *except)
1491 {
1492 enum gdb_signal signo = GDB_SIGNAL_0;
1493
1494 if (lp == except)
1495 return 0;
1496
1497 if (lp->stopped)
1498 {
1499 struct thread_info *thread;
1500
1501 thread = find_thread_ptid (lp->ptid);
1502 if (thread != NULL)
1503 {
1504 signo = thread->suspend.stop_signal;
1505 thread->suspend.stop_signal = GDB_SIGNAL_0;
1506 }
1507 }
1508
1509 resume_lwp (lp, 0, signo);
1510 return 0;
1511 }
1512
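/* iterate_over_lwps callback: clear LP's resumed flag.  */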
1513 static int
1514 resume_clear_callback (struct lwp_info *lp, void *data)
1515 {
1516 lp->resumed = 0;
1517 lp->last_resume_kind = resume_stop;
1518 return 0;
1519 }
1520
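/* iterate_over_lwps callback: set LP's resumed flag.  */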
1521 static int
1522 resume_set_callback (struct lwp_info *lp, void *data)
1523 {
1524 lp->resumed = 1;
1525 lp->last_resume_kind = resume_continue;
1526 return 0;
1527 }
1528
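/* Implement the to_resume target_ops method.  */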
1529 static void
1530 linux_nat_resume (struct target_ops *ops,
1531 ptid_t ptid, int step, enum gdb_signal signo)
1532 {
1533 struct lwp_info *lp;
1534 int resume_many;
1535
1536 if (debug_linux_nat)
1537 fprintf_unfiltered (gdb_stdlog,
1538 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1539 step ? "step" : "resume",
1540 target_pid_to_str (ptid),
1541 (signo != GDB_SIGNAL_0
1542 ? strsignal (gdb_signal_to_host (signo)) : "0"),
1543 target_pid_to_str (inferior_ptid));
1544
1545 /* A specific PTID means `step only this process id'. */
1546 resume_many = (ptid_equal (minus_one_ptid, ptid)
1547 || ptid_is_pid (ptid));
1548
1549 /* Mark the lwps we're resuming as resumed. */
1550 iterate_over_lwps (ptid, resume_set_callback, NULL);
1551
1552 /* See if it's the current inferior that should be handled
1553 specially. */
1554 if (resume_many)
1555 lp = find_lwp_pid (inferior_ptid);
1556 else
1557 lp = find_lwp_pid (ptid);
1558 gdb_assert (lp != NULL);
1559
1560 /* Remember if we're stepping. */
1561 lp->step = step;
1562 lp->last_resume_kind = step ? resume_step : resume_continue;
1563
1564 /* If we have a pending wait status for this thread, there is no
1565 point in resuming the process. But first make sure that
1566 linux_nat_wait won't preemptively handle the event - we
1567 should never take this short-circuit if we are going to
1568 leave LP running, since we have skipped resuming all the
1569 other threads. This bit of code needs to be synchronized
1570 with linux_nat_wait. */
1571
1572 if (lp->status && WIFSTOPPED (lp->status))
1573 {
1574 if (!lp->step
1575 && WSTOPSIG (lp->status)
1576 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1577 {
1578 if (debug_linux_nat)
1579 fprintf_unfiltered (gdb_stdlog,
1580 "LLR: Not short circuiting for ignored "
1581 "status 0x%x\n", lp->status);
1582
1583 /* FIXME: What should we do if we are supposed to continue
1584 this thread with a signal? */
1585 gdb_assert (signo == GDB_SIGNAL_0);
1586 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1587 lp->status = 0;
1588 }
1589 }
1590
1591 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1592 {
1593 /* FIXME: What should we do if we are supposed to continue
1594 this thread with a signal? */
1595 gdb_assert (signo == GDB_SIGNAL_0);
1596
1597 if (debug_linux_nat)
1598 fprintf_unfiltered (gdb_stdlog,
1599 "LLR: Short circuiting for status 0x%x\n",
1600 lp->status);
1601
1602 if (target_can_async_p ())
1603 {
1604 target_async (inferior_event_handler, 0);
1605 /* Tell the event loop we have something to process. */
1606 async_file_mark ();
1607 }
1608 return;
1609 }
1610
1611 if (resume_many)
1612 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
1613
1614 /* Convert to something the lower layer understands. */
1615 ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
1616
1617 if (linux_nat_prepare_to_resume != NULL)
1618 linux_nat_prepare_to_resume (lp);
1619 linux_ops->to_resume (linux_ops, ptid, step, signo);
1620 lp->stopped_by_watchpoint = 0;
1621 lp->stopped = 0;
1622
1623 if (debug_linux_nat)
1624 fprintf_unfiltered (gdb_stdlog,
1625 "LLR: %s %s, %s (resume event thread)\n",
1626 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1627 target_pid_to_str (ptid),
1628 (signo != GDB_SIGNAL_0
1629 ? strsignal (gdb_signal_to_host (signo)) : "0"));
1630
1631 if (target_can_async_p ())
1632 target_async (inferior_event_handler, 0);
1633 }
1634
1635 /* Send a signal to an LWP. */
1636
1637 static int
1638 kill_lwp (int lwpid, int signo)
1639 {
1640 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1641 fails, then we are not using nptl threads and we should be using kill. */
1642
1643 #ifdef HAVE_TKILL_SYSCALL
1644 {
1645 static int tkill_failed;
1646
1647 if (!tkill_failed)
1648 {
1649 int ret;
1650
1651 errno = 0;
1652 ret = syscall (__NR_tkill, lwpid, signo);
1653 if (errno != ENOSYS)
1654 return ret;
1655 tkill_failed = 1;
1656 }
1657 }
1658 #endif
1659
1660 return kill (lwpid, signo);
1661 }
1662
1663 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1664 event, check if the core is interested in it: if not, ignore the
1665 event, and keep waiting; otherwise, we need to toggle the LWP's
1666 syscall entry/exit status, since the ptrace event itself doesn't
1667 indicate it, and report the trap to higher layers. */
1668
1669 static int
1670 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1671 {
1672 struct target_waitstatus *ourstatus = &lp->waitstatus;
1673 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1674 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1675
1676 if (stopping)
1677 {
1678 /* If we're stopping threads, there's a SIGSTOP pending, which
1679 makes it so that the LWP reports an immediate syscall return,
1680 followed by the SIGSTOP. Skip seeing that "return" using
1681 PTRACE_CONT directly, and let stop_wait_callback collect the
1682 SIGSTOP. Later, when the thread is resumed, a new syscall
1683 entry event is reported. If we didn't do this (and returned 0), we'd
1684 leave a syscall entry pending, and our caller, by using
1685 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1686 itself. Later, when the user re-resumes this LWP, we'd see
1687 another syscall entry event and we'd mistake it for a return.
1688
1689 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1690 (leaving immediately with LWP->signalled set, without issuing
1691 a PTRACE_CONT), it would still be problematic to leave this
1692 syscall enter pending, as later when the thread is resumed,
1693 it would then see the same syscall exit mentioned above,
1694 followed by the delayed SIGSTOP, while the syscall didn't
1695 actually get to execute. It seems it would be even more
1696 confusing to the user. */
1697
1698 if (debug_linux_nat)
1699 fprintf_unfiltered (gdb_stdlog,
1700 "LHST: ignoring syscall %d "
1701 "for LWP %ld (stopping threads), "
1702 "resuming with PTRACE_CONT for SIGSTOP\n",
1703 syscall_number,
1704 ptid_get_lwp (lp->ptid));
1705
1706 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1707 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
1708 lp->stopped = 0;
1709 return 1;
1710 }
1711
1712 if (catch_syscall_enabled ())
1713 {
1714 /* Always update the entry/return state, even if this particular
1715 syscall isn't interesting to the core now. In async mode,
1716 the user could install a new catchpoint for this syscall
1717 between syscall enter/return, and we'll need to know to
1718 report a syscall return if that happens. */
1719 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1720 ? TARGET_WAITKIND_SYSCALL_RETURN
1721 : TARGET_WAITKIND_SYSCALL_ENTRY);
1722
1723 if (catching_syscall_number (syscall_number))
1724 {
1725 /* Alright, an event to report. */
1726 ourstatus->kind = lp->syscall_state;
1727 ourstatus->value.syscall_number = syscall_number;
1728
1729 if (debug_linux_nat)
1730 fprintf_unfiltered (gdb_stdlog,
1731 "LHST: stopping for %s of syscall %d"
1732 " for LWP %ld\n",
1733 lp->syscall_state
1734 == TARGET_WAITKIND_SYSCALL_ENTRY
1735 ? "entry" : "return",
1736 syscall_number,
1737 ptid_get_lwp (lp->ptid));
1738 return 0;
1739 }
1740
1741 if (debug_linux_nat)
1742 fprintf_unfiltered (gdb_stdlog,
1743 "LHST: ignoring %s of syscall %d "
1744 "for LWP %ld\n",
1745 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1746 ? "entry" : "return",
1747 syscall_number,
1748 ptid_get_lwp (lp->ptid));
1749 }
1750 else
1751 {
1752 /* If we had been syscall tracing, and hence used PT_SYSCALL
1753 before on this LWP, it could happen that the user removes all
1754 syscall catchpoints before we get to process this event.
1755 There are two noteworthy issues here:
1756
1757 - When stopped at a syscall entry event, resuming with
1758 PT_STEP still resumes executing the syscall and reports a
1759 syscall return.
1760
1761 - Only PT_SYSCALL catches syscall enters. If we last
1762 single-stepped this thread, then this event can't be a
1763 syscall enter; given the previous point, it has to be a
1764 syscall exit.
1765
1766 The points above mean that the next resume, be it PT_STEP or
1767 PT_CONTINUE, can not trigger a syscall trace event. */
1768 if (debug_linux_nat)
1769 fprintf_unfiltered (gdb_stdlog,
1770 "LHST: caught syscall event "
1771 "with no syscall catchpoints."
1772 " %d for LWP %ld, ignoring\n",
1773 syscall_number,
1774 ptid_get_lwp (lp->ptid));
1775 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1776 }
1777
1778 /* The core isn't interested in this event. For efficiency, avoid
1779 stopping all threads only to have the core resume them all again.
1780 Since we're not stopping threads, if we're still syscall tracing
1781 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1782 subsequent syscall. Simply resume using the inf-ptrace layer,
1783 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1784
1785 /* Note that gdbarch_get_syscall_number may access registers, hence
1786 fill a regcache. */
1787 registers_changed ();
1788 if (linux_nat_prepare_to_resume != NULL)
1789 linux_nat_prepare_to_resume (lp);
1790 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
1791 lp->step, GDB_SIGNAL_0);
1792 lp->stopped = 0;
1793 return 1;
1794 }
1795
1796 /* Handle a GNU/Linux extended wait response. If we see a clone
1797 event, we need to add the new LWP to our list (and not report the
1798 trap to higher layers). This function returns non-zero if the
1799 event should be ignored and we should wait again. If STOPPING is
1800 true, the new LWP remains stopped, otherwise it is continued. */
1801
1802 static int
1803 linux_handle_extended_wait (struct lwp_info *lp, int status,
1804 int stopping)
1805 {
1806 int pid = ptid_get_lwp (lp->ptid);
1807 struct target_waitstatus *ourstatus = &lp->waitstatus;
1808 int event = linux_ptrace_get_extended_event (status);
1809
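/* A reference sketch of how an extended event stop is encoded,
   assuming the usual glibc wait-status layout (an assumption about
   the libc, not something this file checks):

     (status & 0xff) == 0x7f         -- WIFSTOPPED is true
     ((status >> 8) & 0xff) == 5     -- WSTOPSIG is SIGTRAP
     status >> 16                    -- PTRACE_EVENT_FORK, _CLONE, ...

   linux_ptrace_get_extended_event performs that last shift, and
   linux_is_extended_waitstatus checks whether it yields non-zero.  */
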
1810 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1811 || event == PTRACE_EVENT_CLONE)
1812 {
1813 unsigned long new_pid;
1814 int ret;
1815
1816 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1817
1818 /* If we haven't already seen the new PID stop, wait for it now. */
1819 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1820 {
1821 /* The new child has a pending SIGSTOP. We can't affect it until it
1822 hits the SIGSTOP, but we're already attached. */
1823 ret = my_waitpid (new_pid, &status,
1824 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1825 if (ret == -1)
1826 perror_with_name (_("waiting for new child"));
1827 else if (ret != new_pid)
1828 internal_error (__FILE__, __LINE__,
1829 _("wait returned unexpected PID %d"), ret);
1830 else if (!WIFSTOPPED (status))
1831 internal_error (__FILE__, __LINE__,
1832 _("wait returned unexpected status 0x%x"), status);
1833 }
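
/* A note on the waitpid flags above: a child created by clone with
   an exit signal other than SIGCHLD (as glibc's pthread_create sets
   up) is a "clone child" in waitpid terms and is only matched when
   __WCLONE is passed, as in this sketch:

     my_waitpid (new_pid, &status, __WCLONE);

   Fork and vfork children are ordinary children, matched by a plain
   waitpid with no extra flags.  */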
1834
1835 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1836
1837 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1838 {
1839 /* The arch-specific native code may need to know about new
1840 forks even if those end up never mapped to an
1841 inferior. */
1842 if (linux_nat_new_fork != NULL)
1843 linux_nat_new_fork (lp, new_pid);
1844 }
1845
1846 if (event == PTRACE_EVENT_FORK
1847 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
1848 {
1849 /* Handle checkpointing by linux-fork.c here as a special
1850 case. We don't want the follow-fork-mode or 'catch fork'
1851 to interfere with this. */
1852
1853 /* This won't actually modify the breakpoint list, but will
1854 physically remove the breakpoints from the child. */
1855 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
1856
1857 /* Retain child fork in ptrace (stopped) state. */
1858 if (!find_fork_pid (new_pid))
1859 add_fork (new_pid);
1860
1861 /* Report as spurious, so that infrun doesn't want to follow
1862 this fork. We're actually doing an infcall in
1863 linux-fork.c. */
1864 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
1865
1866 /* Report the stop to the core. */
1867 return 0;
1868 }
1869
1870 if (event == PTRACE_EVENT_FORK)
1871 ourstatus->kind = TARGET_WAITKIND_FORKED;
1872 else if (event == PTRACE_EVENT_VFORK)
1873 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1874 else
1875 {
1876 struct lwp_info *new_lp;
1877
1878 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1879
1880 if (debug_linux_nat)
1881 fprintf_unfiltered (gdb_stdlog,
1882 "LHEW: Got clone event "
1883 "from LWP %d, new child is LWP %ld\n",
1884 pid, new_pid);
1885
1886 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
1887 new_lp->cloned = 1;
1888 new_lp->stopped = 1;
1889
1890 if (WSTOPSIG (status) != SIGSTOP)
1891 {
1892 /* This can happen if someone starts sending signals
1893 with a lower number than SIGSTOP (e.g. SIGUSR1) to the
1894 new thread before it gets a chance to run.
1895 This is an unlikely case, and harder to handle for
1896 fork / vfork than for clone, so we do not try - but
1897 we handle it for clone events here. We'll send
1898 the other signal on to the thread below. */
1899
1900 new_lp->signalled = 1;
1901 }
1902 else
1903 {
1904 struct thread_info *tp;
1905
1906 /* When we stop for an event in some other thread, and
1907 pull the thread list just as this thread has cloned,
1908 we'll have seen the new thread in the thread_db list
1909 before handling the CLONE event (glibc's
1910 pthread_create adds the new thread to the thread list
1911 before clone'ing, and has the kernel fill in the
1912 thread's tid on the clone call with
1913 CLONE_PARENT_SETTID). If that happened, and the core
1914 had requested the new thread to stop, we'll have
1915 killed it with SIGSTOP. But since SIGSTOP is not an
1916 RT signal, it can only be queued once. We need to be
1917 careful to not resume the LWP if we wanted it to
1918 stop. In that case, we'll leave the SIGSTOP pending.
1919 It will later be reported as GDB_SIGNAL_0. */
1920 tp = find_thread_ptid (new_lp->ptid);
1921 if (tp != NULL && tp->stop_requested)
1922 new_lp->last_resume_kind = resume_stop;
1923 else
1924 status = 0;
1925 }
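
/* The "queued once" constraint mentioned above is a property of
   standard (non-realtime) signals: the kernel keeps a per-signal
   pending flag rather than a queue, so a second SIGSTOP sent while
   one is already pending is simply merged with it.  Realtime signals
   (SIGRTMIN..SIGRTMAX) do queue, but SIGSTOP is not one of them.  */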
1926
1927 if (non_stop)
1928 {
1929 /* Add the new thread to GDB's lists as soon as possible
1930 so that:
1931
1932 1) the frontend doesn't have to wait for a stop to
1933 display them, and,
1934
1935 2) we tag it with the correct running state. */
1936
1937 /* If the thread_db layer is active, let it know about
1938 this new thread, and add it to GDB's list. */
1939 if (!thread_db_attach_lwp (new_lp->ptid))
1940 {
1941 /* We're not using thread_db. Add it to GDB's
1942 list. */
1943 target_post_attach (ptid_get_lwp (new_lp->ptid));
1944 add_thread (new_lp->ptid);
1945 }
1946
1947 if (!stopping)
1948 {
1949 set_running (new_lp->ptid, 1);
1950 set_executing (new_lp->ptid, 1);
1951 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
1952 resume_stop. */
1953 new_lp->last_resume_kind = resume_continue;
1954 }
1955 }
1956
1957 if (status != 0)
1958 {
1959 /* We created NEW_LP so it cannot yet contain STATUS. */
1960 gdb_assert (new_lp->status == 0);
1961
1962 /* Save the wait status to report later. */
1963 if (debug_linux_nat)
1964 fprintf_unfiltered (gdb_stdlog,
1965 "LHEW: waitpid of new LWP %ld, "
1966 "saving status %s\n",
1967 (long) ptid_get_lwp (new_lp->ptid),
1968 status_to_str (status));
1969 new_lp->status = status;
1970 }
1971
1972 /* Note the need to use the low target ops to resume, to
1973 handle resuming with PT_SYSCALL if we have syscall
1974 catchpoints. */
1975 if (!stopping)
1976 {
1977 new_lp->resumed = 1;
1978
1979 if (status == 0)
1980 {
1981 gdb_assert (new_lp->last_resume_kind == resume_continue);
1982 if (debug_linux_nat)
1983 fprintf_unfiltered (gdb_stdlog,
1984 "LHEW: resuming new LWP %ld\n",
1985 ptid_get_lwp (new_lp->ptid));
1986 if (linux_nat_prepare_to_resume != NULL)
1987 linux_nat_prepare_to_resume (new_lp);
1988 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
1989 0, GDB_SIGNAL_0);
1990 new_lp->stopped = 0;
1991 }
1992 }
1993
1994 if (debug_linux_nat)
1995 fprintf_unfiltered (gdb_stdlog,
1996 "LHEW: resuming parent LWP %d\n", pid);
1997 if (linux_nat_prepare_to_resume != NULL)
1998 linux_nat_prepare_to_resume (lp);
1999 linux_ops->to_resume (linux_ops,
2000 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2001 0, GDB_SIGNAL_0);
2002 lp->stopped = 0;
2003 return 1;
2004 }
2005
2006 return 0;
2007 }
2008
2009 if (event == PTRACE_EVENT_EXEC)
2010 {
2011 if (debug_linux_nat)
2012 fprintf_unfiltered (gdb_stdlog,
2013 "LHEW: Got exec event from LWP %ld\n",
2014 ptid_get_lwp (lp->ptid));
2015
2016 ourstatus->kind = TARGET_WAITKIND_EXECD;
2017 ourstatus->value.execd_pathname
2018 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
2019
2020 return 0;
2021 }
2022
2023 if (event == PTRACE_EVENT_VFORK_DONE)
2024 {
2025 if (current_inferior ()->waiting_for_vfork_done)
2026 {
2027 if (debug_linux_nat)
2028 fprintf_unfiltered (gdb_stdlog,
2029 "LHEW: Got expected PTRACE_EVENT_"
2030 "VFORK_DONE from LWP %ld: stopping\n",
2031 ptid_get_lwp (lp->ptid));
2032
2033 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2034 return 0;
2035 }
2036
2037 if (debug_linux_nat)
2038 fprintf_unfiltered (gdb_stdlog,
2039 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2040 "from LWP %ld: resuming\n",
2041 ptid_get_lwp (lp->ptid));
2042 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2043 return 1;
2044 }
2045
2046 internal_error (__FILE__, __LINE__,
2047 _("unknown ptrace event %d"), event);
2048 }
2049
2050 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2051 exited. */
2052
2053 static int
2054 wait_lwp (struct lwp_info *lp)
2055 {
2056 pid_t pid;
2057 int status = 0;
2058 int thread_dead = 0;
2059 sigset_t prev_mask;
2060
2061 gdb_assert (!lp->stopped);
2062 gdb_assert (lp->status == 0);
2063
2064 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2065 block_child_signals (&prev_mask);
2066
2067 for (;;)
2068 {
2069 /* If my_waitpid returns 0, the __WCLONE vs. non-__WCLONE choice was
2070 right but no event is pending yet, so just call sigsuspend.  */
2071
2072 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
2073 if (pid == -1 && errno == ECHILD)
2074 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
2075 if (pid == -1 && errno == ECHILD)
2076 {
2077 /* The thread has previously exited. We need to delete it
2078 now because, for some vendor 2.4 kernels with NPTL
2079 support backported, there won't be an exit event unless
2080 it is the main thread. 2.6 kernels will report an exit
2081 event for each thread that exits, as expected. */
2082 thread_dead = 1;
2083 if (debug_linux_nat)
2084 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2085 target_pid_to_str (lp->ptid));
2086 }
2087 if (pid != 0)
2088 break;
2089
2090 /* Bugs 10970, 12702.
2091 Thread group leader may have exited in which case we'll lock up in
2092 waitpid if there are other threads, even if they are all zombies too.
2093 Basically, we're not supposed to use waitpid this way.
2094 __WCLONE is not applicable for the leader so we can't use that.
2095 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2096 process; it gets ESRCH both for the zombie and for running processes.
2097
2098 As a workaround, check if we're waiting for the thread group leader and
2099 if it's a zombie, and avoid calling waitpid if it is.
2100
2101 This is racy; what if the tgl becomes a zombie right after we check?
2102 Therefore always use WNOHANG with sigsuspend - it is equivalent to a
2103 blocking waitpid, but linux_proc_pid_is_zombie is safe this way.  */
2104
2105 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2106 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
2107 {
2108 thread_dead = 1;
2109 if (debug_linux_nat)
2110 fprintf_unfiltered (gdb_stdlog,
2111 "WL: Thread group leader %s vanished.\n",
2112 target_pid_to_str (lp->ptid));
2113 break;
2114 }
2115
2116 /* Wait for the next SIGCHLD and try again.  This may let SIGCHLD
2117 handlers get invoked even though our caller intentionally blocked
2118 them with block_child_signals.  Only the loop in linux_nat_wait_1
2119 is sensitive to this, and there my_waitpid gets called again before
2120 the loop reaches sigsuspend, so we can safely let the handlers
2121 run here.  */
2122
2123 if (debug_linux_nat)
2124 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
2125 sigsuspend (&suspend_mask);
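
/* A note on the sigsuspend above: SUSPEND_MASK is -- assuming its
   setup elsewhere in this file -- the normal signal mask with
   SIGCHLD removed, so the call atomically unblocks SIGCHLD and
   sleeps until a signal arrives.  Because SIGCHLD was blocked up to
   this point, a SIGCHLD that arrived in between is still pending
   and wakes us immediately; there is no lost-wakeup window.  */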
2126 }
2127
2128 restore_child_signals_mask (&prev_mask);
2129
2130 if (!thread_dead)
2131 {
2132 gdb_assert (pid == ptid_get_lwp (lp->ptid));
2133
2134 if (debug_linux_nat)
2135 {
2136 fprintf_unfiltered (gdb_stdlog,
2137 "WL: waitpid %s received %s\n",
2138 target_pid_to_str (lp->ptid),
2139 status_to_str (status));
2140 }
2141
2142 /* Check if the thread has exited. */
2143 if (WIFEXITED (status) || WIFSIGNALED (status))
2144 {
2145 thread_dead = 1;
2146 if (debug_linux_nat)
2147 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2148 target_pid_to_str (lp->ptid));
2149 }
2150 }
2151
2152 if (thread_dead)
2153 {
2154 exit_lwp (lp);
2155 return 0;
2156 }
2157
2158 gdb_assert (WIFSTOPPED (status));
2159 lp->stopped = 1;
2160
2161 /* Handle GNU/Linux's syscall SIGTRAPs. */
2162 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2163 {
2164 /* No longer need the sysgood bit. The ptrace event ends up
2165 recorded in lp->waitstatus if we care for it. We can carry
2166 on handling the event like a regular SIGTRAP from here
2167 on. */
2168 status = W_STOPCODE (SIGTRAP);
2169 if (linux_handle_syscall_trap (lp, 1))
2170 return wait_lwp (lp);
2171 }
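
/* For reference: with PTRACE_O_TRACESYSGOOD in effect, a syscall
   stop reports WSTOPSIG == (SIGTRAP | 0x80), which is what the
   SYSCALL_SIGTRAP test above matches.  Assuming the usual glibc
   encoding, W_STOPCODE rebuilds a plain SIGTRAP stop status:

     W_STOPCODE (sig)      ==  ((sig) << 8 | 0x7f)
     W_STOPCODE (SIGTRAP)  ==  0x057f

   so the rest of the event handling sees an ordinary SIGTRAP.  */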
2172
2173 /* Handle GNU/Linux's extended waitstatus for trace events. */
2174 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2175 && linux_is_extended_waitstatus (status))
2176 {
2177 if (debug_linux_nat)
2178 fprintf_unfiltered (gdb_stdlog,
2179 "WL: Handling extended status 0x%06x\n",
2180 status);
2181 if (linux_handle_extended_wait (lp, status, 1))
2182 return wait_lwp (lp);
2183 }
2184
2185 return status;
2186 }
2187
2188 /* Send a SIGSTOP to LP. */
2189
2190 static int
2191 stop_callback (struct lwp_info *lp, void *data)
2192 {
2193 if (!lp->stopped && !lp->signalled)
2194 {
2195 int ret;
2196
2197 if (debug_linux_nat)
2198 {
2199 fprintf_unfiltered (gdb_stdlog,
2200 "SC: kill %s **<SIGSTOP>**\n",
2201 target_pid_to_str (lp->ptid));
2202 }
2203 errno = 0;
2204 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
2205 if (debug_linux_nat)
2206 {
2207 fprintf_unfiltered (gdb_stdlog,
2208 "SC: lwp kill %d %s\n",
2209 ret,
2210 errno ? safe_strerror (errno) : "ERRNO-OK");
2211 }
2212
2213 lp->signalled = 1;
2214 gdb_assert (lp->status == 0);
2215 }
2216
2217 return 0;
2218 }
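
/* A note on kill_lwp, used by stop_callback above: plain kill ()
   addresses a whole process (thread group), so signalling one
   specific LWP needs a thread-directed syscall.  Where the tkill
   syscall is available, kill_lwp (defined elsewhere in this file)
   boils down to something like this sketch:

     errno = 0;
     ret = syscall (__NR_tkill, lwpid, signo);

   with a fallback to plain kill () on kernels without tkill.  */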
2219
2220 /* Request a stop on LWP. */
2221
2222 void
2223 linux_stop_lwp (struct lwp_info *lwp)
2224 {
2225 stop_callback (lwp, NULL);
2226 }
2227
2228 /* Return non-zero if LWP PID has a pending SIGINT. */
2229
2230 static int
2231 linux_nat_has_pending_sigint (int pid)
2232 {
2233 sigset_t pending, blocked, ignored;
2234
2235 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2236
2237 if (sigismember (&pending, SIGINT)
2238 && !sigismember (&ignored, SIGINT))
2239 return 1;
2240
2241 return 0;
2242 }
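
/* For reference, linux_proc_pending_signals fills the three sets
   above from the signal-mask lines of /proc/PID/status, which look
   like (hexadecimal bit masks):

     SigPnd:	0000000000000002
     SigBlk:	0000000000010000
     SigIgn:	0000000000384204

   Bit N-1 set means signal N is in the given set, so a pending
   SIGINT (signal 2) shows up as bit 1 of SigPnd.  */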
2243
2244 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2245
2246 static int
2247 set_ignore_sigint (struct lwp_info *lp, void *data)
2248 {
2249 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2250 flag to consume the next one. */
2251 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2252 && WSTOPSIG (lp->status) == SIGINT)
2253 lp->status = 0;
2254 else
2255 lp->ignore_sigint = 1;
2256
2257 return 0;
2258 }
2259
2260 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2261 This function is called after we know the LWP has stopped; if the LWP
2262 stopped before the expected SIGINT was delivered, then it will never have
2263 arrived. Also, if the signal was delivered to a shared queue and consumed
2264 by a different thread, it will never be delivered to this LWP. */
2265
2266 static void
2267 maybe_clear_ignore_sigint (struct lwp_info *lp)
2268 {
2269 if (!lp->ignore_sigint)
2270 return;
2271
2272 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
2273 {
2274 if (debug_linux_nat)
2275 fprintf_unfiltered (gdb_stdlog,
2276 "MCIS: Clearing bogus flag for %s\n",
2277 target_pid_to_str (lp->ptid));
2278 lp->ignore_sigint = 0;
2279 }
2280 }
2281
2282 /* Fetch info about a possibly-triggered data watchpoint and store
2283 it in LP.
2284
2285 On some archs, like x86, that use debug registers to set
2286 watchpoints, the way to know which watched address trapped is to
2287 read back the register that selects which address to watch.  The
2288 problem is that, between setting the watchpoint and reading back
2289 which data address trapped, the user may change the set of
2290 watchpoints, and, as a consequence, GDB changes the
2291 debug registers in the inferior.  To avoid reading back a stale
2292 stopped-data-address when that happens, we cache in LP the fact
2293 that a watchpoint trapped, and the corresponding data address, as
2294 soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
2295 registers meanwhile, we still have the cached data to rely on.  */
2296
2297 static void
2298 save_sigtrap (struct lwp_info *lp)
2299 {
2300 struct cleanup *old_chain;
2301
2302 if (linux_ops->to_stopped_by_watchpoint == NULL)
2303 {
2304 lp->stopped_by_watchpoint = 0;
2305 return;
2306 }
2307
2308 old_chain = save_inferior_ptid ();
2309 inferior_ptid = lp->ptid;
2310
2311 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint (linux_ops);
2312
2313 if (lp->stopped_by_watchpoint)
2314 {
2315 if (linux_ops->to_stopped_data_address != NULL)
2316 lp->stopped_data_address_p =
2317 linux_ops->to_stopped_data_address (&current_target,
2318 &lp->stopped_data_address);
2319 else
2320 lp->stopped_data_address_p = 0;
2321 }
2322
2323 do_cleanups (old_chain);
2324 }
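
/* As a concrete x86 illustration of the staleness problem described
   above save_sigtrap (a sketch of the mechanism, not of how the x86
   backend actually spells it): the "which address trapped" question
   is answered by the DR6 debug status register, whose low bits
   B0..B3 flag which of the DR0..DR3 address registers triggered:

     for (i = 0; i < 4; i++)
       if (dr6 & (1 << i))
         ... DR0+i holds the watched address that trapped ...

   If GDB rewrites DR0..DR3 (because the user edited the watchpoint
   set) before we read them back, a lookup through DR6 would return
   stale data, hence the caching in struct lwp_info.  */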
2325
2326 /* See save_sigtrap. */
2327
2328 static int
2329 linux_nat_stopped_by_watchpoint (struct target_ops *ops)
2330 {
2331 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2332
2333 gdb_assert (lp != NULL);
2334
2335 return lp->stopped_by_watchpoint;
2336 }
2337
2338 static int
2339 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2340 {
2341 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2342
2343 gdb_assert (lp != NULL);
2344
2345 *addr_p = lp->stopped_data_address;
2346
2347 return lp->stopped_data_address_p;
2348 }
2349
2350 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP.  */
2351
2352 static int
2353 sigtrap_is_event (int status)
2354 {
2355 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2356 }
2357
2358 /* SIGTRAP-like events recognizer. */
2359
2360 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2361
2362 /* Check for SIGTRAP-like events in LP. */
2363
2364 static int
2365 linux_nat_lp_status_is_event (struct lwp_info *lp)
2366 {
2367 /* We check for lp->waitstatus in addition to lp->status, because we can
2368 have pending process exits recorded in lp->status
2369 and W_EXITCODE(0,0) == 0. We should probably have an additional
2370 lp->status_p flag. */
2371
2372 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2373 && linux_nat_status_is_event (lp->status));
2374 }
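
/* The W_EXITCODE(0,0) == 0 ambiguity mentioned above follows from
   the usual glibc encoding, where W_EXITCODE (ret, sig) is
   ((ret) << 8 | (sig)): a clean exit produces a wait status that is
   literally the integer 0, identical to the "nothing pending"
   sentinel kept in lp->status.  */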
2375
2376 /* Set an alternative recognizer for SIGTRAP-like events.  If
2377 breakpoint_inserted_here_p reports a breakpoint at the stop PC, then
2378 gdbarch_decr_pc_after_break will be applied.  */
2379
2380 void
2381 linux_nat_set_status_is_event (struct target_ops *t,
2382 int (*status_is_event) (int status))
2383 {
2384 linux_nat_status_is_event = status_is_event;
2385 }
2386
2387 /* Wait until LP is stopped. */
2388
2389 static int
2390 stop_wait_callback (struct lwp_info *lp, void *data)
2391 {
2392 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2393
2394 /* If this is a vfork parent, bail out; it is not going to report
2395 any SIGSTOP until the vfork is done.  */
2396 if (inf->vfork_child != NULL)
2397 return 0;
2398
2399 if (!lp->stopped)
2400 {
2401 int status;
2402
2403 status = wait_lwp (lp);
2404 if (status == 0)
2405 return 0;
2406
2407 if (lp->ignore_sigint && WIFSTOPPED (status)
2408 && WSTOPSIG (status) == SIGINT)
2409 {
2410 lp->ignore_sigint = 0;
2411
2412 errno = 0;
2413 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2414 lp->stopped = 0;
2415 if (debug_linux_nat)
2416 fprintf_unfiltered (gdb_stdlog,
2417 "PTRACE_CONT %s, 0, 0 (%s) "
2418 "(discarding SIGINT)\n",
2419 target_pid_to_str (lp->ptid),
2420 errno ? safe_strerror (errno) : "OK");
2421
2422 return stop_wait_callback (lp, NULL);
2423 }
2424
2425 maybe_clear_ignore_sigint (lp);
2426
2427 if (WSTOPSIG (status) != SIGSTOP)
2428 {
2429 /* The thread was stopped with a signal other than SIGSTOP. */
2430
2431 save_sigtrap (lp);
2432
2433 if (debug_linux_nat)
2434 fprintf_unfiltered (gdb_stdlog,
2435 "SWC: Pending event %s in %s\n",
2436 status_to_str ((int) status),
2437 target_pid_to_str (lp->ptid));
2438
2439 /* Save the sigtrap event. */
2440 lp->status = status;
2441 gdb_assert (lp->signalled);
2442 }
2443 else
2444 {
2445 /* We caught the SIGSTOP that we intended to catch, so
2446 there's no SIGSTOP pending. */
2447
2448 if (debug_linux_nat)
2449 fprintf_unfiltered (gdb_stdlog,
2450 "SWC: Delayed SIGSTOP caught for %s.\n",
2451 target_pid_to_str (lp->ptid));
2452
2453 /* Reset SIGNALLED only after the stop_wait_callback call
2454 above, as that call does a gdb_assert on SIGNALLED.  */
2455 lp->signalled = 0;
2456 }
2457 }
2458
2459 return 0;
2460 }
2461
2462 /* Return non-zero if LP has a wait status pending. */
2463
2464 static int
2465 status_callback (struct lwp_info *lp, void *data)
2466 {
2467 /* Only report a pending wait status if we pretend that this has
2468 indeed been resumed. */
2469 if (!lp->resumed)
2470 return 0;
2471
2472 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2473 {
2474 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2475 or a pending process exit.  Note that `W_EXITCODE(0,0) ==
2476 0', so a clean process exit cannot be stored pending in
2477 lp->status; it is indistinguishable from
2478 no-pending-status.  */
2479 return 1;
2480 }
2481
2482 if (lp->status != 0)
2483 return 1;
2484
2485 return 0;
2486 }
2487
2488 /* Return non-zero if LP isn't stopped. */
2489
2490 static int
2491 running_callback (struct lwp_info *lp, void *data)
2492 {
2493 return (!lp->stopped
2494 || ((lp->status != 0
2495 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2496 && lp->resumed));
2497 }
2498
2499 /* Count the LWP's that have had events. */
2500
2501 static int
2502 count_events_callback (struct lwp_info *lp, void *data)
2503 {
2504 int *count = data;
2505
2506 gdb_assert (count != NULL);
2507
2508 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2509 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2510 (*count)++;
2511
2512 return 0;
2513 }
2514
2515 /* Select the LWP (if any) that is currently being single-stepped. */
2516
2517 static int
2518 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2519 {
2520 if (lp->last_resume_kind == resume_step
2521 && lp->status != 0)
2522 return 1;
2523 else
2524 return 0;
2525 }
2526
2527 /* Select the Nth LWP that has had a SIGTRAP event. */
2528
2529 static int
2530 select_event_lwp_callback (struct lwp_info *lp, void *data)
2531 {
2532 int *selector = data;
2533
2534 gdb_assert (selector != NULL);
2535
2536 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2537 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2538 if ((*selector)-- == 0)
2539 return 1;
2540
2541 return 0;
2542 }
2543
2544 static int
2545 cancel_breakpoint (struct lwp_info *lp)
2546 {
2547 /* Arrange for a breakpoint to be hit again later. We don't keep
2548 the SIGTRAP status and don't forward the SIGTRAP signal to the
2549 LWP.  We will handle the current event; eventually we will resume
2550 this LWP, and this breakpoint will trap again.
2551
2552 If we do not do this, then we run the risk that the user will
2553 delete or disable the breakpoint, but the LWP will have already
2554 tripped on it. */
2555
2556 struct regcache *regcache = get_thread_regcache (lp->ptid);
2557 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2558 CORE_ADDR pc;
2559
2560 pc = regcache_read_pc (regcache) - target_decr_pc_after_break (gdbarch);
2561 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2562 {
2563 if (debug_linux_nat)
2564 fprintf_unfiltered (gdb_stdlog,
2565 "CB: Push back breakpoint for %s\n",
2566 target_pid_to_str (lp->ptid));
2567
2568 /* Back up the PC if necessary. */
2569 if (target_decr_pc_after_break (gdbarch))
2570 regcache_write_pc (regcache, pc);
2571
2572 return 1;
2573 }
2574 return 0;
2575 }
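
/* On the PC rewind above: on targets whose breakpoint instruction
   leaves the PC pointing just past itself -- e.g. x86, where the
   one-byte int3 makes target_decr_pc_after_break () return 1 -- a
   thread that hit a breakpoint planted at ADDR stops with
   PC == ADDR + 1.  Writing back PC = ADDR means that, once resumed,
   the thread re-executes the original instruction, or re-traps if
   the breakpoint is still inserted.  */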
2576
2577 static int
2578 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2579 {
2580 struct lwp_info *event_lp = data;
2581
2582 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2583 if (lp == event_lp)
2584 return 0;
2585
2586 /* If a LWP other than the LWP that we're reporting an event for has
2587 hit a GDB breakpoint (as opposed to some random trap signal),
2588 then just arrange for it to hit it again later. We don't keep
2589 the SIGTRAP status and don't forward the SIGTRAP signal to the
2590 LWP.  We will handle the current event; eventually we will resume
2591 all LWPs, and this one will get its breakpoint trap again.
2592
2593 If we do not do this, then we run the risk that the user will
2594 delete or disable the breakpoint, but the LWP will have already
2595 tripped on it. */
2596
2597 if (linux_nat_lp_status_is_event (lp)
2598 && cancel_breakpoint (lp))
2599 /* Throw away the SIGTRAP. */
2600 lp->status = 0;
2601
2602 return 0;
2603 }
2604
2605 /* Select one LWP out of those that have events pending. */
2606
2607 static void
2608 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2609 {
2610 int num_events = 0;
2611 int random_selector;
2612 struct lwp_info *event_lp;
2613
2614 /* Record the wait status for the original LWP. */
2615 (*orig_lp)->status = *status;
2616
2617 /* Give preference to any LWP that is being single-stepped. */
2618 event_lp = iterate_over_lwps (filter,
2619 select_singlestep_lwp_callback, NULL);
2620 if (event_lp != NULL)
2621 {
2622 if (debug_linux_nat)
2623 fprintf_unfiltered (gdb_stdlog,
2624 "SEL: Select single-step %s\n",
2625 target_pid_to_str (event_lp->ptid));
2626 }
2627 else
2628 {
2629 /* No single-stepping LWP. Select one at random, out of those
2630 which have had SIGTRAP events. */
2631
2632 /* First see how many SIGTRAP events we have. */
2633 iterate_over_lwps (filter, count_events_callback, &num_events);
2634
2635 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2636 random_selector = (int)
2637 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2638
2639 if (debug_linux_nat && num_events > 1)
2640 fprintf_unfiltered (gdb_stdlog,
2641 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2642 num_events, random_selector);
2643
2644 event_lp = iterate_over_lwps (filter,
2645 select_event_lwp_callback,
2646 &random_selector);
2647 }
2648
2649 if (event_lp != NULL)
2650 {
2651 /* Switch the event LWP. */
2652 *orig_lp = event_lp;
2653 *status = event_lp->status;
2654 }
2655
2656 /* Flush the wait status for the event LWP. */
2657 (*orig_lp)->status = 0;
2658 }
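
/* A worked example of the random pick above: with num_events == 3,

     (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0))

   scales rand ()'s [0, RAND_MAX] range into [0.0, 3.0) and
   truncates, yielding 0, 1 or 2 with near-equal probability; that
   index is then counted down by select_event_lwp_callback to pick
   the Nth resumed LWP with a pending SIGTRAP event.  */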
2659
2660 /* Return non-zero if LP has been resumed. */
2661
2662 static int
2663 resumed_callback (struct lwp_info *lp, void *data)
2664 {
2665 return lp->resumed;
2666 }
2667
2668 /* Stop an active thread, verify it still exists, then resume it.  If
2669 the thread ends up with a pending status, then it is not resumed,
2670 and *DATA (really a pointer to int) is set.  */
2671
2672 static int
2673 stop_and_resume_callback (struct lwp_info *lp, void *data)
2674 {
2675 int *new_pending_p = data;
2676
2677 if (!lp->stopped)
2678 {
2679 ptid_t ptid = lp->ptid;
2680
2681 stop_callback (lp, NULL);
2682 stop_wait_callback (lp, NULL);
2683
2684 /* Resume if the lwp still exists, and the core wanted it
2685 running. */
2686 lp = find_lwp_pid (ptid);
2687 if (lp != NULL)
2688 {
2689 if (lp->last_resume_kind == resume_stop
2690 && lp->status == 0)
2691 {
2692 /* The core wanted the LWP to stop. Even if it stopped
2693 cleanly (with SIGSTOP), leave the event pending. */
2694 if (debug_linux_nat)
2695 fprintf_unfiltered (gdb_stdlog,
2696 "SARC: core wanted LWP %ld stopped "
2697 "(leaving SIGSTOP pending)\n",
2698 ptid_get_lwp (lp->ptid));
2699 lp->status = W_STOPCODE (SIGSTOP);
2700 }
2701
2702 if (lp->status == 0)
2703 {
2704 if (debug_linux_nat)
2705 fprintf_unfiltered (gdb_stdlog,
2706 "SARC: re-resuming LWP %ld\n",
2707 ptid_get_lwp (lp->ptid));
2708 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
2709 }
2710 else
2711 {
2712 if (debug_linux_nat)
2713 fprintf_unfiltered (gdb_stdlog,
2714 "SARC: not re-resuming LWP %ld "
2715 "(has pending)\n",
2716 ptid_get_lwp (lp->ptid));
2717 if (new_pending_p)
2718 *new_pending_p = 1;
2719 }
2720 }
2721 }
2722 return 0;
2723 }
2724
2725 /* Check if we should go on and pass this event to common code.
2726 Return the affected lwp if we are, or NULL otherwise. If we stop
2727 all lwps temporarily, we may end up with new pending events in some
2728 other lwp. In that case set *NEW_PENDING_P to true. */
2729
2730 static struct lwp_info *
2731 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
2732 {
2733 struct lwp_info *lp;
2734 int event = linux_ptrace_get_extended_event (status);
2735
2736 *new_pending_p = 0;
2737
2738 lp = find_lwp_pid (pid_to_ptid (lwpid));
2739
2740 /* Check for stop events reported by a process we didn't already
2741 know about - anything not already in our LWP list.
2742
2743 If we're expecting to receive stopped processes after
2744 fork, vfork, and clone events, then we'll just add the
2745 new one to our list and go back to waiting for the event
2746 to be reported - the stopped process might be returned
2747 from waitpid before or after the event is.
2748
2749 But note the case of a non-leader thread exec'ing after the
2750 leader has exited, and gone from our lists.  The non-leader
2751 thread changes its tid to the tgid.  */
2752
2753 if (WIFSTOPPED (status) && lp == NULL
2754 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
2755 {
2756 /* A multi-thread exec after we had seen the leader exiting. */
2757 if (debug_linux_nat)
2758 fprintf_unfiltered (gdb_stdlog,
2759 "LLW: Re-adding thread group leader LWP %d.\n",
2760 lwpid);
2761
2762 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
2763 lp->stopped = 1;
2764 lp->resumed = 1;
2765 add_thread (lp->ptid);
2766 }
2767
2768 if (WIFSTOPPED (status) && !lp)
2769 {
2770 add_to_pid_list (&stopped_pids, lwpid, status);
2771 return NULL;
2772 }
2773
2774 /* Make sure we don't report an event for the exit of an LWP not in
2775 our list, i.e. not part of the current process. This can happen
2776 if we detach from a program we originally forked and then it
2777 exits. */
2778 if (!WIFSTOPPED (status) && !lp)
2779 return NULL;
2780
2781 /* This LWP is stopped now. (And if dead, this prevents it from
2782 ever being continued.) */
2783 lp->stopped = 1;
2784
2785 /* Handle GNU/Linux's syscall SIGTRAPs. */
2786 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2787 {
2788 /* No longer need the sysgood bit. The ptrace event ends up
2789 recorded in lp->waitstatus if we care for it. We can carry
2790 on handling the event like a regular SIGTRAP from here
2791 on. */
2792 status = W_STOPCODE (SIGTRAP);
2793 if (linux_handle_syscall_trap (lp, 0))
2794 return NULL;
2795 }
2796
2797 /* Handle GNU/Linux's extended waitstatus for trace events. */
2798 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2799 && linux_is_extended_waitstatus (status))
2800 {
2801 if (debug_linux_nat)
2802 fprintf_unfiltered (gdb_stdlog,
2803 "LLW: Handling extended status 0x%06x\n",
2804 status);
2805 if (linux_handle_extended_wait (lp, status, 0))
2806 return NULL;
2807 }
2808
2809 if (linux_nat_status_is_event (status))
2810 save_sigtrap (lp);
2811
2812 /* Check if the thread has exited. */
2813 if ((WIFEXITED (status) || WIFSIGNALED (status))
2814 && num_lwps (ptid_get_pid (lp->ptid)) > 1)
2815 {
2816 /* If this is the main thread, we must stop all threads and verify
2817 if they are still alive. This is because in the nptl thread model
2818 on Linux 2.4, there is no signal issued for exiting LWPs
2819 other than the main thread. We only get the main thread exit
2820 signal once all child threads have already exited. If we
2821 stop all the threads and use the stop_wait_callback to check
2822 if they have exited we can determine whether this signal
2823 should be ignored or whether it means the end of the debugged
2824 application, regardless of which threading model is being
2825 used. */
2826 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2827 {
2828 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
2829 stop_and_resume_callback, new_pending_p);
2830 }
2831
2832 if (debug_linux_nat)
2833 fprintf_unfiltered (gdb_stdlog,
2834 "LLW: %s exited.\n",
2835 target_pid_to_str (lp->ptid));
2836
2837 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
2838 {
2839 /* If there is at least one more LWP, then the exit signal
2840 was not the end of the debugged application and should be
2841 ignored. */
2842 exit_lwp (lp);
2843 return NULL;
2844 }
2845 }
2846
2847 /* Check if the current LWP has previously exited. In the nptl
2848 thread model, LWPs other than the main thread do not issue
2849 signals when they exit so we must check whenever the thread has
2850 stopped. A similar check is made in stop_wait_callback(). */
2851 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
2852 {
2853 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
2854
2855 if (debug_linux_nat)
2856 fprintf_unfiltered (gdb_stdlog,
2857 "LLW: %s exited.\n",
2858 target_pid_to_str (lp->ptid));
2859
2860 exit_lwp (lp);
2861
2862 /* Make sure there is at least one thread running. */
2863 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
2864
2865 /* Discard the event. */
2866 return NULL;
2867 }
2868
2869 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2870 an attempt to stop an LWP. */
2871 if (lp->signalled
2872 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2873 {
2874 if (debug_linux_nat)
2875 fprintf_unfiltered (gdb_stdlog,
2876 "LLW: Delayed SIGSTOP caught for %s.\n",
2877 target_pid_to_str (lp->ptid));
2878
2879 lp->signalled = 0;
2880
2881 if (lp->last_resume_kind != resume_stop)
2882 {
2883 /* This is a delayed SIGSTOP. */
2884
2885 registers_changed ();
2886
2887 if (linux_nat_prepare_to_resume != NULL)
2888 linux_nat_prepare_to_resume (lp);
2889 linux_ops->to_resume (linux_ops,
2890 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2891 lp->step, GDB_SIGNAL_0);
2892 if (debug_linux_nat)
2893 fprintf_unfiltered (gdb_stdlog,
2894 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2895 lp->step ?
2896 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2897 target_pid_to_str (lp->ptid));
2898
2899 lp->stopped = 0;
2900 gdb_assert (lp->resumed);
2901
2902 /* Discard the event. */
2903 return NULL;
2904 }
2905 }
2906
2907 /* Make sure we don't report a SIGINT that we have already displayed
2908 for another thread. */
2909 if (lp->ignore_sigint
2910 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2911 {
2912 if (debug_linux_nat)
2913 fprintf_unfiltered (gdb_stdlog,
2914 "LLW: Delayed SIGINT caught for %s.\n",
2915 target_pid_to_str (lp->ptid));
2916
2917 /* This is a delayed SIGINT. */
2918 lp->ignore_sigint = 0;
2919
2920 registers_changed ();
2921 if (linux_nat_prepare_to_resume != NULL)
2922 linux_nat_prepare_to_resume (lp);
2923 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
2924 lp->step, GDB_SIGNAL_0);
2925 if (debug_linux_nat)
2926 fprintf_unfiltered (gdb_stdlog,
2927 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2928 lp->step ?
2929 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2930 target_pid_to_str (lp->ptid));
2931
2932 lp->stopped = 0;
2933 gdb_assert (lp->resumed);
2934
2935 /* Discard the event. */
2936 return NULL;
2937 }
2938
2939 /* An interesting event. */
2940 gdb_assert (lp);
2941 lp->status = status;
2942 return lp;
2943 }
2944
2945 /* Detect zombie thread group leaders, and "exit" them. We can't reap
2946 their exits until all other threads in the group have exited. */
2947
2948 static void
2949 check_zombie_leaders (void)
2950 {
2951 struct inferior *inf;
2952
2953 ALL_INFERIORS (inf)
2954 {
2955 struct lwp_info *leader_lp;
2956
2957 if (inf->pid == 0)
2958 continue;
2959
2960 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
2961 if (leader_lp != NULL
2962 /* Check if there are other threads in the group, as we may
2963 have raced with the inferior simply exiting. */
2964 && num_lwps (inf->pid) > 1
2965 && linux_proc_pid_is_zombie (inf->pid))
2966 {
2967 if (debug_linux_nat)
2968 fprintf_unfiltered (gdb_stdlog,
2969 "CZL: Thread group leader %d zombie "
2970 "(it exited, or another thread execd).\n",
2971 inf->pid);
2972
2973 /* A leader zombie can mean one of two things:
2974
2975 - It exited, and there's an exit status pending
2976 available, or only the leader exited (not the whole
2977 program). In the latter case, we can't waitpid the
2978 leader's exit status until all other threads are gone.
2979
2980 - There are 3 or more threads in the group, and a thread
2981 other than the leader exec'd. On an exec, the Linux
2982 kernel destroys all other threads (except the execing
2983 one) in the thread group, and resets the execing thread's
2984 tid to the tgid. No exit notification is sent for the
2985 execing thread -- from the ptracer's perspective, it
2986 appears as though the execing thread just vanishes.
2987 Until we reap all other threads except the leader and the
2988 execing thread, the leader will be zombie, and the
2989 execing thread will be in `D (disk sleep)'.  As soon as
2990 all other threads are reaped, the execing thread changes
2991 its tid to the tgid, and the previous (zombie) leader
2992 vanishes, giving way to the "new" leader.  We could try
2993 distinguishing the exit and exec cases, by waiting once
2994 more, and seeing if something comes out, but it doesn't
2995 sound useful. The previous leader _does_ go away, and
2996 we'll re-add the new one once we see the exec event
2997 (which is just the same as what would happen if the
2998 previous leader did exit voluntarily before some other
2999 thread execs). */
3000
3001 if (debug_linux_nat)
3002 fprintf_unfiltered (gdb_stdlog,
3003 "CZL: Thread group leader %d vanished.\n",
3004 inf->pid);
3005 exit_lwp (leader_lp);
3006 }
3007 }
3008 }
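
/* For reference, linux_proc_pid_is_zombie (used above) keys off the
   State line of /proc/PID/status, which for a zombie leader reads:

     State:	Z (zombie)

   while the execing non-leader thread described in the long comment
   above shows up as "D (disk sleep)" until the other threads have
   been reaped.  */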
3009
3010 static ptid_t
3011 linux_nat_wait_1 (struct target_ops *ops,
3012 ptid_t ptid, struct target_waitstatus *ourstatus,
3013 int target_options)
3014 {
3015 static sigset_t prev_mask;
3016 enum resume_kind last_resume_kind;
3017 struct lwp_info *lp;
3018 int status;
3019
3020 if (debug_linux_nat)
3021 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3022
3023 /* The first time we get here after starting a new inferior, we may
3024 not have added it to the LWP list yet - this is the earliest
3025 moment at which we know its PID. */
3026 if (ptid_is_pid (inferior_ptid))
3027 {
3028 /* Upgrade the main thread's ptid. */
3029 thread_change_ptid (inferior_ptid,
3030 ptid_build (ptid_get_pid (inferior_ptid),
3031 ptid_get_pid (inferior_ptid), 0));
3032
3033 lp = add_initial_lwp (inferior_ptid);
3034 lp->resumed = 1;
3035 }
3036
3037 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3038 block_child_signals (&prev_mask);
3039
3040 retry:
3041 lp = NULL;
3042 status = 0;
3043
3044 /* First check if there is a LWP with a wait status pending. */
3045 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3046 {
3047 /* Any LWP in the PTID group that's been resumed will do. */
3048 lp = iterate_over_lwps (ptid, status_callback, NULL);
3049 if (lp)
3050 {
3051 if (debug_linux_nat && lp->status)
3052 fprintf_unfiltered (gdb_stdlog,
3053 "LLW: Using pending wait status %s for %s.\n",
3054 status_to_str (lp->status),
3055 target_pid_to_str (lp->ptid));
3056 }
3057 }
3058 else if (ptid_lwp_p (ptid))
3059 {
3060 if (debug_linux_nat)
3061 fprintf_unfiltered (gdb_stdlog,
3062 "LLW: Waiting for specific LWP %s.\n",
3063 target_pid_to_str (ptid));
3064
3065 /* We have a specific LWP to check. */
3066 lp = find_lwp_pid (ptid);
3067 gdb_assert (lp);
3068
3069 if (debug_linux_nat && lp->status)
3070 fprintf_unfiltered (gdb_stdlog,
3071 "LLW: Using pending wait status %s for %s.\n",
3072 status_to_str (lp->status),
3073 target_pid_to_str (lp->ptid));
3074
3075 /* We check for lp->waitstatus in addition to lp->status,
3076 because we can have pending process exits recorded in
3077 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3078 an additional lp->status_p flag. */
3079 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3080 lp = NULL;
3081 }
3082
3083 if (!target_can_async_p ())
3084 {
3085 /* Causes SIGINT to be passed on to the attached process. */
3086 set_sigint_trap ();
3087 }
3088
3089 /* But if we don't find a pending event, we'll have to wait. */
3090
3091 while (lp == NULL)
3092 {
3093 pid_t lwpid;
3094
3095 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3096 quirks:
3097
3098 - If the thread group leader exits while other threads in the
3099 thread group still exist, waitpid(TGID, ...) hangs.  That
3100 waitpid won't return an exit status until the other threads
3101 in the group are reaped.
3102
3103 - When a non-leader thread execs, that thread just vanishes
3104 without reporting an exit (so we'd hang if we waited for it
3105 explicitly in that case). The exec event is reported to
3106 the TGID pid. */
3107
3108 errno = 0;
3109 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3110 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3111 lwpid = my_waitpid (-1, &status, WNOHANG);
3112
3113 if (debug_linux_nat)
3114 fprintf_unfiltered (gdb_stdlog,
3115 "LNW: waitpid(-1, ...) returned %d, %s\n",
3116 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3117
3118 if (lwpid > 0)
3119 {
3120 /* If this is true, then we paused LWPs momentarily, and may
3121 now have pending events to handle. */
3122 int new_pending;
3123
3124 if (debug_linux_nat)
3125 {
3126 fprintf_unfiltered (gdb_stdlog,
3127 "LLW: waitpid %ld received %s\n",
3128 (long) lwpid, status_to_str (status));
3129 }
3130
3131 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3132
3133 /* STATUS is no longer valid; use LP->STATUS instead.  */
3134 status = 0;
3135
3136 if (lp && !ptid_match (lp->ptid, ptid))
3137 {
3138 gdb_assert (lp->resumed);
3139
3140 if (debug_linux_nat)
3141 fprintf_unfiltered (gdb_stdlog,
3142 "LWP %ld got an event %06x, "
3143 "leaving pending.\n",
3144 ptid_get_lwp (lp->ptid), lp->status);
3145
3146 if (WIFSTOPPED (lp->status))
3147 {
3148 if (WSTOPSIG (lp->status) != SIGSTOP)
3149 {
3150 /* Cancel breakpoint hits. The breakpoint may
3151 be removed before we fetch events from this
3152 process to report to the core. It is best
3153 not to assume the moribund breakpoints
3154 heuristic always handles these cases --- it
3155 could be too many events go through to the
3156 core before this one is handled. All-stop
3157 always cancels breakpoint hits in all
3158 threads. */
3159 if (non_stop
3160 && linux_nat_lp_status_is_event (lp)
3161 && cancel_breakpoint (lp))
3162 {
3163 /* Throw away the SIGTRAP. */
3164 lp->status = 0;
3165
3166 if (debug_linux_nat)
3167 fprintf_unfiltered (gdb_stdlog,
3168 "LLW: LWP %ld hit a "
3169 "breakpoint while "
3170 "waiting for another "
3171 "process; "
3172 "cancelled it\n",
3173 ptid_get_lwp (lp->ptid));
3174 }
3175 }
3176 else
3177 lp->signalled = 0;
3178 }
3179 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3180 {
3181 if (debug_linux_nat)
3182 fprintf_unfiltered (gdb_stdlog,
3183 "Process %ld exited while stopping "
3184 "LWPs\n",
3185 ptid_get_lwp (lp->ptid));
3186
3187 /* This was the last lwp in the process.  Events are
3188 serialized to the GDB core, so we can't report this
3189 one right now.  But since GDB core and the other
3190 target layers will want to be notified about the
3191 exit code/signal, leave the status pending for the
3192 next time we're able to report it.  */
3194
3195 /* Dead LWPs aren't expected to report a pending
3196 SIGSTOP.  */
3197 lp->signalled = 0;
3198
3199 /* Store the pending event in the waitstatus as
3200 well, because W_EXITCODE(0,0) == 0. */
3201 store_waitstatus (&lp->waitstatus, lp->status);
3202 }
3203
3204 /* Keep looking. */
3205 lp = NULL;
3206 }
3207
3208 if (new_pending)
3209 {
3210 /* Some LWP now has a pending event. Go all the way
3211 back to check it. */
3212 goto retry;
3213 }
3214
3215 if (lp)
3216 {
3217 /* We got an event to report to the core. */
3218 break;
3219 }
3220
3221 /* Retry until nothing comes out of waitpid. A single
3222 SIGCHLD can indicate more than one child stopped. */
3223 continue;
3224 }
3225
3226 /* Check for zombie thread group leaders. Those can't be reaped
3227 until all other threads in the thread group are. */
3228 check_zombie_leaders ();
3229
3230 /* If there are no resumed children left, bail. We'd be stuck
3231 forever in the sigsuspend call below otherwise. */
3232 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3233 {
3234 if (debug_linux_nat)
3235 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3236
3237 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3238
3239 if (!target_can_async_p ())
3240 clear_sigint_trap ();
3241
3242 restore_child_signals_mask (&prev_mask);
3243 return minus_one_ptid;
3244 }
3245
3246 /* No interesting event to report to the core. */
3247
3248 if (target_options & TARGET_WNOHANG)
3249 {
3250 if (debug_linux_nat)
3251 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3252
3253 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3254 restore_child_signals_mask (&prev_mask);
3255 return minus_one_ptid;
3256 }
3257
3258 /* We shouldn't end up here unless we want to try again. */
3259 gdb_assert (lp == NULL);
3260
3261 /* Block until we get an event reported with SIGCHLD. */
3262 if (debug_linux_nat)
3263 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
3264 sigsuspend (&suspend_mask);
3265 }
3266
3267 if (!target_can_async_p ())
3268 clear_sigint_trap ();
3269
3270 gdb_assert (lp);
3271
3272 status = lp->status;
3273 lp->status = 0;
3274
3275 /* Don't report signals that GDB isn't interested in, such as
3276 signals that are neither printed nor stopped upon. Stopping all
3277 threads can be a bit time-consuming so if we want decent
3278 performance with heavily multi-threaded programs, especially when
3279 they're using a high frequency timer, we'd better avoid it if we
3280 can. */
3281
3282 if (WIFSTOPPED (status))
3283 {
3284 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3285
3286 /* When using hardware single-step, we need to report every signal.
3287 Otherwise, signals in pass_mask may be short-circuited. */
3288 if (!lp->step
3289 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3290 {
3291 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3292 here? It is not clear we should. GDB may not expect
3293 other threads to run. On the other hand, not resuming
3294 newly attached threads may cause an unwanted delay in
3295 getting them running. */
3296 registers_changed ();
3297 if (linux_nat_prepare_to_resume != NULL)
3298 linux_nat_prepare_to_resume (lp);
3299 linux_ops->to_resume (linux_ops,
3300 pid_to_ptid (ptid_get_lwp (lp->ptid)),
3301 lp->step, signo);
3302 if (debug_linux_nat)
3303 fprintf_unfiltered (gdb_stdlog,
3304 "LLW: %s %s, %s (preempt 'handle')\n",
3305 lp->step ?
3306 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3307 target_pid_to_str (lp->ptid),
3308 (signo != GDB_SIGNAL_0
3309 ? strsignal (gdb_signal_to_host (signo))
3310 : "0"));
3311 lp->stopped = 0;
3312 goto retry;
3313 }
3314
3315 if (!non_stop)
3316 {
3317 /* Only do the below in all-stop, as we currently use SIGINT
3318 to implement target_stop (see linux_nat_stop) in
3319 non-stop. */
3320 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3321 {
3322 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3323 forwarded to the entire process group, that is, all LWPs
3324 will receive it - unless they're using CLONE_THREAD to
3325 share signals. Since we only want to report it once, we
3326 mark it as ignored for all LWPs except this one. */
3327 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3328 set_ignore_sigint, NULL);
3329 lp->ignore_sigint = 0;
3330 }
3331 else
3332 maybe_clear_ignore_sigint (lp);
3333 }
3334 }
3335
3336 /* This LWP is stopped now. */
3337 lp->stopped = 1;
3338
3339 if (debug_linux_nat)
3340 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3341 status_to_str (status), target_pid_to_str (lp->ptid));
3342
3343 if (!non_stop)
3344 {
3345 /* Now stop all other LWP's ... */
3346 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3347
3348 /* ... and wait until all of them have reported back that
3349 they're no longer running. */
3350 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3351
3352 /* If we're not waiting for a specific LWP, choose an event LWP
3353 from among those that have had events. Giving equal priority
3354 to all LWPs that have had events helps prevent
3355 starvation. */
3356 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3357 select_event_lwp (ptid, &lp, &status);
3358
3359 /* Now that we've selected our final event LWP, cancel any
3360 breakpoints in other LWPs that have hit a GDB breakpoint.
3361 See the comment in cancel_breakpoints_callback to find out
3362 why. */
3363 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3364
3365 /* We'll need this to determine whether to report a SIGSTOP as
3366 TARGET_WAITKIND_STOPPED with GDB_SIGNAL_0.  Need to take a copy
3367 because resume_clear_callback clears it.  */
3368 last_resume_kind = lp->last_resume_kind;
3369
3370 /* In all-stop, from the core's perspective, all LWPs are now
3371 stopped until a new resume action is sent over. */
3372 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3373 }
3374 else
3375 {
3376 /* See above. */
3377 last_resume_kind = lp->last_resume_kind;
3378 resume_clear_callback (lp, NULL);
3379 }
3380
3381 if (linux_nat_status_is_event (status))
3382 {
3383 if (debug_linux_nat)
3384 fprintf_unfiltered (gdb_stdlog,
3385 "LLW: trap ptid is %s.\n",
3386 target_pid_to_str (lp->ptid));
3387 }
3388
3389 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3390 {
3391 *ourstatus = lp->waitstatus;
3392 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3393 }
3394 else
3395 store_waitstatus (ourstatus, status);
3396
3397 if (debug_linux_nat)
3398 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3399
3400 restore_child_signals_mask (&prev_mask);
3401
3402 if (last_resume_kind == resume_stop
3403 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3404 && WSTOPSIG (status) == SIGSTOP)
3405 {
3406 /* A thread that has been requested to stop by GDB with
3407 target_stop, and it stopped cleanly, so report as SIG0. The
3408 use of SIGSTOP is an implementation detail. */
3409 ourstatus->value.sig = GDB_SIGNAL_0;
3410 }
3411
3412 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3413 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3414 lp->core = -1;
3415 else
3416 lp->core = linux_common_core_of_thread (lp->ptid);
3417
3418 return lp->ptid;
3419 }
3420
3421 /* Resume LWPs that are currently stopped without any pending status
3422 to report, but are resumed from the core's perspective. */
3423
3424 static int
3425 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3426 {
3427 ptid_t *wait_ptid_p = data;
3428
3429 if (lp->stopped
3430 && lp->resumed
3431 && lp->status == 0
3432 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3433 {
3434 struct regcache *regcache = get_thread_regcache (lp->ptid);
3435 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3436 CORE_ADDR pc = regcache_read_pc (regcache);
3437
3438 gdb_assert (is_executing (lp->ptid));
3439
3440 /* Don't bother if there's a breakpoint at PC that we'd hit
3441 immediately, and we're not waiting for this LWP. */
3442 if (!ptid_match (lp->ptid, *wait_ptid_p))
3443 {
3444 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3445 return 0;
3446 }
3447
3448 if (debug_linux_nat)
3449 fprintf_unfiltered (gdb_stdlog,
3450 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3451 target_pid_to_str (lp->ptid),
3452 paddress (gdbarch, pc),
3453 lp->step);
3454
3455 registers_changed ();
3456 if (linux_nat_prepare_to_resume != NULL)
3457 linux_nat_prepare_to_resume (lp);
3458 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
3459 lp->step, GDB_SIGNAL_0);
3460 lp->stopped = 0;
3461 lp->stopped_by_watchpoint = 0;
3462 }
3463
3464 return 0;
3465 }
3466
3467 static ptid_t
3468 linux_nat_wait (struct target_ops *ops,
3469 ptid_t ptid, struct target_waitstatus *ourstatus,
3470 int target_options)
3471 {
3472 ptid_t event_ptid;
3473
3474 if (debug_linux_nat)
3475 {
3476 char *options_string;
3477
3478 options_string = target_options_to_string (target_options);
3479 fprintf_unfiltered (gdb_stdlog,
3480 "linux_nat_wait: [%s], [%s]\n",
3481 target_pid_to_str (ptid),
3482 options_string);
3483 xfree (options_string);
3484 }
3485
3486 /* Flush the async file first. */
3487 if (target_can_async_p ())
3488 async_file_flush ();
3489
3490 /* Resume LWPs that are currently stopped without any pending status
3491 to report, but are resumed from the core's perspective. LWPs get
3492 in this state if we find them stopping at a time we're not
3493 interested in reporting the event (target_wait on a
3494 specific_process, for example, see linux_nat_wait_1), and
3495 meanwhile the event became uninteresting. Don't bother resuming
3496 LWPs we're not going to wait for if they'd stop immediately. */
3497 if (non_stop)
3498 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3499
3500 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3501
3502 /* If we requested any event, and something came out, assume there
3503 may be more. If we requested a specific lwp or process, also
3504 assume there may be more. */
3505 if (target_can_async_p ()
3506 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3507 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3508 || !ptid_equal (ptid, minus_one_ptid)))
3509 async_file_mark ();
3510
3511 /* Get ready for the next event. */
3512 if (target_can_async_p ())
3513 target_async (inferior_event_handler, 0);
3514
3515 return event_ptid;
3516 }
3517
3518 static int
3519 kill_callback (struct lwp_info *lp, void *data)
3520 {
3521 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3522
3523 errno = 0;
3524 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
3525 if (debug_linux_nat)
3526 {
3527 int save_errno = errno;
3528
3529 fprintf_unfiltered (gdb_stdlog,
3530 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3531 target_pid_to_str (lp->ptid),
3532 save_errno ? safe_strerror (save_errno) : "OK");
3533 }
3534
3535 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3536
3537 errno = 0;
3538 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
3539 if (debug_linux_nat)
3540 {
3541 int save_errno = errno;
3542
3543 fprintf_unfiltered (gdb_stdlog,
3544 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3545 target_pid_to_str (lp->ptid),
3546 save_errno ? safe_strerror (save_errno) : "OK");
3547 }
3548
3549 return 0;
3550 }
3551
3552 static int
3553 kill_wait_callback (struct lwp_info *lp, void *data)
3554 {
3555 pid_t pid;
3556
3557 /* We must make sure that there are no pending events (delayed
3558 SIGSTOPs, pending SIGTRAPs, etc.) so that the current program
3559 doesn't interfere with any following debugging session.  */
3560
3561 /* For cloned processes we must check both with __WCLONE and
3562 without, since the exit status of a cloned process isn't reported
3563 with __WCLONE. */
3564 if (lp->cloned)
3565 {
3566 do
3567 {
3568 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
3569 if (pid != (pid_t) -1)
3570 {
3571 if (debug_linux_nat)
3572 fprintf_unfiltered (gdb_stdlog,
3573 "KWC: wait %s received unknown.\n",
3574 target_pid_to_str (lp->ptid));
3575 /* The Linux kernel sometimes fails to kill a thread
3576 completely after PTRACE_KILL; that goes from the stop
3577 point in do_fork out to the one in
3578 get_signal_to_deliver and waits again.  So kill it
3579 again. */
3580 kill_callback (lp, NULL);
3581 }
3582 }
3583 while (pid == ptid_get_lwp (lp->ptid));
3584
3585 gdb_assert (pid == -1 && errno == ECHILD);
3586 }
3587
3588 do
3589 {
3590 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
3591 if (pid != (pid_t) -1)
3592 {
3593 if (debug_linux_nat)
3594 fprintf_unfiltered (gdb_stdlog,
3595 "KWC: wait %s received unknown.\n",
3596 target_pid_to_str (lp->ptid));
3597 /* See the call to kill_callback above. */
3598 kill_callback (lp, NULL);
3599 }
3600 }
3601 while (pid == ptid_get_lwp (lp->ptid));
3602
3603 gdb_assert (pid == -1 && errno == ECHILD);
3604 return 0;
3605 }
3606
3607 static void
3608 linux_nat_kill (struct target_ops *ops)
3609 {
3610 struct target_waitstatus last;
3611 ptid_t last_ptid;
3612 int status;
3613
3614 /* If we're stopped while forking and we haven't followed yet,
3615 kill the other task. We need to do this first because the
3616 parent will be sleeping if this is a vfork. */
3617
3618 get_last_target_status (&last_ptid, &last);
3619
3620 if (last.kind == TARGET_WAITKIND_FORKED
3621 || last.kind == TARGET_WAITKIND_VFORKED)
3622 {
3623 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
3624 wait (&status);
3625
3626 /* Let the arch-specific native code know this process is
3627 gone. */
3628 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
3629 }
3630
3631 if (forks_exist_p ())
3632 linux_fork_killall ();
3633 else
3634 {
3635 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
3636
3637 /* Stop all threads before killing them, since ptrace requires
3638 that the thread is stopped to successfully PTRACE_KILL.  */
3639 iterate_over_lwps (ptid, stop_callback, NULL);
3640 /* ... and wait until all of them have reported back that
3641 they're no longer running. */
3642 iterate_over_lwps (ptid, stop_wait_callback, NULL);
3643
3644 /* Kill all LWP's ... */
3645 iterate_over_lwps (ptid, kill_callback, NULL);
3646
3647 /* ... and wait until we've flushed all events. */
3648 iterate_over_lwps (ptid, kill_wait_callback, NULL);
3649 }
3650
3651 target_mourn_inferior ();
3652 }
3653
3654 static void
3655 linux_nat_mourn_inferior (struct target_ops *ops)
3656 {
3657 int pid = ptid_get_pid (inferior_ptid);
3658
3659 purge_lwp_list (pid);
3660
3661 if (! forks_exist_p ())
3662 /* Normal case, no other forks available. */
3663 linux_ops->to_mourn_inferior (ops);
3664 else
3665 /* Multi-fork case. The current inferior_ptid has exited, but
3666 there are other viable forks to debug. Delete the exiting
3667 one and context-switch to the first available. */
3668 linux_fork_mourn_inferior ();
3669
3670 /* Let the arch-specific native code know this process is gone. */
3671 linux_nat_forget_process (pid);
3672 }
3673
3674 /* Convert a native/host siginfo object into/from the siginfo in the
3675 layout of the inferior's architecture. */
3676
3677 static void
3678 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3679 {
3680 int done = 0;
3681
3682 if (linux_nat_siginfo_fixup != NULL)
3683 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3684
3685 /* If there was no callback, or the callback didn't do anything,
3686 then just do a straight memcpy. */
3687 if (!done)
3688 {
3689 if (direction == 1)
3690 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3691 else
3692 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3693 }
3694 }
3695
3696 static enum target_xfer_status
3697 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3698 const char *annex, gdb_byte *readbuf,
3699 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3700 ULONGEST *xfered_len)
3701 {
3702 int pid;
3703 siginfo_t siginfo;
3704 gdb_byte inf_siginfo[sizeof (siginfo_t)];
3705
3706 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3707 gdb_assert (readbuf || writebuf);
3708
3709 pid = ptid_get_lwp (inferior_ptid);
3710 if (pid == 0)
3711 pid = ptid_get_pid (inferior_ptid);
3712
3713 if (offset > sizeof (siginfo))
3714 return TARGET_XFER_E_IO;
3715
3716 errno = 0;
3717 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3718 if (errno != 0)
3719 return TARGET_XFER_E_IO;
3720
3721 /* When GDB is built as a 64-bit application, ptrace writes into
3722 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3723 inferior with a 64-bit GDB should look the same as debugging it
3724 with a 32-bit GDB, we need to convert it. GDB core always sees
3725 the converted layout, so any read/write will have to be done
3726 post-conversion. */
3727 siginfo_fixup (&siginfo, inf_siginfo, 0);
3728
3729 if (offset + len > sizeof (siginfo))
3730 len = sizeof (siginfo) - offset;
3731
3732 if (readbuf != NULL)
3733 memcpy (readbuf, inf_siginfo + offset, len);
3734 else
3735 {
3736 memcpy (inf_siginfo + offset, writebuf, len);
3737
3738 /* Convert back to ptrace layout before flushing it out. */
3739 siginfo_fixup (&siginfo, inf_siginfo, 1);
3740
3741 errno = 0;
3742 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3743 if (errno != 0)
3744 return TARGET_XFER_E_IO;
3745 }
3746
3747 *xfered_len = len;
3748 return TARGET_XFER_OK;
3749 }
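
/* Illustrative sketch (not part of GDB): fetching a stopped tracee's
   siginfo with PTRACE_GETSIGINFO, the same call linux_xfer_siginfo
   makes above.  Assumes the caller has already ptrace-attached to
   TRACEE and the tracee is stopped.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <errno.h>
#include <stdio.h>

static int
fetch_siginfo (pid_t tracee, siginfo_t *info)
{
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, tracee, (void *) 0, info);
  if (errno != 0)
    return -1;			/* Mirrors the TARGET_XFER_E_IO path.  */
  printf ("si_signo=%d si_code=%d\n", info->si_signo, info->si_code);
  return 0;
}
#endif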
3750
3751 static enum target_xfer_status
3752 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3753 const char *annex, gdb_byte *readbuf,
3754 const gdb_byte *writebuf,
3755 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3756 {
3757 struct cleanup *old_chain;
3758 enum target_xfer_status xfer;
3759
3760 if (object == TARGET_OBJECT_SIGNAL_INFO)
3761 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3762 offset, len, xfered_len);
3763
3764 /* The target is connected but no live inferior is selected. Pass
3765 this request down to a lower stratum (e.g., the executable
3766 file). */
3767 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3768 return TARGET_XFER_EOF;
3769
3770 old_chain = save_inferior_ptid ();
3771
3772 if (ptid_lwp_p (inferior_ptid))
3773 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
3774
3775 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3776 offset, len, xfered_len);
3777
3778 do_cleanups (old_chain);
3779 return xfer;
3780 }
3781
3782 static int
3783 linux_thread_alive (ptid_t ptid)
3784 {
3785 int err, tmp_errno;
3786
3787 gdb_assert (ptid_lwp_p (ptid));
3788
3789 /* Send signal 0 instead of using ptrace, because ptracing a
3790 running thread errors out claiming that the thread doesn't
3791 exist. */
3792 err = kill_lwp (ptid_get_lwp (ptid), 0);
3793 tmp_errno = errno;
3794 if (debug_linux_nat)
3795 fprintf_unfiltered (gdb_stdlog,
3796 "LLTA: KILL(SIG0) %s (%s)\n",
3797 target_pid_to_str (ptid),
3798 err ? safe_strerror (tmp_errno) : "OK");
3799
3800 if (err != 0)
3801 return 0;
3802
3803 return 1;
3804 }
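
/* Illustrative sketch (not part of GDB): probing for thread
   existence with signal 0, as linux_thread_alive does above.  GDB's
   kill_lwp prefers the tkill syscall where available; plain kill ()
   is used here for brevity.  */
#if 0
#include <sys/types.h>
#include <signal.h>
#include <errno.h>

static int
lwp_is_alive (pid_t lwp)
{
  errno = 0;
  if (kill (lwp, 0) == 0)
    return 1;
  /* ESRCH means gone; EPERM etc. still means "exists".  */
  return errno != ESRCH;
}
#endif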
3805
3806 static int
3807 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3808 {
3809 return linux_thread_alive (ptid);
3810 }
3811
3812 static char *
3813 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
3814 {
3815 static char buf[64];
3816
3817 if (ptid_lwp_p (ptid)
3818 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3819 || num_lwps (ptid_get_pid (ptid)) > 1))
3820 {
3821 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
3822 return buf;
3823 }
3824
3825 return normal_pid_to_str (ptid);
3826 }
3827
3828 static char *
3829 linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
3830 {
3831 int pid = ptid_get_pid (thr->ptid);
3832 long lwp = ptid_get_lwp (thr->ptid);
3833 #define FORMAT "/proc/%d/task/%ld/comm"
3834 char buf[sizeof (FORMAT) + 30];
3835 FILE *comm_file;
3836 char *result = NULL;
3837
3838 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
3839 comm_file = gdb_fopen_cloexec (buf, "r");
3840 if (comm_file)
3841 {
3842 /* Not exported by the kernel, so we define it here. */
3843 #define COMM_LEN 16
3844 static char line[COMM_LEN + 1];
3845
3846 if (fgets (line, sizeof (line), comm_file))
3847 {
3848 char *nl = strchr (line, '\n');
3849
3850 if (nl)
3851 *nl = '\0';
3852 if (*line != '\0')
3853 result = line;
3854 }
3855
3856 fclose (comm_file);
3857 }
3858
3859 #undef COMM_LEN
3860 #undef FORMAT
3861
3862 return result;
3863 }
3864
3865 /* Accepts an integer PID; returns a string representing a file that
3866 can be opened to get the symbols for the child process. */
3867
3868 static char *
3869 linux_child_pid_to_exec_file (struct target_ops *self, int pid)
3870 {
3871 static char buf[PATH_MAX];
3872 char name[PATH_MAX];
3873
3874 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
3875 memset (buf, 0, PATH_MAX);
3876 if (readlink (name, buf, PATH_MAX - 1) <= 0)
3877 strcpy (buf, name);
3878
3879 return buf;
3880 }
3881
3882 /* Implement the to_xfer_partial interface for memory reads using the /proc
3883 filesystem. Because we can use a single read() call for /proc, this
3884 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3885 but it doesn't support writes. */
3886
3887 static enum target_xfer_status
3888 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3889 const char *annex, gdb_byte *readbuf,
3890 const gdb_byte *writebuf,
3891 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
3892 {
3893 LONGEST ret;
3894 int fd;
3895 char filename[64];
3896
3897 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3898 return TARGET_XFER_EOF;
3899
3900 /* Don't bother for one word. */
3901 if (len < 3 * sizeof (long))
3902 return TARGET_XFER_EOF;
3903
3904 /* We could keep this file open and cache it - possibly one per
3905 thread. That requires some juggling, but is even faster. */
3906 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3907 ptid_get_pid (inferior_ptid));
3908 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
3909 if (fd == -1)
3910 return TARGET_XFER_EOF;
3911
3912 /* If pread64 is available, use it. It's faster if the kernel
3913 supports it (only one syscall), and it's 64-bit safe even on
3914 32-bit platforms (for instance, SPARC debugging a SPARC64
3915 application). */
3916 #ifdef HAVE_PREAD64
3917 if (pread64 (fd, readbuf, len, offset) != len)
3918 #else
3919 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3920 #endif
3921 ret = 0;
3922 else
3923 ret = len;
3924
3925 close (fd);
3926
3927 if (ret == 0)
3928 return TARGET_XFER_EOF;
3929 else
3930 {
3931 *xfered_len = ret;
3932 return TARGET_XFER_OK;
3933 }
3934 }
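
/* Illustrative sketch (not part of GDB): reading another process's
   memory through /proc/<pid>/mem with a single pread64, the fast
   path linux_proc_xfer_partial implements above.  The target must be
   ptrace-stopped for the kernel to permit the read.  */
#if 0
#define _GNU_SOURCE		/* For pread64 and O_LARGEFILE.  */
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
read_inferior_memory (pid_t pid, unsigned long long addr,
		      void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;
  n = pread64 (fd, buf, len, addr);	/* One syscall, 64-bit safe.  */
  close (fd);
  return n;
}
#endif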
3935
3936
3937 /* Enumerate spufs IDs for process PID. */
3938 static LONGEST
3939 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
3940 {
3941 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
3942 LONGEST pos = 0;
3943 LONGEST written = 0;
3944 char path[128];
3945 DIR *dir;
3946 struct dirent *entry;
3947
3948 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
3949 dir = opendir (path);
3950 if (!dir)
3951 return -1;
3952
3953 rewinddir (dir);
3954 while ((entry = readdir (dir)) != NULL)
3955 {
3956 struct stat st;
3957 struct statfs stfs;
3958 int fd;
3959
3960 fd = atoi (entry->d_name);
3961 if (!fd)
3962 continue;
3963
3964 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
3965 if (stat (path, &st) != 0)
3966 continue;
3967 if (!S_ISDIR (st.st_mode))
3968 continue;
3969
3970 if (statfs (path, &stfs) != 0)
3971 continue;
3972 if (stfs.f_type != SPUFS_MAGIC)
3973 continue;
3974
3975 if (pos >= offset && pos + 4 <= offset + len)
3976 {
3977 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
3978 written += 4;
3979 }
3980 pos += 4;
3981 }
3982
3983 closedir (dir);
3984 return written;
3985 }
3986
3987 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
3988 object type, using the /proc file system. */
3989
3990 static enum target_xfer_status
3991 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
3992 const char *annex, gdb_byte *readbuf,
3993 const gdb_byte *writebuf,
3994 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3995 {
3996 char buf[128];
3997 int fd = 0;
3998 int ret = -1;
3999 int pid = ptid_get_pid (inferior_ptid);
4000
4001 if (!annex)
4002 {
4003 if (!readbuf)
4004 return TARGET_XFER_E_IO;
4005 else
4006 {
4007 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4008
4009 if (l < 0)
4010 return TARGET_XFER_E_IO;
4011 else if (l == 0)
4012 return TARGET_XFER_EOF;
4013 else
4014 {
4015 *xfered_len = (ULONGEST) l;
4016 return TARGET_XFER_OK;
4017 }
4018 }
4019 }
4020
4021 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4022 fd = gdb_open_cloexec (buf, writebuf ? O_WRONLY : O_RDONLY, 0);
4023 if (fd <= 0)
4024 return TARGET_XFER_E_IO;
4025
4026 if (offset != 0
4027 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4028 {
4029 close (fd);
4030 return TARGET_XFER_EOF;
4031 }
4032
4033 if (writebuf)
4034 ret = write (fd, writebuf, (size_t) len);
4035 else if (readbuf)
4036 ret = read (fd, readbuf, (size_t) len);
4037
4038 close (fd);
4039
4040 if (ret < 0)
4041 return TARGET_XFER_E_IO;
4042 else if (ret == 0)
4043 return TARGET_XFER_EOF;
4044 else
4045 {
4046 *xfered_len = (ULONGEST) ret;
4047 return TARGET_XFER_OK;
4048 }
4049 }
4050
4051
4052 /* Parse LINE as a signal set and add its set bits to SIGS. */
4053
4054 static void
4055 add_line_to_sigset (const char *line, sigset_t *sigs)
4056 {
4057 int len = strlen (line) - 1;
4058 const char *p;
4059 int signum;
4060
4061 if (line[len] != '\n')
4062 error (_("Could not parse signal set: %s"), line);
4063
4064 p = line;
4065 signum = len * 4;
4066 while (len-- > 0)
4067 {
4068 int digit;
4069
4070 if (*p >= '0' && *p <= '9')
4071 digit = *p - '0';
4072 else if (*p >= 'a' && *p <= 'f')
4073 digit = *p - 'a' + 10;
4074 else
4075 error (_("Could not parse signal set: %s"), line);
4076
4077 signum -= 4;
4078
4079 if (digit & 1)
4080 sigaddset (sigs, signum + 1);
4081 if (digit & 2)
4082 sigaddset (sigs, signum + 2);
4083 if (digit & 4)
4084 sigaddset (sigs, signum + 3);
4085 if (digit & 8)
4086 sigaddset (sigs, signum + 4);
4087
4088 p++;
4089 }
4090 }
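
/* Illustrative worked example (not part of GDB): a status line of
   "0000000000000102\n" is the mask 0x102, i.e. bits 1 and 8, which
   add_line_to_sigset maps to signals 2 (SIGINT) and 9 (SIGKILL).  */
#if 0
#include <signal.h>
#include <stdio.h>

int
main (void)
{
  sigset_t sigs;

  sigemptyset (&sigs);
  add_line_to_sigset ("0000000000000102\n", &sigs);
  printf ("SIGINT set: %d\n", sigismember (&sigs, SIGINT));   /* 1 */
  printf ("SIGKILL set: %d\n", sigismember (&sigs, SIGKILL)); /* 1 */
  return 0;
}
#endif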
4091
4092 /* Find process PID's pending signals from /proc/pid/status and set
4093 SIGS to match. */
4094
4095 void
4096 linux_proc_pending_signals (int pid, sigset_t *pending,
4097 sigset_t *blocked, sigset_t *ignored)
4098 {
4099 FILE *procfile;
4100 char buffer[PATH_MAX], fname[PATH_MAX];
4101 struct cleanup *cleanup;
4102
4103 sigemptyset (pending);
4104 sigemptyset (blocked);
4105 sigemptyset (ignored);
4106 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4107 procfile = gdb_fopen_cloexec (fname, "r");
4108 if (procfile == NULL)
4109 error (_("Could not open %s"), fname);
4110 cleanup = make_cleanup_fclose (procfile);
4111
4112 while (fgets (buffer, PATH_MAX, procfile) != NULL)
4113 {
4114 /* Normal queued signals are on the SigPnd line in the status
4115 file. However, 2.6 kernels also have a "shared" pending
4116 queue for delivering signals to a thread group, so check for
4117 a ShdPnd line also.
4118
4119 Unfortunately some Red Hat kernels include the shared pending
4120 queue but not the ShdPnd status field. */
4121
4122 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4123 add_line_to_sigset (buffer + 8, pending);
4124 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4125 add_line_to_sigset (buffer + 8, pending);
4126 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4127 add_line_to_sigset (buffer + 8, blocked);
4128 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4129 add_line_to_sigset (buffer + 8, ignored);
4130 }
4131
4132 do_cleanups (cleanup);
4133 }
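
/* Illustrative usage sketch (not part of GDB): dumping a process's
   pending signals via the helper above.  dump_pending_signals is a
   hypothetical caller, not an existing GDB function.  */
#if 0
#include <signal.h>
#include <stdio.h>

static void
dump_pending_signals (int pid)
{
  sigset_t pending, blocked, ignored;
  int sig;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  for (sig = 1; sig < NSIG; sig++)
    if (sigismember (&pending, sig))
      printf ("signal %d pending\n", sig);
}
#endif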
4134
4135 static enum target_xfer_status
4136 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4137 const char *annex, gdb_byte *readbuf,
4138 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4139 ULONGEST *xfered_len)
4140 {
4141 gdb_assert (object == TARGET_OBJECT_OSDATA);
4142
4143 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4144 if (*xfered_len == 0)
4145 return TARGET_XFER_EOF;
4146 else
4147 return TARGET_XFER_OK;
4148 }
4149
4150 static enum target_xfer_status
4151 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4152 const char *annex, gdb_byte *readbuf,
4153 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4154 ULONGEST *xfered_len)
4155 {
4156 enum target_xfer_status xfer;
4157
4158 if (object == TARGET_OBJECT_AUXV)
4159 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4160 offset, len, xfered_len);
4161
4162 if (object == TARGET_OBJECT_OSDATA)
4163 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4164 offset, len, xfered_len);
4165
4166 if (object == TARGET_OBJECT_SPU)
4167 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4168 offset, len, xfered_len);
4169
4170 /* GDB may calculate addresses in a possibly larger width than the
4171 target's address width; such addresses need to be masked before their
4172 final use - either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4173
4174 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4175
4176 if (object == TARGET_OBJECT_MEMORY)
4177 {
4178 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
4179
4180 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4181 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4182 }
4183
4184 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4185 offset, len, xfered_len);
4186 if (xfer != TARGET_XFER_EOF)
4187 return xfer;
4188
4189 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4190 offset, len, xfered_len);
4191 }
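
/* Illustrative worked example (not part of GDB): the address-width
   masking above.  For a 32-bit inferior (ADDR_BIT == 32) under a
   64-bit GDB, a sign-extended offset such as 0xffffffff80001000 is
   masked down to 0x80001000 before reaching the lower layers.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long long offset = 0xffffffff80001000ULL;
  int addr_bit = 32;

  if (addr_bit < (int) (sizeof (offset) * 8))
    offset &= (1ULL << addr_bit) - 1;
  printf ("masked offset: 0x%llx\n", offset);	/* 0x80001000 */
  return 0;
}
#endif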
4192
4193 static void
4194 cleanup_target_stop (void *arg)
4195 {
4196 ptid_t *ptid = (ptid_t *) arg;
4197
4198 gdb_assert (arg != NULL);
4199
4200 /* Unpause all */
4201 target_resume (*ptid, 0, GDB_SIGNAL_0);
4202 }
4203
4204 static VEC(static_tracepoint_marker_p) *
4205 linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4206 const char *strid)
4207 {
4208 char s[IPA_CMD_BUF_SIZE];
4209 struct cleanup *old_chain;
4210 int pid = ptid_get_pid (inferior_ptid);
4211 VEC(static_tracepoint_marker_p) *markers = NULL;
4212 struct static_tracepoint_marker *marker = NULL;
4213 char *p = s;
4214 ptid_t ptid = ptid_build (pid, 0, 0);
4215
4216 /* Pause all */
4217 target_stop (ptid);
4218
4219 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4220 s[sizeof ("qTfSTM")] = 0;
4221
4222 agent_run_command (pid, s, strlen (s) + 1);
4223
4224 old_chain = make_cleanup (free_current_marker, &marker);
4225 make_cleanup (cleanup_target_stop, &ptid);
4226
4227 while (*p++ == 'm')
4228 {
4229 if (marker == NULL)
4230 marker = XCNEW (struct static_tracepoint_marker);
4231
4232 do
4233 {
4234 parse_static_tracepoint_marker_definition (p, &p, marker);
4235
4236 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4237 {
4238 VEC_safe_push (static_tracepoint_marker_p,
4239 markers, marker);
4240 marker = NULL;
4241 }
4242 else
4243 {
4244 release_static_tracepoint_marker (marker);
4245 memset (marker, 0, sizeof (*marker));
4246 }
4247 }
4248 while (*p++ == ','); /* comma-separated list */
4249
4250 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4251 s[sizeof ("qTsSTM")] = 0;
4252 agent_run_command (pid, s, strlen (s) + 1);
4253 p = s;
4254 }
4255
4256 do_cleanups (old_chain);
4257
4258 return markers;
4259 }
4260
4261 /* Create a prototype generic GNU/Linux target. The client can override
4262 it with local methods. */
4263
4264 static void
4265 linux_target_install_ops (struct target_ops *t)
4266 {
4267 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4268 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4269 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4270 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4271 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4272 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4273 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4274 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4275 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4276 t->to_post_attach = linux_child_post_attach;
4277 t->to_follow_fork = linux_child_follow_fork;
4278
4279 super_xfer_partial = t->to_xfer_partial;
4280 t->to_xfer_partial = linux_xfer_partial;
4281
4282 t->to_static_tracepoint_markers_by_strid
4283 = linux_child_static_tracepoint_markers_by_strid;
4284 }
4285
4286 struct target_ops *
4287 linux_target (void)
4288 {
4289 struct target_ops *t;
4290
4291 t = inf_ptrace_target ();
4292 linux_target_install_ops (t);
4293
4294 return t;
4295 }
4296
4297 struct target_ops *
4298 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4299 {
4300 struct target_ops *t;
4301
4302 t = inf_ptrace_trad_target (register_u_offset);
4303 linux_target_install_ops (t);
4304
4305 return t;
4306 }
4307
4308 /* target_is_async_p implementation. */
4309
4310 static int
4311 linux_nat_is_async_p (struct target_ops *ops)
4312 {
4313 /* NOTE: palves 2008-03-21: We're only async when the user requests
4314 it explicitly with the "set target-async" command.
4315 Someday, linux will always be async. */
4316 return target_async_permitted;
4317 }
4318
4319 /* target_can_async_p implementation. */
4320
4321 static int
4322 linux_nat_can_async_p (struct target_ops *ops)
4323 {
4324 /* NOTE: palves 2008-03-21: We're only async when the user requests
4325 it explicitly with the "set target-async" command.
4326 Someday, linux will always be async. */
4327 return target_async_permitted;
4328 }
4329
4330 static int
4331 linux_nat_supports_non_stop (struct target_ops *self)
4332 {
4333 return 1;
4334 }
4335
4336 /* True if we want to support multi-process. To be removed when GDB
4337 supports multi-exec. */
4338
4339 int linux_multi_process = 1;
4340
4341 static int
4342 linux_nat_supports_multi_process (struct target_ops *self)
4343 {
4344 return linux_multi_process;
4345 }
4346
4347 static int
4348 linux_nat_supports_disable_randomization (struct target_ops *self)
4349 {
4350 #ifdef HAVE_PERSONALITY
4351 return 1;
4352 #else
4353 return 0;
4354 #endif
4355 }
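
/* Illustrative sketch (not part of GDB): disabling address space
   randomization for a child via personality (ADDR_NO_RANDOMIZE)
   before exec, which is the mechanism the HAVE_PERSONALITY support
   advertised above relies on.  */
#if 0
#include <sys/personality.h>
#include <unistd.h>

static void
exec_without_aslr (char *const argv[])
{
  /* personality (0xffffffff) queries the current persona without
     changing it.  */
  int persona = personality (0xffffffff);

  if (persona != -1)
    personality (persona | ADDR_NO_RANDOMIZE);
  execv (argv[0], argv);
}
#endif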
4356
4357 static int async_terminal_is_ours = 1;
4358
4359 /* target_terminal_inferior implementation.
4360
4361 This is a wrapper around child_terminal_inferior to add async support. */
4362
4363 static void
4364 linux_nat_terminal_inferior (struct target_ops *self)
4365 {
4366 if (!target_is_async_p ())
4367 {
4368 /* Async mode is disabled. */
4369 child_terminal_inferior (self);
4370 return;
4371 }
4372
4373 child_terminal_inferior (self);
4374
4375 /* Calls to target_terminal_*() are meant to be idempotent. */
4376 if (!async_terminal_is_ours)
4377 return;
4378
4379 delete_file_handler (input_fd);
4380 async_terminal_is_ours = 0;
4381 set_sigint_trap ();
4382 }
4383
4384 /* target_terminal_ours implementation.
4385
4386 This is a wrapper around child_terminal_ours to add async support (and
4387 implement the target_terminal_ours vs target_terminal_ours_for_output
4388 distinction). child_terminal_ours is currently no different than
4389 child_terminal_ours_for_output.
4390 We leave target_terminal_ours_for_output alone, leaving it to
4391 child_terminal_ours_for_output. */
4392
4393 static void
4394 linux_nat_terminal_ours (struct target_ops *self)
4395 {
4396 if (!target_is_async_p ())
4397 {
4398 /* Async mode is disabled. */
4399 child_terminal_ours (self);
4400 return;
4401 }
4402
4403 /* GDB should never give the terminal to the inferior if the
4404 inferior is running in the background (run&, continue&, etc.),
4405 but claiming it back certainly should. */
4406 child_terminal_ours (self);
4407
4408 if (async_terminal_is_ours)
4409 return;
4410
4411 clear_sigint_trap ();
4412 add_file_handler (input_fd, stdin_event_handler, 0);
4413 async_terminal_is_ours = 1;
4414 }
4415
4416 static void (*async_client_callback) (enum inferior_event_type event_type,
4417 void *context);
4418 static void *async_client_context;
4419
4420 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4421 it notifies the event loop when any child changes state; and it
4422 allows us to use sigsuspend in linux_nat_wait_1 above to wait for
4423 the arrival of a SIGCHLD. */
4424
4425 static void
4426 sigchld_handler (int signo)
4427 {
4428 int old_errno = errno;
4429
4430 if (debug_linux_nat)
4431 ui_file_write_async_safe (gdb_stdlog,
4432 "sigchld\n", sizeof ("sigchld\n") - 1);
4433
4434 if (signo == SIGCHLD
4435 && linux_nat_event_pipe[0] != -1)
4436 async_file_mark (); /* Let the event loop know that there are
4437 events to handle. */
4438
4439 errno = old_errno;
4440 }
4441
4442 /* Callback registered with the target events file descriptor. */
4443
4444 static void
4445 handle_target_event (int error, gdb_client_data client_data)
4446 {
4447 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4448 }
4449
4450 /* Create/destroy the target events pipe. Returns previous state. */
4451
4452 static int
4453 linux_async_pipe (int enable)
4454 {
4455 int previous = (linux_nat_event_pipe[0] != -1);
4456
4457 if (previous != enable)
4458 {
4459 sigset_t prev_mask;
4460
4461 /* Block child signals while we create/destroy the pipe, as
4462 their handler writes to it. */
4463 block_child_signals (&prev_mask);
4464
4465 if (enable)
4466 {
4467 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4468 internal_error (__FILE__, __LINE__,
4469 "creating event pipe failed.");
4470
4471 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4472 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4473 }
4474 else
4475 {
4476 close (linux_nat_event_pipe[0]);
4477 close (linux_nat_event_pipe[1]);
4478 linux_nat_event_pipe[0] = -1;
4479 linux_nat_event_pipe[1] = -1;
4480 }
4481
4482 restore_child_signals_mask (&prev_mask);
4483 }
4484
4485 return previous;
4486 }
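
/* Illustrative sketch (not part of GDB): the self-pipe trick that
   linux_async_pipe sets up above.  The signal handler writes a byte
   to a non-blocking pipe; the event loop watches the read end,
   turning an asynchronous signal into an ordinary file-descriptor
   event.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];

static void
chld_handler (int signo)
{
  int saved = errno;
  char c = '+';

  /* Async-signal-safe: just poke the pipe.  */
  (void) write (event_pipe[1], &c, 1);
  errno = saved;
}

/* Returns the fd to watch in the event loop, or -1 on error.  */
static int
setup_event_pipe (void)
{
  if (pipe (event_pipe) == -1)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, chld_handler);
  return event_pipe[0];
}
#endif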
4487
4488 /* target_async implementation. */
4489
4490 static void
4491 linux_nat_async (struct target_ops *ops,
4492 void (*callback) (enum inferior_event_type event_type,
4493 void *context),
4494 void *context)
4495 {
4496 if (callback != NULL)
4497 {
4498 async_client_callback = callback;
4499 async_client_context = context;
4500 if (!linux_async_pipe (1))
4501 {
4502 add_file_handler (linux_nat_event_pipe[0],
4503 handle_target_event, NULL);
4504 /* There may be pending events to handle. Tell the event loop
4505 to poll them. */
4506 async_file_mark ();
4507 }
4508 }
4509 else
4510 {
4511 async_client_callback = callback;
4512 async_client_context = context;
4513 delete_file_handler (linux_nat_event_pipe[0]);
4514 linux_async_pipe (0);
4515 }
4516 return;
4517 }
4518
4519 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4520 event came out. */
4521
4522 static int
4523 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4524 {
4525 if (!lwp->stopped)
4526 {
4527 if (debug_linux_nat)
4528 fprintf_unfiltered (gdb_stdlog,
4529 "LNSL: running -> suspending %s\n",
4530 target_pid_to_str (lwp->ptid));
4531
4532
4533 if (lwp->last_resume_kind == resume_stop)
4534 {
4535 if (debug_linux_nat)
4536 fprintf_unfiltered (gdb_stdlog,
4537 "linux-nat: already stopping LWP %ld at "
4538 "GDB's request\n",
4539 ptid_get_lwp (lwp->ptid));
4540 return 0;
4541 }
4542
4543 stop_callback (lwp, NULL);
4544 lwp->last_resume_kind = resume_stop;
4545 }
4546 else
4547 {
4548 /* Already known to be stopped; do nothing. */
4549
4550 if (debug_linux_nat)
4551 {
4552 if (find_thread_ptid (lwp->ptid)->stop_requested)
4553 fprintf_unfiltered (gdb_stdlog,
4554 "LNSL: already stopped/stop_requested %s\n",
4555 target_pid_to_str (lwp->ptid));
4556 else
4557 fprintf_unfiltered (gdb_stdlog,
4558 "LNSL: already stopped/no "
4559 "stop_requested yet %s\n",
4560 target_pid_to_str (lwp->ptid));
4561 }
4562 }
4563 return 0;
4564 }
4565
4566 static void
4567 linux_nat_stop (struct target_ops *self, ptid_t ptid)
4568 {
4569 if (non_stop)
4570 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4571 else
4572 linux_ops->to_stop (linux_ops, ptid);
4573 }
4574
4575 static void
4576 linux_nat_close (struct target_ops *self)
4577 {
4578 /* Unregister from the event loop. */
4579 if (linux_nat_is_async_p (self))
4580 linux_nat_async (self, NULL, NULL);
4581
4582 if (linux_ops->to_close)
4583 linux_ops->to_close (linux_ops);
4584
4585 super_close (self);
4586 }
4587
4588 /* When requests are passed down from the linux-nat layer to the
4589 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4590 used. The address space pointer is stored in the inferior object,
4591 but the common code that is passed such ptid can't tell whether
4592 lwpid is a "main" process id or not (it assumes so). We reverse
4593 look up the "main" process id from the lwp here. */
4594
4595 static struct address_space *
4596 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4597 {
4598 struct lwp_info *lwp;
4599 struct inferior *inf;
4600 int pid;
4601
4602 if (ptid_get_lwp (ptid) == 0)
4603 {
4604 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4605 tgid. */
4606 lwp = find_lwp_pid (ptid);
4607 pid = ptid_get_pid (lwp->ptid);
4608 }
4609 else
4610 {
4611 /* A (pid,lwpid,0) ptid. */
4612 pid = ptid_get_pid (ptid);
4613 }
4614
4615 inf = find_inferior_pid (pid);
4616 gdb_assert (inf != NULL);
4617 return inf->aspace;
4618 }
4619
4620 /* Return the cached value of the processor core for thread PTID. */
4621
4622 static int
4623 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4624 {
4625 struct lwp_info *info = find_lwp_pid (ptid);
4626
4627 if (info)
4628 return info->core;
4629 return -1;
4630 }
4631
4632 void
4633 linux_nat_add_target (struct target_ops *t)
4634 {
4635 /* Save the provided single-threaded target. We save this in a separate
4636 variable because another target we've inherited from (e.g. inf-ptrace)
4637 may have saved a pointer to T; we want to use it for the final
4638 process stratum target. */
4639 linux_ops_saved = *t;
4640 linux_ops = &linux_ops_saved;
4641
4642 /* Override some methods for multithreading. */
4643 t->to_create_inferior = linux_nat_create_inferior;
4644 t->to_attach = linux_nat_attach;
4645 t->to_detach = linux_nat_detach;
4646 t->to_resume = linux_nat_resume;
4647 t->to_wait = linux_nat_wait;
4648 t->to_pass_signals = linux_nat_pass_signals;
4649 t->to_xfer_partial = linux_nat_xfer_partial;
4650 t->to_kill = linux_nat_kill;
4651 t->to_mourn_inferior = linux_nat_mourn_inferior;
4652 t->to_thread_alive = linux_nat_thread_alive;
4653 t->to_pid_to_str = linux_nat_pid_to_str;
4654 t->to_thread_name = linux_nat_thread_name;
4655 t->to_has_thread_control = tc_schedlock;
4656 t->to_thread_address_space = linux_nat_thread_address_space;
4657 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4658 t->to_stopped_data_address = linux_nat_stopped_data_address;
4659
4660 t->to_can_async_p = linux_nat_can_async_p;
4661 t->to_is_async_p = linux_nat_is_async_p;
4662 t->to_supports_non_stop = linux_nat_supports_non_stop;
4663 t->to_async = linux_nat_async;
4664 t->to_terminal_inferior = linux_nat_terminal_inferior;
4665 t->to_terminal_ours = linux_nat_terminal_ours;
4666
4667 super_close = t->to_close;
4668 t->to_close = linux_nat_close;
4669
4670 /* Methods for non-stop support. */
4671 t->to_stop = linux_nat_stop;
4672
4673 t->to_supports_multi_process = linux_nat_supports_multi_process;
4674
4675 t->to_supports_disable_randomization
4676 = linux_nat_supports_disable_randomization;
4677
4678 t->to_core_of_thread = linux_nat_core_of_thread;
4679
4680 /* We don't change the stratum; this target will sit at
4681 process_stratum and thread_db will sit at thread_stratum. This
4682 is a little strange, since this is a multi-threaded-capable
4683 target, but we want to be on the stack below thread_db, and we
4684 also want to be used for single-threaded processes. */
4685
4686 add_target (t);
4687 }
4688
4689 /* Register a method to call whenever a new thread is attached. */
4690 void
4691 linux_nat_set_new_thread (struct target_ops *t,
4692 void (*new_thread) (struct lwp_info *))
4693 {
4694 /* Save the pointer. We only support a single registered instance
4695 of the GNU/Linux native target, so we do not need to map this to
4696 T. */
4697 linux_nat_new_thread = new_thread;
4698 }
4699
4700 /* See declaration in linux-nat.h. */
4701
4702 void
4703 linux_nat_set_new_fork (struct target_ops *t,
4704 linux_nat_new_fork_ftype *new_fork)
4705 {
4706 /* Save the pointer. */
4707 linux_nat_new_fork = new_fork;
4708 }
4709
4710 /* See declaration in linux-nat.h. */
4711
4712 void
4713 linux_nat_set_forget_process (struct target_ops *t,
4714 linux_nat_forget_process_ftype *fn)
4715 {
4716 /* Save the pointer. */
4717 linux_nat_forget_process_hook = fn;
4718 }
4719
4720 /* See declaration in linux-nat.h. */
4721
4722 void
4723 linux_nat_forget_process (pid_t pid)
4724 {
4725 if (linux_nat_forget_process_hook != NULL)
4726 linux_nat_forget_process_hook (pid);
4727 }
4728
4729 /* Register a method that converts a siginfo object between the layout
4730 that ptrace returns, and the layout in the architecture of the
4731 inferior. */
4732 void
4733 linux_nat_set_siginfo_fixup (struct target_ops *t,
4734 int (*siginfo_fixup) (siginfo_t *,
4735 gdb_byte *,
4736 int))
4737 {
4738 /* Save the pointer. */
4739 linux_nat_siginfo_fixup = siginfo_fixup;
4740 }
4741
4742 /* Register a method to call prior to resuming a thread. */
4743
4744 void
4745 linux_nat_set_prepare_to_resume (struct target_ops *t,
4746 void (*prepare_to_resume) (struct lwp_info *))
4747 {
4748 /* Save the pointer. */
4749 linux_nat_prepare_to_resume = prepare_to_resume;
4750 }
4751
4752 /* See linux-nat.h. */
4753
4754 int
4755 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4756 {
4757 int pid;
4758
4759 pid = ptid_get_lwp (ptid);
4760 if (pid == 0)
4761 pid = ptid_get_pid (ptid);
4762
4763 errno = 0;
4764 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4765 if (errno != 0)
4766 {
4767 memset (siginfo, 0, sizeof (*siginfo));
4768 return 0;
4769 }
4770 return 1;
4771 }
4772
4773 /* Provide a prototype to silence -Wmissing-prototypes. */
4774 extern initialize_file_ftype _initialize_linux_nat;
4775
4776 void
4777 _initialize_linux_nat (void)
4778 {
4779 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4780 &debug_linux_nat, _("\
4781 Set debugging of GNU/Linux lwp module."), _("\
4782 Show debugging of GNU/Linux lwp module."), _("\
4783 Enables printf debugging output."),
4784 NULL,
4785 show_debug_linux_nat,
4786 &setdebuglist, &showdebuglist);
4787
4788 /* Save this mask as the default. */
4789 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4790
4791 /* Install a SIGCHLD handler. */
4792 sigchld_action.sa_handler = sigchld_handler;
4793 sigemptyset (&sigchld_action.sa_mask);
4794 sigchld_action.sa_flags = SA_RESTART;
4795
4796 /* Make it the default. */
4797 sigaction (SIGCHLD, &sigchld_action, NULL);
4798
4799 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4800 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4801 sigdelset (&suspend_mask, SIGCHLD);
4802
4803 sigemptyset (&blocked_mask);
4804
4805 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
4806 support read-only process state. */
4807 linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
4808 | PTRACE_O_TRACEVFORKDONE
4809 | PTRACE_O_TRACEVFORK
4810 | PTRACE_O_TRACEFORK
4811 | PTRACE_O_TRACEEXEC);
4812 }
4813 \f
4814
4815 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4816 the GNU/Linux Threads library and therefore doesn't really belong
4817 here. */
4818
4819 /* Read variable NAME in the target and return its value if found.
4820 Otherwise return zero. It is assumed that the type of the variable
4821 is `int'. */
4822
4823 static int
4824 get_signo (const char *name)
4825 {
4826 struct bound_minimal_symbol ms;
4827 int signo;
4828
4829 ms = lookup_minimal_symbol (name, NULL, NULL);
4830 if (ms.minsym == NULL)
4831 return 0;
4832
4833 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4834 sizeof (signo)) != 0)
4835 return 0;
4836
4837 return signo;
4838 }
4839
4840 /* Return the set of signals used by the threads library in *SET. */
4841
4842 void
4843 lin_thread_get_thread_signals (sigset_t *set)
4844 {
4845 struct sigaction action;
4846 int restart, cancel;
4847
4848 sigemptyset (&blocked_mask);
4849 sigemptyset (set);
4850
4851 restart = get_signo ("__pthread_sig_restart");
4852 cancel = get_signo ("__pthread_sig_cancel");
4853
4854 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4855 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4856 not provide any way for the debugger to query the signal numbers -
4857 fortunately they don't change! */
4858
4859 if (restart == 0)
4860 restart = __SIGRTMIN;
4861
4862 if (cancel == 0)
4863 cancel = __SIGRTMIN + 1;
4864
4865 sigaddset (set, restart);
4866 sigaddset (set, cancel);
4867
4868 /* The GNU/Linux Threads library makes terminating threads send a
4869 special "cancel" signal instead of SIGCHLD. Make sure we catch
4870 those (to prevent them from terminating GDB itself, which is
4871 likely to be their default action) and treat them the same way as
4872 SIGCHLD. */
4873
4874 action.sa_handler = sigchld_handler;
4875 sigemptyset (&action.sa_mask);
4876 action.sa_flags = SA_RESTART;
4877 sigaction (cancel, &action, NULL);
4878
4879 /* We block the "cancel" signal throughout this code ... */
4880 sigaddset (&blocked_mask, cancel);
4881 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4882
4883 /* ... except during a sigsuspend. */
4884 sigdelset (&suspend_mask, cancel);
4885 }