/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.  Prior
to version 2.4, Linux can either wait for events in the main thread, or in
secondary threads.  (2.4 has the __WALL flag).  So, if we use blocking
waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid to get
an event in the main process, if any.  Second, we use non-blocking waitpid
with the __WCLONE flag to check for events in cloned processes.  If nothing
is found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it
means something happened to a child process -- and SIGCHLD will be delivered
both for events in the main debugged process and in cloned processes.  As
soon as we know there's an event, we get back to calling non-blocking waitpid
with and without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop if there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered, so
that we can intercept it.  SIGSTOP's advantage is that it cannot be blocked.
A disadvantage is that it is not a real-time signal, so it can only be queued
once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we
can't use them, because they have special behavior when the signal is
generated - not when it is delivered.  SIGCONT resumes the entire thread
group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread
we tkill'd.  But we never let the SIGSTOP be delivered; we always intercept
and cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
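
/* For illustration only --- a minimal, self-contained sketch of the
   self-pipe trick described above, assuming a plain select-based
   loop.  GDB's real implementation lives in this file and in
   event-loop.c; every name below is hypothetical, and the block is
   deliberately compiled out.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/select.h>

static int example_pipe[2];	/* Read end [0], write end [1].  */

/* Async-signal-safe: wake up select by writing one byte.  */

static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;

  write (example_pipe[1], "+", 1);
  errno = saved_errno;
}

static void
example_event_loop (void)
{
  pipe (example_pipe);
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, example_sigchld_handler);

  while (1)
    {
      fd_set rset;
      char buf[64];

      FD_ZERO (&rset);
      FD_SET (example_pipe[0], &rset);
      FD_SET (STDIN_FILENO, &rset);

      /* Unlike an emulated pselect, this cannot lose a wakeup: if
	 SIGCHLD fires before select blocks, the byte is already in
	 the pipe and select returns immediately.  */
      if (select (example_pipe[0] + 1, &rset, NULL, NULL, NULL) < 0)
	continue;

      if (FD_ISSET (example_pipe[0], &rset))
	{
	  /* Drain the pipe, then collect all pending target events,
	     e.g. with waitpid (..., WNOHANG).  */
	  while (read (example_pipe[0], buf, sizeof buf) > 0)
	    ;
	}
    }
}
#endif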

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static target_xfer_partial_ftype *super_xfer_partial;

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
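
/* For example, linux_handle_extended_wait (below) consumes a stop
   recorded earlier by lin_lwp_attach_lwp with:

     if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... wait for the new child's initial stop ...  */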

/* Initialize ptrace warnings and check for supported ptrace
   features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace (pid_t pid, int attached)
{
  linux_enable_event_reporting (pid, attached);
  linux_ptrace_init_warnings ();
}

static void
linux_child_post_attach (struct target_ops *self, int pid)
{
  linux_init_ptrace (pid, 1);
}

static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

/* Target hook for follow_fork.  On entry inferior_ptid must be the
   ptid of the followed inferior.  At return, inferior_ptid will be
   unchanged.  */

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
			 int detach_fork)
{
  if (!follow_child)
    {
      struct lwp_info *child_lp = NULL;
      int status = W_STOPCODE (0);
      struct cleanup *old_chain;
      int has_vforked;
      int parent_pid, child_pid;

      has_vforked = (inferior_thread ()->pending_follow.kind
		     == TARGET_WAITKIND_VFORKED);
      parent_pid = ptid_get_lwp (inferior_ptid);
      if (parent_pid == 0)
	parent_pid = ptid_get_pid (inferior_ptid);
      child_pid
	= ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);

      /* We're already attached to the parent, by default.  */
      old_chain = save_inferior_ptid ();
      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (child_lp->ptid)))
	    {
	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait for vfork process"));
	    }

	  if (WIFSTOPPED (status))
	    {
	      int signo;

	      signo = WSTOPSIG (status);
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);
	    }

	  /* Resets value of inferior_ptid to parent ptid.  */
	  do_cleanups (old_chain);
	}
      else
	{
	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();
	}

      do_cleanups (old_chain);

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;

	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypass actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_can_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (struct target_ops *self,
				    int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
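
/* For illustration only --- roughly what such a "raw clone" LWP looks
   like on the debuggee side; a hypothetical standalone sketch (it
   assumes _GNU_SOURCE and <sched.h>), deliberately compiled out.  */
#if 0
#include <sched.h>
#include <stdlib.h>

static int
example_thread_func (void *arg)
{
  /* Shares the VM of its creator, so from this module's point of
     view it is "the closest thing to an LWP".  */
  return 0;
}

static int
example_spawn_raw_lwp (void)
{
  enum { EXAMPLE_STACK_SIZE = 64 * 1024 };
  char *stack = malloc (EXAMPLE_STACK_SIZE);

  /* CLONE_PTRACE makes the new process start out traced if its
     creator is traced, which is what lets the debugger see it at
     all.  The stack grows down on most targets, hence the + size.  */
  return clone (example_thread_func, stack + EXAMPLE_STACK_SIZE,
		CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_PTRACE,
		NULL);
}
#endif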

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
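
/* The typical pattern for the pair above, as used throughout this
   file (a sketch only): block SIGCHLD around code that must not race
   with the handler, then restore the caller's mask.

     sigset_t prev_mask;

     block_child_signals (&prev_mask);
     ... waitpid / sigsuspend work ...
     restore_child_signals_mask (&prev_mask);  */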

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (struct target_ops *self,
			int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);

\f

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  lwp_free (lp);
	}
      else
	lpprev = lp;
    }
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (ptid_lwp_p (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  return lp;
}

/* Add the LWP specified by PTID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  if (linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PTID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (ptid_lwp_p (ptid))
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == ptid_get_lwp (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   int (*callback) (struct lwp_info *, void *),
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data))
	    return lp;
	}
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (ptid_get_pid (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PTID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (ptid_lwp_p (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = ptid_get_lwp (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != ptid_get_pid (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork ())
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  gdb_assert (lwpid > 0);
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (ops, 0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "Cannot attach to lwp %d: "
				      "thread is gone (%d: %s)\n",
				      lwpid, err, safe_strerror (err));
		}
	    }
	  else
	    {
	      warning (_("Cannot attach to lwp %d: %s\n"),
		       lwpid,
		       linux_ptrace_attach_fail_reason_string (ptid,
							       err));
	    }
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_ATTACH %s, 0, 0 (OK)\n",
				target_pid_to_str (ptid));

	  lp = add_lwp (ptid);
	  lp->cloned = 1;

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;
	}

      return 1;
    }
  return 0;
}

static void
linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (ops, 0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_fail_reason (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      if (*buffer_s != '\0')
	throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
      else
	throw_error (ex.error, "%s", message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_build (ptid_get_pid (inferior_ptid),
		     ptid_get_pid (inferior_ptid),
		     0);
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid_get_lwp (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) ptid_get_pid (lp->ptid),
			status_to_str (status));

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_TRAP when the catchpoint triggers.  But, those
     traps are debug API (ptrace in our case) related and induced; the
     inferior wouldn't see them if it wasn't being traced.  Hence, we
     should never pass them to the inferior, even when set to pass
     state.  Since this corner case isn't handled by infrun.c when
     proceeding with a signal, for consistency, neither do we handle
     it here (or elsewhere in the file we check for signal pass
     state).  Normally SIGTRAP isn't set to pass state, so this is
     really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}
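
/* A note on W_STOPCODE, used just above and in follow_fork: it packs
   a raw wait status that looks as if the LWP had stopped with the
   given signal.  On GNU/Linux it expands to (sig << 8) | 0x7f, so
   WIFSTOPPED is true of the result and WSTOPSIG recovers the signal,
   e.g. WSTOPSIG (W_STOPCODE (SIGINT)) == SIGINT.  */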

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

static void
linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = ptid_get_pid (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      char *tem;

      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      tem = alloca (8);
      xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
      args = tem;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}

/* Resume LP.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_ptid (lp->ptid);

      if (inf->vfork_child != NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming %s (vfork parent)\n",
				target_pid_to_str (lp->ptid));
	}
      else if (lp->status == 0
	       && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Resuming sibling %s, %s, %s\n",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"),
				step ? "step" : "resume");

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops,
				pid_to_ptid (ptid_get_lwp (lp->ptid)),
				step, signo);
	  lp->stopped = 0;
	  lp->step = step;
	  lp->stopped_by_watchpoint = 0;
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming sibling %s (has pending)\n",
				target_pid_to_str (lp->ptid));
	}
    }
  else
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming sibling %s (not stopped)\n",
			    target_pid_to_str (lp->ptid));
    }
}

/* Callback for iterate_over_lwps.  If LWP is EXCEPT, do nothing.
   Resume LWP with the last stop signal, if it is in pass state.  */

static int
linux_nat_resume_callback (struct lwp_info *lp, void *except)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  if (lp == except)
    return 0;

  if (lp->stopped)
    {
      struct thread_info *thread;

      thread = find_thread_ptid (lp->ptid);
      if (thread != NULL)
	{
	  signo = thread->suspend.stop_signal;
	  thread->suspend.stop_signal = GDB_SIGNAL_0;
	}
    }

  resume_lwp (lp, 0, signo);
  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->step = step;
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  if (resume_many)
    iterate_over_lwps (ptid, linux_nat_resume_callback, lp);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, ptid, step, signo);
  lp->stopped_by_watchpoint = 0;
  lp->stopped = 0;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"));

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Send a signal to an LWP.  */

static int
kill_lwp (int lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
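
/* E.g., detach_callback above wakes a single LWP with
   "kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT)".  The point of tkill
   is that it directs the signal at exactly one thread; a plain kill
   of the process id would let the kernel deliver it to any thread in
   the thread group.  */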
1741
1742 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1743 event, check if the core is interested in it: if not, ignore the
1744 event, and keep waiting; otherwise, we need to toggle the LWP's
1745 syscall entry/exit status, since the ptrace event itself doesn't
1746 indicate it, and report the trap to higher layers. */
1747
1748 static int
1749 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1750 {
1751 struct target_waitstatus *ourstatus = &lp->waitstatus;
1752 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1753 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1754
1755 if (stopping)
1756 {
1757 /* If we're stopping threads, there's a SIGSTOP pending, which
1758 makes it so that the LWP reports an immediate syscall return,
1759 followed by the SIGSTOP. Skip seeing that "return" using
1760 PTRACE_CONT directly, and let stop_wait_callback collect the
1761 SIGSTOP. Later when the thread is resumed, a new syscall
1762 entry event. If we didn't do this (and returned 0), we'd
1763 leave a syscall entry pending, and our caller, by using
1764 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1765 itself. Later, when the user re-resumes this LWP, we'd see
1766 another syscall entry event and we'd mistake it for a return.
1767
1768 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1769 (leaving immediately with LWP->signalled set, without issuing
1770 a PTRACE_CONT), it would still be problematic to leave this
1771 syscall enter pending, as later when the thread is resumed,
1772 it would then see the same syscall exit mentioned above,
1773 followed by the delayed SIGSTOP, while the syscall didn't
1774 actually get to execute. It seems it would be even more
1775 confusing to the user. */
1776
1777 if (debug_linux_nat)
1778 fprintf_unfiltered (gdb_stdlog,
1779 "LHST: ignoring syscall %d "
1780 "for LWP %ld (stopping threads), "
1781 "resuming with PTRACE_CONT for SIGSTOP\n",
1782 syscall_number,
1783 ptid_get_lwp (lp->ptid));
1784
1785 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1786 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
1787 lp->stopped = 0;
1788 return 1;
1789 }
1790
1791 if (catch_syscall_enabled ())
1792 {
1793 /* Always update the entry/return state, even if this particular
1794 syscall isn't interesting to the core now. In async mode,
1795 the user could install a new catchpoint for this syscall
1796 between syscall enter/return, and we'll need to know to
1797 report a syscall return if that happens. */
1798 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1799 ? TARGET_WAITKIND_SYSCALL_RETURN
1800 : TARGET_WAITKIND_SYSCALL_ENTRY);
1801
1802 if (catching_syscall_number (syscall_number))
1803 {
1804 /* Alright, an event to report. */
1805 ourstatus->kind = lp->syscall_state;
1806 ourstatus->value.syscall_number = syscall_number;
1807
1808 if (debug_linux_nat)
1809 fprintf_unfiltered (gdb_stdlog,
1810 "LHST: stopping for %s of syscall %d"
1811 " for LWP %ld\n",
1812 lp->syscall_state
1813 == TARGET_WAITKIND_SYSCALL_ENTRY
1814 ? "entry" : "return",
1815 syscall_number,
1816 ptid_get_lwp (lp->ptid));
1817 return 0;
1818 }
1819
1820 if (debug_linux_nat)
1821 fprintf_unfiltered (gdb_stdlog,
1822 "LHST: ignoring %s of syscall %d "
1823 "for LWP %ld\n",
1824 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1825 ? "entry" : "return",
1826 syscall_number,
1827 ptid_get_lwp (lp->ptid));
1828 }
1829 else
1830 {
1831 /* If we had been syscall tracing, and hence used PT_SYSCALL
1832 before on this LWP, it could happen that the user removes all
1833 syscall catchpoints before we get to process this event.
1834 There are two noteworthy issues here:
1835
1836 - When stopped at a syscall entry event, resuming with
1837 PT_STEP still resumes executing the syscall and reports a
1838 syscall return.
1839
1840 - Only PT_SYSCALL catches syscall enters.  So if we last
1841 single-stepped this thread, then this event can't be a
1842 syscall enter; the only remaining possibility is that it
1843 is a syscall exit.
1844
1845 The points above mean that the next resume, be it PT_STEP or
1846 PT_CONTINUE, cannot trigger a syscall trace event.  */
1847 if (debug_linux_nat)
1848 fprintf_unfiltered (gdb_stdlog,
1849 "LHST: caught syscall event "
1850 "with no syscall catchpoints."
1851 " %d for LWP %ld, ignoring\n",
1852 syscall_number,
1853 ptid_get_lwp (lp->ptid));
1854 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1855 }
1856
1857 /* The core isn't interested in this event. For efficiency, avoid
1858 stopping all threads only to have the core resume them all again.
1859 Since we're not stopping threads, if we're still syscall tracing
1860 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1861 subsequent syscall. Simply resume using the inf-ptrace layer,
1862 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1863
1864 /* Note that gdbarch_get_syscall_number may access registers, hence
1865 fill a regcache. */
1866 registers_changed ();
1867 if (linux_nat_prepare_to_resume != NULL)
1868 linux_nat_prepare_to_resume (lp);
1869 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
1870 lp->step, GDB_SIGNAL_0);
1871 lp->stopped = 0;
1872 return 1;
1873 }
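/* A stand-alone sketch of the entry/exit bookkeeping done above: a
   PTRACE_SYSCALL stop carries no entry-vs-exit flag, so a tracer has
   to toggle that state itself.  This hypothetical loop assumes CHILD
   is already traced and stopped, and it ignores the non-SIGTRAP
   stops (signals) whose interleaving is exactly what the code above
   has to untangle.  */

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static void
trace_syscalls (pid_t child)
{
  int entering = 0;
  int status;

  while (ptrace (PTRACE_SYSCALL, child, 0, 0) == 0
	 && waitpid (child, &status, 0) == child
	 && WIFSTOPPED (status)
	 && WSTOPSIG (status) == SIGTRAP)
    {
      /* Syscall stops alternate: the first is an entry, the next
	 the matching exit, and so on.  */
      entering = !entering;
    }
}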
1874
1875 /* Handle a GNU/Linux extended wait response. If we see a clone
1876 event, we need to add the new LWP to our list (and not report the
1877 trap to higher layers). This function returns non-zero if the
1878 event should be ignored and we should wait again. If STOPPING is
1879 true, the new LWP remains stopped, otherwise it is continued. */
1880
1881 static int
1882 linux_handle_extended_wait (struct lwp_info *lp, int status,
1883 int stopping)
1884 {
1885 int pid = ptid_get_lwp (lp->ptid);
1886 struct target_waitstatus *ourstatus = &lp->waitstatus;
1887 int event = linux_ptrace_get_extended_event (status);
1888
1889 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1890 || event == PTRACE_EVENT_CLONE)
1891 {
1892 unsigned long new_pid;
1893 int ret;
1894
1895 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1896
1897 /* If we haven't already seen the new PID stop, wait for it now. */
1898 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1899 {
1900 /* The new child has a pending SIGSTOP. We can't affect it until it
1901 hits the SIGSTOP, but we're already attached. */
1902 ret = my_waitpid (new_pid, &status,
1903 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1904 if (ret == -1)
1905 perror_with_name (_("waiting for new child"));
1906 else if (ret != new_pid)
1907 internal_error (__FILE__, __LINE__,
1908 _("wait returned unexpected PID %d"), ret);
1909 else if (!WIFSTOPPED (status))
1910 internal_error (__FILE__, __LINE__,
1911 _("wait returned unexpected status 0x%x"), status);
1912 }
1913
1914 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1915
1916 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1917 {
1918 /* The arch-specific native code may need to know about new
1919 forks even if those end up never mapped to an
1920 inferior. */
1921 if (linux_nat_new_fork != NULL)
1922 linux_nat_new_fork (lp, new_pid);
1923 }
1924
1925 if (event == PTRACE_EVENT_FORK
1926 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
1927 {
1928 /* Handle checkpointing by linux-fork.c here as a special
1929 case. We don't want the follow-fork-mode or 'catch fork'
1930 to interfere with this. */
1931
1932 /* This won't actually modify the breakpoint list, but will
1933 physically remove the breakpoints from the child. */
1934 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
1935
1936 /* Retain child fork in ptrace (stopped) state. */
1937 if (!find_fork_pid (new_pid))
1938 add_fork (new_pid);
1939
1940 /* Report as spurious, so that infrun doesn't want to follow
1941 this fork. We're actually doing an infcall in
1942 linux-fork.c. */
1943 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
1944
1945 /* Report the stop to the core. */
1946 return 0;
1947 }
1948
1949 if (event == PTRACE_EVENT_FORK)
1950 ourstatus->kind = TARGET_WAITKIND_FORKED;
1951 else if (event == PTRACE_EVENT_VFORK)
1952 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1953 else
1954 {
1955 struct lwp_info *new_lp;
1956
1957 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1958
1959 if (debug_linux_nat)
1960 fprintf_unfiltered (gdb_stdlog,
1961 "LHEW: Got clone event "
1962 "from LWP %d, new child is LWP %ld\n",
1963 pid, new_pid);
1964
1965 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
1966 new_lp->cloned = 1;
1967 new_lp->stopped = 1;
1968
1969 if (WSTOPSIG (status) != SIGSTOP)
1970 {
1971 /* This can happen if someone starts sending signals with
1972 a number lower than SIGSTOP (e.g. SIGUSR1) to the new
1973 thread before it gets a chance to run.
1974 This is an unlikely case, and harder to handle for
1975 fork / vfork than for clone, so we do not try - but
1976 we handle it for clone events here. We'll send
1977 the other signal on to the thread below. */
1978
1979 new_lp->signalled = 1;
1980 }
1981 else
1982 {
1983 struct thread_info *tp;
1984
1985 /* When we stop for an event in some other thread, and
1986 pull the thread list just as this thread has cloned,
1987 we'll have seen the new thread in the thread_db list
1988 before handling the CLONE event (glibc's
1989 pthread_create adds the new thread to the thread list
1990 before clone'ing, and has the kernel fill in the
1991 thread's tid on the clone call with
1992 CLONE_PARENT_SETTID). If that happened, and the core
1993 had requested the new thread to stop, we'll have
1994 killed it with SIGSTOP. But since SIGSTOP is not an
1995 RT signal, it can only be queued once. We need to be
1996 careful to not resume the LWP if we wanted it to
1997 stop. In that case, we'll leave the SIGSTOP pending.
1998 It will later be reported as GDB_SIGNAL_0. */
1999 tp = find_thread_ptid (new_lp->ptid);
2000 if (tp != NULL && tp->stop_requested)
2001 new_lp->last_resume_kind = resume_stop;
2002 else
2003 status = 0;
2004 }
2005
2006 if (non_stop)
2007 {
2008 /* Add the new thread to GDB's lists as soon as possible
2009 so that:
2010
2011 1) the frontend doesn't have to wait for a stop to
2012 display it, and,
2013
2014 2) we tag it with the correct running state. */
2015
2016 /* If the thread_db layer is active, let it know about
2017 this new thread, and add it to GDB's list. */
2018 if (!thread_db_attach_lwp (new_lp->ptid))
2019 {
2020 /* We're not using thread_db. Add it to GDB's
2021 list. */
2022 target_post_attach (ptid_get_lwp (new_lp->ptid));
2023 add_thread (new_lp->ptid);
2024 }
2025
2026 if (!stopping)
2027 {
2028 set_running (new_lp->ptid, 1);
2029 set_executing (new_lp->ptid, 1);
2030 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2031 resume_stop. */
2032 new_lp->last_resume_kind = resume_continue;
2033 }
2034 }
2035
2036 if (status != 0)
2037 {
2038 /* We created NEW_LP so it cannot yet contain STATUS. */
2039 gdb_assert (new_lp->status == 0);
2040
2041 /* Save the wait status to report later. */
2042 if (debug_linux_nat)
2043 fprintf_unfiltered (gdb_stdlog,
2044 "LHEW: waitpid of new LWP %ld, "
2045 "saving status %s\n",
2046 (long) ptid_get_lwp (new_lp->ptid),
2047 status_to_str (status));
2048 new_lp->status = status;
2049 }
2050
2051 /* Note the need to use the low target ops to resume, to
2052 handle resuming with PT_SYSCALL if we have syscall
2053 catchpoints. */
2054 if (!stopping)
2055 {
2056 new_lp->resumed = 1;
2057
2058 if (status == 0)
2059 {
2060 gdb_assert (new_lp->last_resume_kind == resume_continue);
2061 if (debug_linux_nat)
2062 fprintf_unfiltered (gdb_stdlog,
2063 "LHEW: resuming new LWP %ld\n",
2064 ptid_get_lwp (new_lp->ptid));
2065 if (linux_nat_prepare_to_resume != NULL)
2066 linux_nat_prepare_to_resume (new_lp);
2067 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2068 0, GDB_SIGNAL_0);
2069 new_lp->stopped = 0;
2070 }
2071 }
2072
2073 if (debug_linux_nat)
2074 fprintf_unfiltered (gdb_stdlog,
2075 "LHEW: resuming parent LWP %d\n", pid);
2076 if (linux_nat_prepare_to_resume != NULL)
2077 linux_nat_prepare_to_resume (lp);
2078 linux_ops->to_resume (linux_ops,
2079 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2080 0, GDB_SIGNAL_0);
2081 lp->stopped = 0;
2082 return 1;
2083 }
2084
2085 return 0;
2086 }
2087
2088 if (event == PTRACE_EVENT_EXEC)
2089 {
2090 if (debug_linux_nat)
2091 fprintf_unfiltered (gdb_stdlog,
2092 "LHEW: Got exec event from LWP %ld\n",
2093 ptid_get_lwp (lp->ptid));
2094
2095 ourstatus->kind = TARGET_WAITKIND_EXECD;
2096 ourstatus->value.execd_pathname
2097 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
2098
2099 return 0;
2100 }
2101
2102 if (event == PTRACE_EVENT_VFORK_DONE)
2103 {
2104 if (current_inferior ()->waiting_for_vfork_done)
2105 {
2106 if (debug_linux_nat)
2107 fprintf_unfiltered (gdb_stdlog,
2108 "LHEW: Got expected PTRACE_EVENT_"
2109 "VFORK_DONE from LWP %ld: stopping\n",
2110 ptid_get_lwp (lp->ptid));
2111
2112 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2113 return 0;
2114 }
2115
2116 if (debug_linux_nat)
2117 fprintf_unfiltered (gdb_stdlog,
2118 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2119 "from LWP %ld: resuming\n",
2120 ptid_get_lwp (lp->ptid));
2121 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2122 return 1;
2123 }
2124
2125 internal_error (__FILE__, __LINE__,
2126 _("unknown ptrace event %d"), event);
2127 }
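/* For reference, a minimal sketch of the plumbing consumed above:
   with PTRACE_O_TRACECLONE and friends set, the kernel stops the
   parent with SIGTRAP | (PTRACE_EVENT_x << 8) in the wait status,
   and PTRACE_GETEVENTMSG yields the new thread's id.  The helper
   below is a simplified assumption; GDB's real option handling
   lives in nat/linux-ptrace.c.  */

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static pid_t
new_lwp_of_clone_event (pid_t parent, int status)
{
  unsigned long new_pid = 0;

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_CLONE)
    ptrace (PTRACE_GETEVENTMSG, parent, 0, &new_pid);
  return (pid_t) new_pid;
}

/* Enabled once, after attach, along the lines of:
   ptrace (PTRACE_SETOPTIONS, parent, 0,
	   PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE);  */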
2128
2129 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2130 exited. */
2131
2132 static int
2133 wait_lwp (struct lwp_info *lp)
2134 {
2135 pid_t pid;
2136 int status = 0;
2137 int thread_dead = 0;
2138 sigset_t prev_mask;
2139
2140 gdb_assert (!lp->stopped);
2141 gdb_assert (lp->status == 0);
2142
2143 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2144 block_child_signals (&prev_mask);
2145
2146 for (;;)
2147 {
2148 /* If my_waitpid returns 0, it means the __WCLONE vs. non-__WCLONE kind
2149 was right but no event is pending yet, and we should just call sigsuspend.  */
2150
2151 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
2152 if (pid == -1 && errno == ECHILD)
2153 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
2154 if (pid == -1 && errno == ECHILD)
2155 {
2156 /* The thread has previously exited. We need to delete it
2157 now because, for some vendor 2.4 kernels with NPTL
2158 support backported, there won't be an exit event unless
2159 it is the main thread. 2.6 kernels will report an exit
2160 event for each thread that exits, as expected. */
2161 thread_dead = 1;
2162 if (debug_linux_nat)
2163 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2164 target_pid_to_str (lp->ptid));
2165 }
2166 if (pid != 0)
2167 break;
2168
2169 /* Bugs 10970, 12702.
2170 The thread group leader may have exited, in which case we'll lock up
2171 in waitpid if there are other threads, even if they are all zombies
2172 too.  Basically, we're not supposed to use waitpid this way.
2173 __WCLONE is not applicable for the leader so we can't use that.
2174 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2175 process; it gets ESRCH both for the zombie and for running processes.
2176
2177 As a workaround, check if we're waiting for the thread group leader and
2178 if it's a zombie, and avoid calling waitpid if it is.
2179
2180 This is racy: what if the tgl becomes a zombie right after we check?
2181 Therefore always use WNOHANG with sigsuspend - it is equivalent to a
2182 blocking waitpid, but linux_proc_pid_is_zombie stays safe this way.  */
2183
2184 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2185 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
2186 {
2187 thread_dead = 1;
2188 if (debug_linux_nat)
2189 fprintf_unfiltered (gdb_stdlog,
2190 "WL: Thread group leader %s vanished.\n",
2191 target_pid_to_str (lp->ptid));
2192 break;
2193 }
2194
2195 /* Wait for the next SIGCHLD and try again.  This may let SIGCHLD
2196 handlers get invoked even though our caller intentionally blocked
2197 them with block_child_signals.  Only the loop in linux_nat_wait_1
2198 is sensitive to this, and when called from there, my_waitpid gets
2199 called again before it reaches sigsuspend, so we can safely let the
2200 handlers get executed here.  */
2201
2202 if (debug_linux_nat)
2203 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
2204 sigsuspend (&suspend_mask);
2205 }
2206
2207 restore_child_signals_mask (&prev_mask);
2208
2209 if (!thread_dead)
2210 {
2211 gdb_assert (pid == ptid_get_lwp (lp->ptid));
2212
2213 if (debug_linux_nat)
2214 {
2215 fprintf_unfiltered (gdb_stdlog,
2216 "WL: waitpid %s received %s\n",
2217 target_pid_to_str (lp->ptid),
2218 status_to_str (status));
2219 }
2220
2221 /* Check if the thread has exited. */
2222 if (WIFEXITED (status) || WIFSIGNALED (status))
2223 {
2224 thread_dead = 1;
2225 if (debug_linux_nat)
2226 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2227 target_pid_to_str (lp->ptid));
2228 }
2229 }
2230
2231 if (thread_dead)
2232 {
2233 exit_lwp (lp);
2234 return 0;
2235 }
2236
2237 gdb_assert (WIFSTOPPED (status));
2238 lp->stopped = 1;
2239
2240 if (lp->must_set_ptrace_flags)
2241 {
2242 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2243
2244 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2245 lp->must_set_ptrace_flags = 0;
2246 }
2247
2248 /* Handle GNU/Linux's syscall SIGTRAPs. */
2249 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2250 {
2251 /* No longer need the sysgood bit. The ptrace event ends up
2252 recorded in lp->waitstatus if we care for it. We can carry
2253 on handling the event like a regular SIGTRAP from here
2254 on. */
2255 status = W_STOPCODE (SIGTRAP);
2256 if (linux_handle_syscall_trap (lp, 1))
2257 return wait_lwp (lp);
2258 }
2259
2260 /* Handle GNU/Linux's extended waitstatus for trace events. */
2261 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2262 && linux_is_extended_waitstatus (status))
2263 {
2264 if (debug_linux_nat)
2265 fprintf_unfiltered (gdb_stdlog,
2266 "WL: Handling extended status 0x%06x\n",
2267 status);
2268 if (linux_handle_extended_wait (lp, status, 1))
2269 return wait_lwp (lp);
2270 }
2271
2272 return status;
2273 }
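/* The polling pattern of wait_lwp, condensed into a sketch: try both
   waitpid "kinds" with WNOHANG, and sleep in sigsuspend until the
   next SIGCHLD if neither has anything.  MASK plays the role of this
   file's suspend_mask (everything blocked except SIGCHLD); the
   zombie-leader special case above is deliberately left out.  */

#include <errno.h>
#include <signal.h>
#include <sys/wait.h>

static pid_t
poll_one_lwp (pid_t lwpid, int *status, const sigset_t *mask)
{
  pid_t pid;

  for (;;)
    {
      pid = waitpid (lwpid, status, WNOHANG);
      if (pid == -1 && errno == ECHILD)
	pid = waitpid (lwpid, status, __WCLONE | WNOHANG);
      if (pid != 0)
	return pid;		/* An event, or a hard error.  */
      sigsuspend (mask);	/* Nothing yet; wait for SIGCHLD.  */
    }
}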
2274
2275 /* Send a SIGSTOP to LP. */
2276
2277 static int
2278 stop_callback (struct lwp_info *lp, void *data)
2279 {
2280 if (!lp->stopped && !lp->signalled)
2281 {
2282 int ret;
2283
2284 if (debug_linux_nat)
2285 {
2286 fprintf_unfiltered (gdb_stdlog,
2287 "SC: kill %s **<SIGSTOP>**\n",
2288 target_pid_to_str (lp->ptid));
2289 }
2290 errno = 0;
2291 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
2292 if (debug_linux_nat)
2293 {
2294 fprintf_unfiltered (gdb_stdlog,
2295 "SC: lwp kill %d %s\n",
2296 ret,
2297 errno ? safe_strerror (errno) : "ERRNO-OK");
2298 }
2299
2300 lp->signalled = 1;
2301 gdb_assert (lp->status == 0);
2302 }
2303
2304 return 0;
2305 }
2306
2307 /* Request a stop on LWP. */
2308
2309 void
2310 linux_stop_lwp (struct lwp_info *lwp)
2311 {
2312 stop_callback (lwp, NULL);
2313 }
2314
2315 /* Return non-zero if LWP PID has a pending SIGINT. */
2316
2317 static int
2318 linux_nat_has_pending_sigint (int pid)
2319 {
2320 sigset_t pending, blocked, ignored;
2321
2322 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2323
2324 if (sigismember (&pending, SIGINT)
2325 && !sigismember (&ignored, SIGINT))
2326 return 1;
2327
2328 return 0;
2329 }
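/* A sketch of where those signal sets come from: the hexadecimal
   masks in /proc/PID/status, in which bit N-1 stands for signal N.
   This simplified reader only looks at the per-thread SigPnd line
   (the real linux_proc_pending_signals also folds in the shared
   ShdPnd set) and elides error handling.  */

#include <stdio.h>

static int
proc_signal_pending (int pid, int signo)
{
  char path[64], line[256];
  unsigned long long mask = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (sscanf (line, "SigPnd: %llx", &mask) == 1)
      break;
  fclose (f);
  return (int) ((mask >> (signo - 1)) & 1);
}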
2330
2331 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2332
2333 static int
2334 set_ignore_sigint (struct lwp_info *lp, void *data)
2335 {
2336 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2337 flag to consume the next one. */
2338 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2339 && WSTOPSIG (lp->status) == SIGINT)
2340 lp->status = 0;
2341 else
2342 lp->ignore_sigint = 1;
2343
2344 return 0;
2345 }
2346
2347 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2348 This function is called after we know the LWP has stopped; if the LWP
2349 stopped before the expected SIGINT was delivered, then it will never have
2350 arrived. Also, if the signal was delivered to a shared queue and consumed
2351 by a different thread, it will never be delivered to this LWP. */
2352
2353 static void
2354 maybe_clear_ignore_sigint (struct lwp_info *lp)
2355 {
2356 if (!lp->ignore_sigint)
2357 return;
2358
2359 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
2360 {
2361 if (debug_linux_nat)
2362 fprintf_unfiltered (gdb_stdlog,
2363 "MCIS: Clearing bogus flag for %s\n",
2364 target_pid_to_str (lp->ptid));
2365 lp->ignore_sigint = 0;
2366 }
2367 }
2368
2369 /* Fetch the possible triggered data watchpoint info and store it in
2370 LP.
2371
2372 On some archs that use debug registers to set watchpoints, like
2373 x86, the way to know which watched address trapped is to check
2374 the register that is used to select which address to watch.  The
2375 problem is that between setting the watchpoint and reading back
2376 which data address trapped, the user may change the set of
2377 watchpoints, and, as a consequence, GDB changes the
2378 debug registers in the inferior. To avoid reading back a stale
2379 stopped-data-address when that happens, we cache in LP the fact
2380 that a watchpoint trapped, and the corresponding data address, as
2381 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2382 registers meanwhile, we have the cached data we can rely on. */
2383
2384 static void
2385 save_sigtrap (struct lwp_info *lp)
2386 {
2387 struct cleanup *old_chain;
2388
2389 if (linux_ops->to_stopped_by_watchpoint == NULL)
2390 {
2391 lp->stopped_by_watchpoint = 0;
2392 return;
2393 }
2394
2395 old_chain = save_inferior_ptid ();
2396 inferior_ptid = lp->ptid;
2397
2398 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint (linux_ops);
2399
2400 if (lp->stopped_by_watchpoint)
2401 {
2402 if (linux_ops->to_stopped_data_address != NULL)
2403 lp->stopped_data_address_p =
2404 linux_ops->to_stopped_data_address (&current_target,
2405 &lp->stopped_data_address);
2406 else
2407 lp->stopped_data_address_p = 0;
2408 }
2409
2410 do_cleanups (old_chain);
2411 }
2412
2413 /* See save_sigtrap. */
2414
2415 static int
2416 linux_nat_stopped_by_watchpoint (struct target_ops *ops)
2417 {
2418 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2419
2420 gdb_assert (lp != NULL);
2421
2422 return lp->stopped_by_watchpoint;
2423 }
2424
2425 static int
2426 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2427 {
2428 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2429
2430 gdb_assert (lp != NULL);
2431
2432 *addr_p = lp->stopped_data_address;
2433
2434 return lp->stopped_data_address_p;
2435 }
2436
2437 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP.  */
2438
2439 static int
2440 sigtrap_is_event (int status)
2441 {
2442 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2443 }
2444
2445 /* SIGTRAP-like events recognizer. */
2446
2447 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2448
2449 /* Check for SIGTRAP-like events in LP. */
2450
2451 static int
2452 linux_nat_lp_status_is_event (struct lwp_info *lp)
2453 {
2454 /* We check for lp->waitstatus in addition to lp->status, because we can
2455 have pending process exits recorded in lp->status
2456 and W_EXITCODE(0,0) == 0. We should probably have an additional
2457 lp->status_p flag. */
2458
2459 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2460 && linux_nat_status_is_event (lp->status));
2461 }
2462
2463 /* Set an alternative SIGTRAP-like events recognizer.  If a breakpoint
2464 is inserted at the trap address (breakpoint_inserted_here_p), then
2465 gdbarch_decr_pc_after_break will be applied.  */
2466
2467 void
2468 linux_nat_set_status_is_event (struct target_ops *t,
2469 int (*status_is_event) (int status))
2470 {
2471 linux_nat_status_is_event = status_is_event;
2472 }
2473
2474 /* Wait until LP is stopped. */
2475
2476 static int
2477 stop_wait_callback (struct lwp_info *lp, void *data)
2478 {
2479 struct inferior *inf = find_inferior_ptid (lp->ptid);
2480
2481 /* If this is a vfork parent, bail out; it is not going to report
2482 any SIGSTOP until the vfork is done.  */
2483 if (inf->vfork_child != NULL)
2484 return 0;
2485
2486 if (!lp->stopped)
2487 {
2488 int status;
2489
2490 status = wait_lwp (lp);
2491 if (status == 0)
2492 return 0;
2493
2494 if (lp->ignore_sigint && WIFSTOPPED (status)
2495 && WSTOPSIG (status) == SIGINT)
2496 {
2497 lp->ignore_sigint = 0;
2498
2499 errno = 0;
2500 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2501 lp->stopped = 0;
2502 if (debug_linux_nat)
2503 fprintf_unfiltered (gdb_stdlog,
2504 "PTRACE_CONT %s, 0, 0 (%s) "
2505 "(discarding SIGINT)\n",
2506 target_pid_to_str (lp->ptid),
2507 errno ? safe_strerror (errno) : "OK");
2508
2509 return stop_wait_callback (lp, NULL);
2510 }
2511
2512 maybe_clear_ignore_sigint (lp);
2513
2514 if (WSTOPSIG (status) != SIGSTOP)
2515 {
2516 /* The thread was stopped with a signal other than SIGSTOP. */
2517
2518 save_sigtrap (lp);
2519
2520 if (debug_linux_nat)
2521 fprintf_unfiltered (gdb_stdlog,
2522 "SWC: Pending event %s in %s\n",
2523 status_to_str ((int) status),
2524 target_pid_to_str (lp->ptid));
2525
2526 /* Save the sigtrap event. */
2527 lp->status = status;
2528 gdb_assert (lp->signalled);
2529 }
2530 else
2531 {
2532 /* We caught the SIGSTOP that we intended to catch, so
2533 there's no SIGSTOP pending. */
2534
2535 if (debug_linux_nat)
2536 fprintf_unfiltered (gdb_stdlog,
2537 "SWC: Delayed SIGSTOP caught for %s.\n",
2538 target_pid_to_str (lp->ptid));
2539
2540 /* Reset SIGNALLED only after the stop_wait_callback call
2541 above as it does gdb_assert on SIGNALLED. */
2542 lp->signalled = 0;
2543 }
2544 }
2545
2546 return 0;
2547 }
2548
2549 /* Return non-zero if LP has a wait status pending. */
2550
2551 static int
2552 status_callback (struct lwp_info *lp, void *data)
2553 {
2554 /* Only report a pending wait status if the LWP is considered to
2555 have been resumed by the core.  */
2556 if (!lp->resumed)
2557 return 0;
2558
2559 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2560 {
2561 /* A ptrace event, like PTRACE_EVENT_FORK|VFORK|EXEC, a syscall
2562 event, or a pending process exit.  Note that `W_EXITCODE(0,0) ==
2563 0', so a clean process exit cannot be stored pending in
2564 lp->status; it is indistinguishable from
2565 no-pending-status. */
2566 return 1;
2567 }
2568
2569 if (lp->status != 0)
2570 return 1;
2571
2572 return 0;
2573 }
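/* The W_EXITCODE wrinkle mentioned above, spelled out: a clean exit
   packs to the numeric value 0, so "clean exit pending" and "no
   status pending" collide in lp->status, while stop statuses never
   do.  Illustrative assertions only, assuming glibc's W_EXITCODE and
   W_STOPCODE macros from <sys/wait.h>.  */

#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

static void
exit_status_is_ambiguous (void)
{
  assert (W_EXITCODE (0, 0) == 0);	  /* Clean exit == "no status".  */
  assert (W_STOPCODE (SIGSTOP) != 0);	  /* Stops are distinguishable.  */
  assert (WIFSTOPPED (W_STOPCODE (SIGSTOP)));
}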
2574
2575 /* Return non-zero if LP isn't stopped. */
2576
2577 static int
2578 running_callback (struct lwp_info *lp, void *data)
2579 {
2580 return (!lp->stopped
2581 || ((lp->status != 0
2582 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2583 && lp->resumed));
2584 }
2585
2586 /* Count the LWP's that have had events. */
2587
2588 static int
2589 count_events_callback (struct lwp_info *lp, void *data)
2590 {
2591 int *count = data;
2592
2593 gdb_assert (count != NULL);
2594
2595 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2596 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2597 (*count)++;
2598
2599 return 0;
2600 }
2601
2602 /* Select the LWP (if any) that is currently being single-stepped. */
2603
2604 static int
2605 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2606 {
2607 if (lp->last_resume_kind == resume_step
2608 && lp->status != 0)
2609 return 1;
2610 else
2611 return 0;
2612 }
2613
2614 /* Select the Nth LWP that has had a SIGTRAP event. */
2615
2616 static int
2617 select_event_lwp_callback (struct lwp_info *lp, void *data)
2618 {
2619 int *selector = data;
2620
2621 gdb_assert (selector != NULL);
2622
2623 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2624 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2625 if ((*selector)-- == 0)
2626 return 1;
2627
2628 return 0;
2629 }
2630
2631 static int
2632 cancel_breakpoint (struct lwp_info *lp)
2633 {
2634 /* Arrange for a breakpoint to be hit again later. We don't keep
2635 the SIGTRAP status and don't forward the SIGTRAP signal to the
2636 LWP. We will handle the current event, eventually we will resume
2637 this LWP, and this breakpoint will trap again.
2638
2639 If we do not do this, then we run the risk that the user will
2640 delete or disable the breakpoint, but the LWP will have already
2641 tripped on it. */
2642
2643 struct regcache *regcache = get_thread_regcache (lp->ptid);
2644 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2645 CORE_ADDR pc;
2646
2647 pc = regcache_read_pc (regcache) - target_decr_pc_after_break (gdbarch);
2648 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2649 {
2650 if (debug_linux_nat)
2651 fprintf_unfiltered (gdb_stdlog,
2652 "CB: Push back breakpoint for %s\n",
2653 target_pid_to_str (lp->ptid));
2654
2655 /* Back up the PC if necessary. */
2656 if (target_decr_pc_after_break (gdbarch))
2657 regcache_write_pc (regcache, pc);
2658
2659 return 1;
2660 }
2661 return 0;
2662 }
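/* What "push back the breakpoint" means concretely, sketched with
   hypothetical helpers standing in for the regcache calls.  On x86,
   for instance, the trap for an inserted int3 reports a PC one byte
   past the breakpoint address (decr_pc_after_break == 1), so
   re-arming the breakpoint means rewinding the PC.  */

#include <stdint.h>

#define DECR_PC_AFTER_BREAK 1	/* x86's int3 is one byte long.  */

static int
push_back_breakpoint (uint64_t pc,
		      int (*breakpoint_at) (uint64_t),
		      void (*write_pc) (uint64_t))
{
  uint64_t bp_pc = pc - DECR_PC_AFTER_BREAK;

  if (!breakpoint_at (bp_pc))
    return 0;			/* Some other trap; keep the SIGTRAP.  */
  if (DECR_PC_AFTER_BREAK != 0)
    write_pc (bp_pc);		/* Re-execute the breakpoint later.  */
  return 1;			/* Caller discards the SIGTRAP.  */
}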
2663
2664 static int
2665 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2666 {
2667 struct lwp_info *event_lp = data;
2668
2669 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2670 if (lp == event_lp)
2671 return 0;
2672
2673 /* If a LWP other than the LWP that we're reporting an event for has
2674 hit a GDB breakpoint (as opposed to some random trap signal),
2675 then just arrange for it to hit it again later. We don't keep
2676 the SIGTRAP status and don't forward the SIGTRAP signal to the
2677 LWP. We will handle the current event, eventually we will resume
2678 all LWPs, and this one will get its breakpoint trap again.
2679
2680 If we do not do this, then we run the risk that the user will
2681 delete or disable the breakpoint, but the LWP will have already
2682 tripped on it. */
2683
2684 if (linux_nat_lp_status_is_event (lp)
2685 && cancel_breakpoint (lp))
2686 /* Throw away the SIGTRAP. */
2687 lp->status = 0;
2688
2689 return 0;
2690 }
2691
2692 /* Select one LWP out of those that have events pending. */
2693
2694 static void
2695 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2696 {
2697 int num_events = 0;
2698 int random_selector;
2699 struct lwp_info *event_lp;
2700
2701 /* Record the wait status for the original LWP. */
2702 (*orig_lp)->status = *status;
2703
2704 /* Give preference to any LWP that is being single-stepped. */
2705 event_lp = iterate_over_lwps (filter,
2706 select_singlestep_lwp_callback, NULL);
2707 if (event_lp != NULL)
2708 {
2709 if (debug_linux_nat)
2710 fprintf_unfiltered (gdb_stdlog,
2711 "SEL: Select single-step %s\n",
2712 target_pid_to_str (event_lp->ptid));
2713 }
2714 else
2715 {
2716 /* No single-stepping LWP. Select one at random, out of those
2717 which have had SIGTRAP events. */
2718
2719 /* First see how many SIGTRAP events we have. */
2720 iterate_over_lwps (filter, count_events_callback, &num_events);
2721
2722 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2723 random_selector = (int)
2724 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2725
2726 if (debug_linux_nat && num_events > 1)
2727 fprintf_unfiltered (gdb_stdlog,
2728 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2729 num_events, random_selector);
2730
2731 event_lp = iterate_over_lwps (filter,
2732 select_event_lwp_callback,
2733 &random_selector);
2734 }
2735
2736 if (event_lp != NULL)
2737 {
2738 /* Switch the event LWP. */
2739 *orig_lp = event_lp;
2740 *status = event_lp->status;
2741 }
2742
2743 /* Flush the wait status for the event LWP. */
2744 (*orig_lp)->status = 0;
2745 }
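/* The random pick above, isolated: scaling rand () by
   (RAND_MAX + 1.0) maps it into [0, num_events) nearly uniformly,
   avoiding the modulo bias of the naive `rand () % num_events' when
   RAND_MAX + 1 is not a multiple of num_events.  */

#include <stdlib.h>

static int
pick_event_index (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}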
2746
2747 /* Return non-zero if LP has been resumed. */
2748
2749 static int
2750 resumed_callback (struct lwp_info *lp, void *data)
2751 {
2752 return lp->resumed;
2753 }
2754
2755 /* Stop an active thread, verify it still exists, then resume it. If
2756 the thread ends up with a pending status, then it is not resumed,
2757 and *DATA (really a pointer to int) is set.  */
2758
2759 static int
2760 stop_and_resume_callback (struct lwp_info *lp, void *data)
2761 {
2762 int *new_pending_p = data;
2763
2764 if (!lp->stopped)
2765 {
2766 ptid_t ptid = lp->ptid;
2767
2768 stop_callback (lp, NULL);
2769 stop_wait_callback (lp, NULL);
2770
2771 /* Resume if the lwp still exists, and the core wanted it
2772 running. */
2773 lp = find_lwp_pid (ptid);
2774 if (lp != NULL)
2775 {
2776 if (lp->last_resume_kind == resume_stop
2777 && lp->status == 0)
2778 {
2779 /* The core wanted the LWP to stop. Even if it stopped
2780 cleanly (with SIGSTOP), leave the event pending. */
2781 if (debug_linux_nat)
2782 fprintf_unfiltered (gdb_stdlog,
2783 "SARC: core wanted LWP %ld stopped "
2784 "(leaving SIGSTOP pending)\n",
2785 ptid_get_lwp (lp->ptid));
2786 lp->status = W_STOPCODE (SIGSTOP);
2787 }
2788
2789 if (lp->status == 0)
2790 {
2791 if (debug_linux_nat)
2792 fprintf_unfiltered (gdb_stdlog,
2793 "SARC: re-resuming LWP %ld\n",
2794 ptid_get_lwp (lp->ptid));
2795 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
2796 }
2797 else
2798 {
2799 if (debug_linux_nat)
2800 fprintf_unfiltered (gdb_stdlog,
2801 "SARC: not re-resuming LWP %ld "
2802 "(has pending)\n",
2803 ptid_get_lwp (lp->ptid));
2804 if (new_pending_p)
2805 *new_pending_p = 1;
2806 }
2807 }
2808 }
2809 return 0;
2810 }
2811
2812 /* Check if we should go on and pass this event to common code.
2813 Return the affected lwp if we are, or NULL otherwise. If we stop
2814 all lwps temporarily, we may end up with new pending events in some
2815 other lwp. In that case set *NEW_PENDING_P to true. */
2816
2817 static struct lwp_info *
2818 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
2819 {
2820 struct lwp_info *lp;
2821 int event = linux_ptrace_get_extended_event (status);
2822
2823 *new_pending_p = 0;
2824
2825 lp = find_lwp_pid (pid_to_ptid (lwpid));
2826
2827 /* Check for stop events reported by a process we didn't already
2828 know about - anything not already in our LWP list.
2829
2830 If we're expecting to receive stopped processes after
2831 fork, vfork, and clone events, then we'll just add the
2832 new one to our list and go back to waiting for the event
2833 to be reported - the stopped process might be returned
2834 from waitpid before or after the event is.
2835
2836 But note the case of a non-leader thread exec'ing after the
2837 leader has exited and gone from our lists.  The non-leader
2838 thread changes its tid to the tgid. */
2839
2840 if (WIFSTOPPED (status) && lp == NULL
2841 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
2842 {
2843 /* A multi-thread exec after we had seen the leader exiting. */
2844 if (debug_linux_nat)
2845 fprintf_unfiltered (gdb_stdlog,
2846 "LLW: Re-adding thread group leader LWP %d.\n",
2847 lwpid);
2848
2849 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
2850 lp->stopped = 1;
2851 lp->resumed = 1;
2852 add_thread (lp->ptid);
2853 }
2854
2855 if (WIFSTOPPED (status) && !lp)
2856 {
2857 add_to_pid_list (&stopped_pids, lwpid, status);
2858 return NULL;
2859 }
2860
2861 /* Make sure we don't report an event for the exit of an LWP not in
2862 our list, i.e. not part of the current process. This can happen
2863 if we detach from a program we originally forked and then it
2864 exits. */
2865 if (!WIFSTOPPED (status) && !lp)
2866 return NULL;
2867
2868 /* This LWP is stopped now. (And if dead, this prevents it from
2869 ever being continued.) */
2870 lp->stopped = 1;
2871
2872 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2873 {
2874 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2875
2876 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2877 lp->must_set_ptrace_flags = 0;
2878 }
2879
2880 /* Handle GNU/Linux's syscall SIGTRAPs. */
2881 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2882 {
2883 /* No longer need the sysgood bit. The ptrace event ends up
2884 recorded in lp->waitstatus if we care for it. We can carry
2885 on handling the event like a regular SIGTRAP from here
2886 on. */
2887 status = W_STOPCODE (SIGTRAP);
2888 if (linux_handle_syscall_trap (lp, 0))
2889 return NULL;
2890 }
2891
2892 /* Handle GNU/Linux's extended waitstatus for trace events. */
2893 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2894 && linux_is_extended_waitstatus (status))
2895 {
2896 if (debug_linux_nat)
2897 fprintf_unfiltered (gdb_stdlog,
2898 "LLW: Handling extended status 0x%06x\n",
2899 status);
2900 if (linux_handle_extended_wait (lp, status, 0))
2901 return NULL;
2902 }
2903
2904 if (linux_nat_status_is_event (status))
2905 save_sigtrap (lp);
2906
2907 /* Check if the thread has exited. */
2908 if ((WIFEXITED (status) || WIFSIGNALED (status))
2909 && num_lwps (ptid_get_pid (lp->ptid)) > 1)
2910 {
2911 /* If this is the main thread, we must stop all threads and verify
2912 if they are still alive. This is because in the nptl thread model
2913 on Linux 2.4, there is no signal issued for exiting LWPs
2914 other than the main thread. We only get the main thread exit
2915 signal once all child threads have already exited. If we
2916 stop all the threads and use the stop_wait_callback to check
2917 if they have exited we can determine whether this signal
2918 should be ignored or whether it means the end of the debugged
2919 application, regardless of which threading model is being
2920 used. */
2921 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2922 {
2923 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
2924 stop_and_resume_callback, new_pending_p);
2925 }
2926
2927 if (debug_linux_nat)
2928 fprintf_unfiltered (gdb_stdlog,
2929 "LLW: %s exited.\n",
2930 target_pid_to_str (lp->ptid));
2931
2932 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
2933 {
2934 /* If there is at least one more LWP, then the exit signal
2935 was not the end of the debugged application and should be
2936 ignored. */
2937 exit_lwp (lp);
2938 return NULL;
2939 }
2940 }
2941
2942 /* Check if the current LWP has previously exited. In the nptl
2943 thread model, LWPs other than the main thread do not issue
2944 signals when they exit so we must check whenever the thread has
2945 stopped. A similar check is made in stop_wait_callback(). */
2946 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
2947 {
2948 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
2949
2950 if (debug_linux_nat)
2951 fprintf_unfiltered (gdb_stdlog,
2952 "LLW: %s exited.\n",
2953 target_pid_to_str (lp->ptid));
2954
2955 exit_lwp (lp);
2956
2957 /* Make sure there is at least one thread running. */
2958 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
2959
2960 /* Discard the event. */
2961 return NULL;
2962 }
2963
2964 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2965 an attempt to stop an LWP. */
2966 if (lp->signalled
2967 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2968 {
2969 if (debug_linux_nat)
2970 fprintf_unfiltered (gdb_stdlog,
2971 "LLW: Delayed SIGSTOP caught for %s.\n",
2972 target_pid_to_str (lp->ptid));
2973
2974 lp->signalled = 0;
2975
2976 if (lp->last_resume_kind != resume_stop)
2977 {
2978 /* This is a delayed SIGSTOP. */
2979
2980 registers_changed ();
2981
2982 if (linux_nat_prepare_to_resume != NULL)
2983 linux_nat_prepare_to_resume (lp);
2984 linux_ops->to_resume (linux_ops,
2985 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2986 lp->step, GDB_SIGNAL_0);
2987 if (debug_linux_nat)
2988 fprintf_unfiltered (gdb_stdlog,
2989 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2990 lp->step ?
2991 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2992 target_pid_to_str (lp->ptid));
2993
2994 lp->stopped = 0;
2995 gdb_assert (lp->resumed);
2996
2997 /* Discard the event. */
2998 return NULL;
2999 }
3000 }
3001
3002 /* Make sure we don't report a SIGINT that we have already displayed
3003 for another thread. */
3004 if (lp->ignore_sigint
3005 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3006 {
3007 if (debug_linux_nat)
3008 fprintf_unfiltered (gdb_stdlog,
3009 "LLW: Delayed SIGINT caught for %s.\n",
3010 target_pid_to_str (lp->ptid));
3011
3012 /* This is a delayed SIGINT. */
3013 lp->ignore_sigint = 0;
3014
3015 registers_changed ();
3016 if (linux_nat_prepare_to_resume != NULL)
3017 linux_nat_prepare_to_resume (lp);
3018 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
3019 lp->step, GDB_SIGNAL_0);
3020 if (debug_linux_nat)
3021 fprintf_unfiltered (gdb_stdlog,
3022 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3023 lp->step ?
3024 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3025 target_pid_to_str (lp->ptid));
3026
3027 lp->stopped = 0;
3028 gdb_assert (lp->resumed);
3029
3030 /* Discard the event. */
3031 return NULL;
3032 }
3033
3034 /* An interesting event. */
3035 gdb_assert (lp);
3036 lp->status = status;
3037 return lp;
3038 }
3039
3040 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3041 their exits until all other threads in the group have exited. */
3042
3043 static void
3044 check_zombie_leaders (void)
3045 {
3046 struct inferior *inf;
3047
3048 ALL_INFERIORS (inf)
3049 {
3050 struct lwp_info *leader_lp;
3051
3052 if (inf->pid == 0)
3053 continue;
3054
3055 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3056 if (leader_lp != NULL
3057 /* Check if there are other threads in the group, as we may
3058 have raced with the inferior simply exiting. */
3059 && num_lwps (inf->pid) > 1
3060 && linux_proc_pid_is_zombie (inf->pid))
3061 {
3062 if (debug_linux_nat)
3063 fprintf_unfiltered (gdb_stdlog,
3064 "CZL: Thread group leader %d zombie "
3065 "(it exited, or another thread execd).\n",
3066 inf->pid);
3067
3068 /* A leader zombie can mean one of two things:
3069
3070 - It exited, and there's an exit status pending and
3071 available, or only the leader exited (not the whole
3072 program). In the latter case, we can't waitpid the
3073 leader's exit status until all other threads are gone.
3074
3075 - There are 3 or more threads in the group, and a thread
3076 other than the leader exec'd. On an exec, the Linux
3077 kernel destroys all other threads (except the execing
3078 one) in the thread group, and resets the execing thread's
3079 tid to the tgid. No exit notification is sent for the
3080 execing thread -- from the ptracer's perspective, it
3081 appears as though the execing thread just vanishes.
3082 Until we reap all other threads except the leader and the
3083 execing thread, the leader will be zombie, and the
3084 execing thread will be in `D (disc sleep)'. As soon as
3085 all other threads are reaped, the execing thread changes
3086 it's tid to the tgid, and the previous (zombie) leader
3087 vanishes, giving place to the "new" leader. We could try
3088 distinguishing the exit and exec cases, by waiting once
3089 more, and seeing if something comes out, but it doesn't
3090 sound useful. The previous leader _does_ go away, and
3091 we'll re-add the new one once we see the exec event
3092 (which is just the same as what would happen if the
3093 previous leader did exit voluntarily before some other
3094 thread execs). */
3095
3096 if (debug_linux_nat)
3097 fprintf_unfiltered (gdb_stdlog,
3098 "CZL: Thread group leader %d vanished.\n",
3099 inf->pid);
3100 exit_lwp (leader_lp);
3101 }
3102 }
3103 }
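/* A sketch of the test this logic relies on: a zombie shows up as
   state 'Z' in the "State:" line of /proc/PID/status.  Simplified
   from what nat/linux-procfs.c does, with error handling elided.  */

#include <stdio.h>

static int
pid_is_zombie (int pid)
{
  char path[64], line[256], state;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (sscanf (line, "State: %c", &state) == 1)
      {
	fclose (f);
	return state == 'Z';
      }
  fclose (f);
  return 0;
}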
3104
3105 static ptid_t
3106 linux_nat_wait_1 (struct target_ops *ops,
3107 ptid_t ptid, struct target_waitstatus *ourstatus,
3108 int target_options)
3109 {
3110 sigset_t prev_mask;
3111 enum resume_kind last_resume_kind;
3112 struct lwp_info *lp;
3113 int status;
3114
3115 if (debug_linux_nat)
3116 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3117
3118 /* The first time we get here after starting a new inferior, we may
3119 not have added it to the LWP list yet - this is the earliest
3120 moment at which we know its PID. */
3121 if (ptid_is_pid (inferior_ptid))
3122 {
3123 /* Upgrade the main thread's ptid. */
3124 thread_change_ptid (inferior_ptid,
3125 ptid_build (ptid_get_pid (inferior_ptid),
3126 ptid_get_pid (inferior_ptid), 0));
3127
3128 lp = add_initial_lwp (inferior_ptid);
3129 lp->resumed = 1;
3130 }
3131
3132 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3133 block_child_signals (&prev_mask);
3134
3135 retry:
3136 lp = NULL;
3137 status = 0;
3138
3139 /* First check if there is a LWP with a wait status pending. */
3140 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3141 {
3142 /* Any LWP in the PTID group that's been resumed will do. */
3143 lp = iterate_over_lwps (ptid, status_callback, NULL);
3144 if (lp)
3145 {
3146 if (debug_linux_nat && lp->status)
3147 fprintf_unfiltered (gdb_stdlog,
3148 "LLW: Using pending wait status %s for %s.\n",
3149 status_to_str (lp->status),
3150 target_pid_to_str (lp->ptid));
3151 }
3152 }
3153 else if (ptid_lwp_p (ptid))
3154 {
3155 if (debug_linux_nat)
3156 fprintf_unfiltered (gdb_stdlog,
3157 "LLW: Waiting for specific LWP %s.\n",
3158 target_pid_to_str (ptid));
3159
3160 /* We have a specific LWP to check. */
3161 lp = find_lwp_pid (ptid);
3162 gdb_assert (lp);
3163
3164 if (debug_linux_nat && lp->status)
3165 fprintf_unfiltered (gdb_stdlog,
3166 "LLW: Using pending wait status %s for %s.\n",
3167 status_to_str (lp->status),
3168 target_pid_to_str (lp->ptid));
3169
3170 /* We check for lp->waitstatus in addition to lp->status,
3171 because we can have pending process exits recorded in
3172 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3173 an additional lp->status_p flag. */
3174 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3175 lp = NULL;
3176 }
3177
3178 if (!target_can_async_p ())
3179 {
3180 /* Causes SIGINT to be passed on to the attached process. */
3181 set_sigint_trap ();
3182 }
3183
3184 /* But if we don't find a pending event, we'll have to wait. */
3185
3186 while (lp == NULL)
3187 {
3188 pid_t lwpid;
3189
3190 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3191 quirks:
3192
3193 - If the thread group leader exits while other threads in the
3194 thread group still exist, waitpid(TGID, ...) hangs. That
3195 waitpid won't return an exit status until the other threads
3196 in the group are reaped.
3197
3198 - When a non-leader thread execs, that thread just vanishes
3199 without reporting an exit (so we'd hang if we waited for it
3200 explicitly in that case). The exec event is reported to
3201 the TGID pid. */
3202
3203 errno = 0;
3204 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3205 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3206 lwpid = my_waitpid (-1, &status, WNOHANG);
3207
3208 if (debug_linux_nat)
3209 fprintf_unfiltered (gdb_stdlog,
3210 "LNW: waitpid(-1, ...) returned %d, %s\n",
3211 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3212
3213 if (lwpid > 0)
3214 {
3215 /* If this is true, then we paused LWPs momentarily, and may
3216 now have pending events to handle. */
3217 int new_pending;
3218
3219 if (debug_linux_nat)
3220 {
3221 fprintf_unfiltered (gdb_stdlog,
3222 "LLW: waitpid %ld received %s\n",
3223 (long) lwpid, status_to_str (status));
3224 }
3225
3226 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3227
3228 /* STATUS is now no longer valid; use LP->STATUS instead.  */
3229 status = 0;
3230
3231 if (lp && !ptid_match (lp->ptid, ptid))
3232 {
3233 gdb_assert (lp->resumed);
3234
3235 if (debug_linux_nat)
3236 fprintf_unfiltered (gdb_stdlog,
3237 "LWP %ld got an event %06x, "
3238 "leaving pending.\n",
3239 ptid_get_lwp (lp->ptid), lp->status);
3240
3241 if (WIFSTOPPED (lp->status))
3242 {
3243 if (WSTOPSIG (lp->status) != SIGSTOP)
3244 {
3245 /* Cancel breakpoint hits. The breakpoint may
3246 be removed before we fetch events from this
3247 process to report to the core. It is best
3248 not to assume the moribund breakpoints
3249 heuristic always handles these cases --- it
3250 could be too many events go through to the
3251 core before this one is handled. All-stop
3252 always cancels breakpoint hits in all
3253 threads. */
3254 if (non_stop
3255 && linux_nat_lp_status_is_event (lp)
3256 && cancel_breakpoint (lp))
3257 {
3258 /* Throw away the SIGTRAP. */
3259 lp->status = 0;
3260
3261 if (debug_linux_nat)
3262 fprintf_unfiltered (gdb_stdlog,
3263 "LLW: LWP %ld hit a "
3264 "breakpoint while "
3265 "waiting for another "
3266 "process; "
3267 "cancelled it\n",
3268 ptid_get_lwp (lp->ptid));
3269 }
3270 }
3271 else
3272 lp->signalled = 0;
3273 }
3274 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3275 {
3276 if (debug_linux_nat)
3277 fprintf_unfiltered (gdb_stdlog,
3278 "Process %ld exited while stopping "
3279 "LWPs\n",
3280 ptid_get_lwp (lp->ptid));
3281
3282 /* This was the last lwp in the process.  Events
3283 are serialized to the GDB core, so we can't
3284 report this one right now; but the core and the
3285 other target layers will want to be notified
3286 about the exit code/signal.  Leave the status
3287 pending for the next time we're able to report
3288 it.  */
3289
3290 /* Dead LWPs aren't expected to report a pending
3291 SIGSTOP.  */
3292 lp->signalled = 0;
3293
3294 /* Store the pending event in the waitstatus as
3295 well, because W_EXITCODE(0,0) == 0. */
3296 store_waitstatus (&lp->waitstatus, lp->status);
3297 }
3298
3299 /* Keep looking. */
3300 lp = NULL;
3301 }
3302
3303 if (new_pending)
3304 {
3305 /* Some LWP now has a pending event. Go all the way
3306 back to check it. */
3307 goto retry;
3308 }
3309
3310 if (lp)
3311 {
3312 /* We got an event to report to the core. */
3313 break;
3314 }
3315
3316 /* Retry until nothing comes out of waitpid. A single
3317 SIGCHLD can indicate more than one child stopped. */
3318 continue;
3319 }
3320
3321 /* Check for zombie thread group leaders. Those can't be reaped
3322 until all other threads in the thread group are. */
3323 check_zombie_leaders ();
3324
3325 /* If there are no resumed children left, bail. We'd be stuck
3326 forever in the sigsuspend call below otherwise. */
3327 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3328 {
3329 if (debug_linux_nat)
3330 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3331
3332 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3333
3334 if (!target_can_async_p ())
3335 clear_sigint_trap ();
3336
3337 restore_child_signals_mask (&prev_mask);
3338 return minus_one_ptid;
3339 }
3340
3341 /* No interesting event to report to the core. */
3342
3343 if (target_options & TARGET_WNOHANG)
3344 {
3345 if (debug_linux_nat)
3346 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3347
3348 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3349 restore_child_signals_mask (&prev_mask);
3350 return minus_one_ptid;
3351 }
3352
3353 /* We shouldn't end up here unless we want to try again. */
3354 gdb_assert (lp == NULL);
3355
3356 /* Block until we get an event reported with SIGCHLD. */
3357 if (debug_linux_nat)
3358 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
3359 sigsuspend (&suspend_mask);
3360 }
3361
3362 if (!target_can_async_p ())
3363 clear_sigint_trap ();
3364
3365 gdb_assert (lp);
3366
3367 status = lp->status;
3368 lp->status = 0;
3369
3370 /* Don't report signals that GDB isn't interested in, such as
3371 signals that are neither printed nor stopped upon. Stopping all
3372 threads can be a bit time-consuming so if we want decent
3373 performance with heavily multi-threaded programs, especially when
3374 they're using a high frequency timer, we'd better avoid it if we
3375 can. */
3376
3377 if (WIFSTOPPED (status))
3378 {
3379 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3380
3381 /* When using hardware single-step, we need to report every signal.
3382 Otherwise, signals in pass_mask may be short-circuited. */
3383 if (!lp->step
3384 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3385 {
3386 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3387 here? It is not clear we should. GDB may not expect
3388 other threads to run. On the other hand, not resuming
3389 newly attached threads may cause an unwanted delay in
3390 getting them running. */
3391 registers_changed ();
3392 if (linux_nat_prepare_to_resume != NULL)
3393 linux_nat_prepare_to_resume (lp);
3394 linux_ops->to_resume (linux_ops,
3395 pid_to_ptid (ptid_get_lwp (lp->ptid)),
3396 lp->step, signo);
3397 if (debug_linux_nat)
3398 fprintf_unfiltered (gdb_stdlog,
3399 "LLW: %s %s, %s (preempt 'handle')\n",
3400 lp->step ?
3401 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3402 target_pid_to_str (lp->ptid),
3403 (signo != GDB_SIGNAL_0
3404 ? strsignal (gdb_signal_to_host (signo))
3405 : "0"));
3406 lp->stopped = 0;
3407 goto retry;
3408 }
3409
3410 if (!non_stop)
3411 {
3412 /* Only do the below in all-stop, as we currently use SIGINT
3413 to implement target_stop (see linux_nat_stop) in
3414 non-stop. */
3415 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3416 {
3417 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3418 forwarded to the entire process group, that is, all LWPs
3419 will receive it - unless they're using CLONE_THREAD to
3420 share signals. Since we only want to report it once, we
3421 mark it as ignored for all LWPs except this one. */
3422 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3423 set_ignore_sigint, NULL);
3424 lp->ignore_sigint = 0;
3425 }
3426 else
3427 maybe_clear_ignore_sigint (lp);
3428 }
3429 }
3430
3431 /* This LWP is stopped now. */
3432 lp->stopped = 1;
3433
3434 if (debug_linux_nat)
3435 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3436 status_to_str (status), target_pid_to_str (lp->ptid));
3437
3438 if (!non_stop)
3439 {
3440 /* Now stop all other LWP's ... */
3441 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3442
3443 /* ... and wait until all of them have reported back that
3444 they're no longer running. */
3445 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3446
3447 /* If we're not waiting for a specific LWP, choose an event LWP
3448 from among those that have had events. Giving equal priority
3449 to all LWPs that have had events helps prevent
3450 starvation. */
3451 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3452 select_event_lwp (ptid, &lp, &status);
3453
3454 /* Now that we've selected our final event LWP, cancel any
3455 breakpoints in other LWPs that have hit a GDB breakpoint.
3456 See the comment in cancel_breakpoints_callback to find out
3457 why. */
3458 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3459
3460 /* We'll need this to determine whether to report a SIGSTOP as
3461 GDB_SIGNAL_0.  Need to take a copy because
3462 resume_clear_callback clears it. */
3463 last_resume_kind = lp->last_resume_kind;
3464
3465 /* In all-stop, from the core's perspective, all LWPs are now
3466 stopped until a new resume action is sent over. */
3467 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3468 }
3469 else
3470 {
3471 /* See above. */
3472 last_resume_kind = lp->last_resume_kind;
3473 resume_clear_callback (lp, NULL);
3474 }
3475
3476 if (linux_nat_status_is_event (status))
3477 {
3478 if (debug_linux_nat)
3479 fprintf_unfiltered (gdb_stdlog,
3480 "LLW: trap ptid is %s.\n",
3481 target_pid_to_str (lp->ptid));
3482 }
3483
3484 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3485 {
3486 *ourstatus = lp->waitstatus;
3487 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3488 }
3489 else
3490 store_waitstatus (ourstatus, status);
3491
3492 if (debug_linux_nat)
3493 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3494
3495 restore_child_signals_mask (&prev_mask);
3496
3497 if (last_resume_kind == resume_stop
3498 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3499 && WSTOPSIG (status) == SIGSTOP)
3500 {
3501 /* A thread that has been requested to stop by GDB with
3502 target_stop, and it stopped cleanly, so report as SIG0. The
3503 use of SIGSTOP is an implementation detail. */
3504 ourstatus->value.sig = GDB_SIGNAL_0;
3505 }
3506
3507 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3508 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3509 lp->core = -1;
3510 else
3511 lp->core = linux_common_core_of_thread (lp->ptid);
3512
3513 return lp->ptid;
3514 }
3515
3516 /* Resume LWPs that are currently stopped without any pending status
3517 to report, but are resumed from the core's perspective. */
3518
3519 static int
3520 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3521 {
3522 ptid_t *wait_ptid_p = data;
3523
3524 if (lp->stopped
3525 && lp->resumed
3526 && lp->status == 0
3527 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3528 {
3529 struct regcache *regcache = get_thread_regcache (lp->ptid);
3530 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3531 CORE_ADDR pc = regcache_read_pc (regcache);
3532
3533 gdb_assert (is_executing (lp->ptid));
3534
3535 /* Don't bother if there's a breakpoint at PC that we'd hit
3536 immediately, and we're not waiting for this LWP. */
3537 if (!ptid_match (lp->ptid, *wait_ptid_p))
3538 {
3539 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3540 return 0;
3541 }
3542
3543 if (debug_linux_nat)
3544 fprintf_unfiltered (gdb_stdlog,
3545 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3546 target_pid_to_str (lp->ptid),
3547 paddress (gdbarch, pc),
3548 lp->step);
3549
3550 registers_changed ();
3551 if (linux_nat_prepare_to_resume != NULL)
3552 linux_nat_prepare_to_resume (lp);
3553 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
3554 lp->step, GDB_SIGNAL_0);
3555 lp->stopped = 0;
3556 lp->stopped_by_watchpoint = 0;
3557 }
3558
3559 return 0;
3560 }
3561
3562 static ptid_t
3563 linux_nat_wait (struct target_ops *ops,
3564 ptid_t ptid, struct target_waitstatus *ourstatus,
3565 int target_options)
3566 {
3567 ptid_t event_ptid;
3568
3569 if (debug_linux_nat)
3570 {
3571 char *options_string;
3572
3573 options_string = target_options_to_string (target_options);
3574 fprintf_unfiltered (gdb_stdlog,
3575 "linux_nat_wait: [%s], [%s]\n",
3576 target_pid_to_str (ptid),
3577 options_string);
3578 xfree (options_string);
3579 }
3580
3581 /* Flush the async file first. */
3582 if (target_can_async_p ())
3583 async_file_flush ();
3584
3585 /* Resume LWPs that are currently stopped without any pending status
3586 to report, but are resumed from the core's perspective. LWPs get
3587 in this state if we find them stopping at a time we're not
3588 interested in reporting the event (target_wait on a
3589 specific_process, for example, see linux_nat_wait_1), and
3590 meanwhile the event became uninteresting. Don't bother resuming
3591 LWPs we're not going to wait for if they'd stop immediately. */
3592 if (non_stop)
3593 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3594
3595 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3596
3597 /* If we requested any event, and something came out, assume there
3598 may be more. If we requested a specific lwp or process, also
3599 assume there may be more. */
3600 if (target_can_async_p ()
3601 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3602 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3603 || !ptid_equal (ptid, minus_one_ptid)))
3604 async_file_mark ();
3605
3606 /* Get ready for the next event. */
3607 if (target_can_async_p ())
3608 target_async (inferior_event_handler, 0);
3609
3610 return event_ptid;
3611 }
3612
3613 static int
3614 kill_callback (struct lwp_info *lp, void *data)
3615 {
3616 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3617
3618 errno = 0;
3619 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
3620 if (debug_linux_nat)
3621 {
3622 int save_errno = errno;
3623
3624 fprintf_unfiltered (gdb_stdlog,
3625 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3626 target_pid_to_str (lp->ptid),
3627 save_errno ? safe_strerror (save_errno) : "OK");
3628 }
3629
3630 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3631
3632 errno = 0;
3633 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
3634 if (debug_linux_nat)
3635 {
3636 int save_errno = errno;
3637
3638 fprintf_unfiltered (gdb_stdlog,
3639 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3640 target_pid_to_str (lp->ptid),
3641 save_errno ? safe_strerror (save_errno) : "OK");
3642 }
3643
3644 return 0;
3645 }
3646
3647 static int
3648 kill_wait_callback (struct lwp_info *lp, void *data)
3649 {
3650 pid_t pid;
3651
3652 /* We must make sure that there are no pending events (delayed
3653 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3654 program doesn't interfere with any following debugging session. */
3655
3656 /* For cloned processes we must check both with __WCLONE and
3657 without, since the exit status of a cloned process isn't reported
3658 with __WCLONE. */
3659 if (lp->cloned)
3660 {
3661 do
3662 {
3663 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
3664 if (pid != (pid_t) -1)
3665 {
3666 if (debug_linux_nat)
3667 fprintf_unfiltered (gdb_stdlog,
3668 "KWC: wait %s received unknown.\n",
3669 target_pid_to_str (lp->ptid));
3670 /* The Linux kernel sometimes fails to kill a thread
3671 completely after PTRACE_KILL; the thread runs from the
3672 stop point in do_fork out to the one in
3673 get_signal_to_deliver and waits again. So kill it
3674 again. */
3675 kill_callback (lp, NULL);
3676 }
3677 }
3678 while (pid == ptid_get_lwp (lp->ptid));
3679
3680 gdb_assert (pid == -1 && errno == ECHILD);
3681 }
3682
3683 do
3684 {
3685 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
3686 if (pid != (pid_t) -1)
3687 {
3688 if (debug_linux_nat)
3689 fprintf_unfiltered (gdb_stdlog,
3690 "KWC: wait %s received unk.\n",
3691 target_pid_to_str (lp->ptid));
3692 /* See the call to kill_callback above. */
3693 kill_callback (lp, NULL);
3694 }
3695 }
3696 while (pid == ptid_get_lwp (lp->ptid));
3697
3698 gdb_assert (pid == -1 && errno == ECHILD);
3699 return 0;
3700 }
3701
3702 static void
3703 linux_nat_kill (struct target_ops *ops)
3704 {
3705 struct target_waitstatus last;
3706 ptid_t last_ptid;
3707 int status;
3708
3709 /* If we're stopped while forking and we haven't followed yet,
3710 kill the other task. We need to do this first because the
3711 parent will be sleeping if this is a vfork. */
3712
3713 get_last_target_status (&last_ptid, &last);
3714
3715 if (last.kind == TARGET_WAITKIND_FORKED
3716 || last.kind == TARGET_WAITKIND_VFORKED)
3717 {
3718 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
3719 wait (&status);
3720
3721 /* Let the arch-specific native code know this process is
3722 gone. */
3723 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
3724 }
3725
3726 if (forks_exist_p ())
3727 linux_fork_killall ();
3728 else
3729 {
3730 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
3731
3732 /* Stop all threads before killing them, since ptrace requires
3733 that the thread is stopped to successfully PTRACE_KILL. */
3734 iterate_over_lwps (ptid, stop_callback, NULL);
3735 /* ... and wait until all of them have reported back that
3736 they're no longer running. */
3737 iterate_over_lwps (ptid, stop_wait_callback, NULL);
3738
3739 /* Kill all LWP's ... */
3740 iterate_over_lwps (ptid, kill_callback, NULL);
3741
3742 /* ... and wait until we've flushed all events. */
3743 iterate_over_lwps (ptid, kill_wait_callback, NULL);
3744 }
3745
3746 target_mourn_inferior ();
3747 }
3748
3749 static void
3750 linux_nat_mourn_inferior (struct target_ops *ops)
3751 {
3752 int pid = ptid_get_pid (inferior_ptid);
3753
3754 purge_lwp_list (pid);
3755
3756 if (! forks_exist_p ())
3757 /* Normal case, no other forks available. */
3758 linux_ops->to_mourn_inferior (ops);
3759 else
3760 /* Multi-fork case. The current inferior_ptid has exited, but
3761 there are other viable forks to debug. Delete the exiting
3762 one and context-switch to the first available. */
3763 linux_fork_mourn_inferior ();
3764
3765 /* Let the arch-specific native code know this process is gone. */
3766 linux_nat_forget_process (pid);
3767 }
3768
3769 /* Convert a native/host siginfo object into/from the layout of the
3770 inferior's architecture. DIRECTION 0 is native-to-inferior; 1 is the reverse. */
3771
3772 static void
3773 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3774 {
3775 int done = 0;
3776
3777 if (linux_nat_siginfo_fixup != NULL)
3778 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3779
3780 /* If there was no callback, or the callback didn't do anything,
3781 then just do a straight memcpy. */
3782 if (!done)
3783 {
3784 if (direction == 1)
3785 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3786 else
3787 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3788 }
3789 }
3790
3791 static enum target_xfer_status
3792 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3793 const char *annex, gdb_byte *readbuf,
3794 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3795 ULONGEST *xfered_len)
3796 {
3797 int pid;
3798 siginfo_t siginfo;
3799 gdb_byte inf_siginfo[sizeof (siginfo_t)];
3800
3801 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3802 gdb_assert (readbuf || writebuf);
3803
3804 pid = ptid_get_lwp (inferior_ptid);
3805 if (pid == 0)
3806 pid = ptid_get_pid (inferior_ptid);
3807
3808 if (offset > sizeof (siginfo))
3809 return TARGET_XFER_E_IO;
3810
3811 errno = 0;
3812 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3813 if (errno != 0)
3814 return TARGET_XFER_E_IO;
3815
3816 /* When GDB is built as a 64-bit application, ptrace writes into
3817 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3818 inferior with a 64-bit GDB should look the same as debugging it
3819 with a 32-bit GDB, we need to convert it. GDB core always sees
3820 the converted layout, so any read/write will have to be done
3821 post-conversion. */
3822 siginfo_fixup (&siginfo, inf_siginfo, 0);
3823
3824 if (offset + len > sizeof (siginfo))
3825 len = sizeof (siginfo) - offset;
3826
3827 if (readbuf != NULL)
3828 memcpy (readbuf, inf_siginfo + offset, len);
3829 else
3830 {
3831 memcpy (inf_siginfo + offset, writebuf, len);
3832
3833 /* Convert back to ptrace layout before flushing it out. */
3834 siginfo_fixup (&siginfo, inf_siginfo, 1);
3835
3836 errno = 0;
3837 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3838 if (errno != 0)
3839 return TARGET_XFER_E_IO;
3840 }
3841
3842 *xfered_len = len;
3843 return TARGET_XFER_OK;
3844 }
3845
3846 static enum target_xfer_status
3847 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3848 const char *annex, gdb_byte *readbuf,
3849 const gdb_byte *writebuf,
3850 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3851 {
3852 struct cleanup *old_chain;
3853 enum target_xfer_status xfer;
3854
3855 if (object == TARGET_OBJECT_SIGNAL_INFO)
3856 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3857 offset, len, xfered_len);
3858
3859 /* The target is connected but no live inferior is selected. Pass
3860 this request down to a lower stratum (e.g., the executable
3861 file). */
3862 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3863 return TARGET_XFER_EOF;
3864
3865 old_chain = save_inferior_ptid ();
3866
3867 if (ptid_lwp_p (inferior_ptid))
3868 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
3869
3870 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3871 offset, len, xfered_len);
3872
3873 do_cleanups (old_chain);
3874 return xfer;
3875 }
3876
3877 static int
3878 linux_thread_alive (ptid_t ptid)
3879 {
3880 int err, tmp_errno;
3881
3882 gdb_assert (ptid_lwp_p (ptid));
3883
3884 /* Probe with signal 0 rather than with ptrace, because ptracing
3885 a running thread errors out claiming that the thread doesn't
3886 exist. */
3887 err = kill_lwp (ptid_get_lwp (ptid), 0);
3888 tmp_errno = errno;
3889 if (debug_linux_nat)
3890 fprintf_unfiltered (gdb_stdlog,
3891 "LLTA: KILL(SIG0) %s (%s)\n",
3892 target_pid_to_str (ptid),
3893 err ? safe_strerror (tmp_errno) : "OK");
3894
3895 if (err != 0)
3896 return 0;
3897
3898 return 1;
3899 }
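
/* The probe above relies on the POSIX rule that kill () with signal 0
   performs full existence and permission checking but delivers
   nothing.  A minimal standalone sketch of the same technique follows;
   pid_alive is a hypothetical helper, and it uses plain kill () rather
   than the tkill-based kill_lwp.  */

#if 0  /* Illustrative sketch, not built.  */
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

static int
pid_alive (pid_t pid)
{
  if (kill (pid, 0) == 0)
    return 1;
  /* EPERM: the process exists but we may not signal it.  ESRCH: no
     such process.  */
  return errno == EPERM;
}
#endif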
3900
3901 static int
3902 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3903 {
3904 return linux_thread_alive (ptid);
3905 }
3906
3907 static char *
3908 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
3909 {
3910 static char buf[64];
3911
3912 if (ptid_lwp_p (ptid)
3913 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3914 || num_lwps (ptid_get_pid (ptid)) > 1))
3915 {
3916 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
3917 return buf;
3918 }
3919
3920 return normal_pid_to_str (ptid);
3921 }
3922
3923 static char *
3924 linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
3925 {
3926 int pid = ptid_get_pid (thr->ptid);
3927 long lwp = ptid_get_lwp (thr->ptid);
3928 #define FORMAT "/proc/%d/task/%ld/comm"
3929 char buf[sizeof (FORMAT) + 30];
3930 FILE *comm_file;
3931 char *result = NULL;
3932
3933 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
3934 comm_file = gdb_fopen_cloexec (buf, "r");
3935 if (comm_file)
3936 {
3937 /* Not exported by the kernel, so we define it here. */
3938 #define COMM_LEN 16
3939 static char line[COMM_LEN + 1];
3940
3941 if (fgets (line, sizeof (line), comm_file))
3942 {
3943 char *nl = strchr (line, '\n');
3944
3945 if (nl)
3946 *nl = '\0';
3947 if (*line != '\0')
3948 result = line;
3949 }
3950
3951 fclose (comm_file);
3952 }
3953
3954 #undef COMM_LEN
3955 #undef FORMAT
3956
3957 return result;
3958 }
3959
3960 /* Accepts an integer PID; returns a string representing a file that
3961 can be opened to get the symbols for the child process. */
3962
3963 static char *
3964 linux_child_pid_to_exec_file (struct target_ops *self, int pid)
3965 {
3966 static char buf[PATH_MAX];
3967 char name[PATH_MAX];
3968
3969 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
3970 memset (buf, 0, PATH_MAX);
3971 if (readlink (name, buf, PATH_MAX - 1) <= 0)
3972 strcpy (buf, name);
3973
3974 return buf;
3975 }
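
/* A note on the readlink idiom above: readlink does not NUL-terminate,
   which is why the buffer is zeroed first and at most PATH_MAX - 1
   bytes are read.  A self-contained sketch of the same idiom, run
   against the current process:  */

#if 0  /* Illustrative sketch, not built.  */
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  char path[PATH_MAX] = "";  /* Zeroed so the result is terminated.  */

  if (readlink ("/proc/self/exe", path, sizeof (path) - 1) > 0)
    printf ("%s\n", path);
  return 0;
}
#endif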
3976
3977 /* Implement the to_xfer_partial interface for memory reads using the /proc
3978 filesystem. Because we can use a single read() call for /proc, this
3979 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3980 but it doesn't support writes. */
3981
3982 static enum target_xfer_status
3983 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3984 const char *annex, gdb_byte *readbuf,
3985 const gdb_byte *writebuf,
3986 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
3987 {
3988 LONGEST ret;
3989 int fd;
3990 char filename[64];
3991
3992 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3993 return TARGET_XFER_EOF;
3994
3995 /* Don't bother for short transfers, where the overhead of opening the /proc file would dominate. */
3996 if (len < 3 * sizeof (long))
3997 return TARGET_XFER_EOF;
3998
3999 /* We could keep this file open and cache it - possibly one per
4000 thread. That requires some juggling, but is even faster. */
4001 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
4002 ptid_get_pid (inferior_ptid));
4003 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
4004 if (fd == -1)
4005 return TARGET_XFER_EOF;
4006
4007 /* If pread64 is available, use it. It's faster if the kernel
4008 supports it (only one syscall), and it's 64-bit safe even on
4009 32-bit platforms (for instance, SPARC debugging a SPARC64
4010 application). */
4011 #ifdef HAVE_PREAD64
4012 if (pread64 (fd, readbuf, len, offset) != len)
4013 #else
4014 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4015 #endif
4016 ret = 0;
4017 else
4018 ret = len;
4019
4020 close (fd);
4021
4022 if (ret == 0)
4023 return TARGET_XFER_EOF;
4024 else
4025 {
4026 *xfered_len = ret;
4027 return TARGET_XFER_OK;
4028 }
4029 }
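
/* For contrast with the single pread64 above, this is roughly what the
   PTRACE_PEEKTEXT/PEEKDATA slow path looks like: one syscall per word,
   which is why /proc/PID/mem wins for larger transfers.  peek_read is
   a hypothetical helper; PID must be a stopped tracee.  */

#if 0  /* Illustrative sketch, not built.  */
#include <errno.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
peek_read (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  size_t done = 0;

  while (done < len)
    {
      long word;
      size_t chunk;

      errno = 0;
      word = ptrace (PTRACE_PEEKDATA, pid, (void *) (addr + done), 0);
      if (word == -1 && errno != 0)
	return -1;

      chunk = len - done < sizeof (word) ? len - done : sizeof (word);
      memcpy ((char *) buf + done, &word, chunk);
      done += chunk;
    }
  return 0;
}
#endif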
4030
4031
4032 /* Enumerate spufs IDs for process PID. */
4033 static LONGEST
4034 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
4035 {
4036 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
4037 LONGEST pos = 0;
4038 LONGEST written = 0;
4039 char path[128];
4040 DIR *dir;
4041 struct dirent *entry;
4042
4043 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4044 dir = opendir (path);
4045 if (!dir)
4046 return -1;
4047
4048 rewinddir (dir);
4049 while ((entry = readdir (dir)) != NULL)
4050 {
4051 struct stat st;
4052 struct statfs stfs;
4053 int fd;
4054
4055 fd = atoi (entry->d_name);
4056 if (!fd)
4057 continue;
4058
4059 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4060 if (stat (path, &st) != 0)
4061 continue;
4062 if (!S_ISDIR (st.st_mode))
4063 continue;
4064
4065 if (statfs (path, &stfs) != 0)
4066 continue;
4067 if (stfs.f_type != SPUFS_MAGIC)
4068 continue;
4069
4070 if (pos >= offset && pos + 4 <= offset + len)
4071 {
4072 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4073 written += 4;
4074 }
4075 pos += 4;
4076 }
4077
4078 closedir (dir);
4079 return written;
4080 }
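
/* The statfs check above is the generic "identify a filesystem by its
   magic number" test.  Factored out as a sketch (on_fs_with_magic is a
   hypothetical helper; f_type's exact integer type varies by libc):  */

#if 0  /* Illustrative sketch, not built.  */
#include <sys/vfs.h>

static int
on_fs_with_magic (const char *path, long magic)
{
  struct statfs stfs;

  if (statfs (path, &stfs) != 0)
    return 0;
  return stfs.f_type == magic;  /* e.g. SPUFS_MAGIC */
}
#endif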
4081
4082 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4083 object type, using the /proc file system. */
4084
4085 static enum target_xfer_status
4086 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4087 const char *annex, gdb_byte *readbuf,
4088 const gdb_byte *writebuf,
4089 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
4090 {
4091 char buf[128];
4092 int fd = 0;
4093 int ret = -1;
4094 int pid = ptid_get_pid (inferior_ptid);
4095
4096 if (!annex)
4097 {
4098 if (!readbuf)
4099 return TARGET_XFER_E_IO;
4100 else
4101 {
4102 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4103
4104 if (l < 0)
4105 return TARGET_XFER_E_IO;
4106 else if (l == 0)
4107 return TARGET_XFER_EOF;
4108 else
4109 {
4110 *xfered_len = (ULONGEST) l;
4111 return TARGET_XFER_OK;
4112 }
4113 }
4114 }
4115
4116 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4117 fd = gdb_open_cloexec (buf, writebuf ? O_WRONLY : O_RDONLY, 0);
4118 if (fd <= 0)
4119 return TARGET_XFER_E_IO;
4120
4121 if (offset != 0
4122 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4123 {
4124 close (fd);
4125 return TARGET_XFER_EOF;
4126 }
4127
4128 if (writebuf)
4129 ret = write (fd, writebuf, (size_t) len);
4130 else if (readbuf)
4131 ret = read (fd, readbuf, (size_t) len);
4132
4133 close (fd);
4134
4135 if (ret < 0)
4136 return TARGET_XFER_E_IO;
4137 else if (ret == 0)
4138 return TARGET_XFER_EOF;
4139 else
4140 {
4141 *xfered_len = (ULONGEST) ret;
4142 return TARGET_XFER_OK;
4143 }
4144 }
4145
4146
4147 /* Parse LINE as a signal set and add its set bits to SIGS. */
4148
4149 static void
4150 add_line_to_sigset (const char *line, sigset_t *sigs)
4151 {
4152 int len = strlen (line) - 1;
4153 const char *p;
4154 int signum;
4155
4156 if (line[len] != '\n')
4157 error (_("Could not parse signal set: %s"), line);
4158
4159 p = line;
4160 signum = len * 4;
4161 while (len-- > 0)
4162 {
4163 int digit;
4164
4165 if (*p >= '0' && *p <= '9')
4166 digit = *p - '0';
4167 else if (*p >= 'a' && *p <= 'f')
4168 digit = *p - 'a' + 10;
4169 else
4170 error (_("Could not parse signal set: %s"), line);
4171
4172 signum -= 4;
4173
4174 if (digit & 1)
4175 sigaddset (sigs, signum + 1);
4176 if (digit & 2)
4177 sigaddset (sigs, signum + 2);
4178 if (digit & 4)
4179 sigaddset (sigs, signum + 3);
4180 if (digit & 8)
4181 sigaddset (sigs, signum + 4);
4182
4183 p++;
4184 }
4185 }
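
/* To make the encoding concrete: the mask is written most significant
   hex digit first, each digit covers four signal numbers, and bit N of
   the mask stands for signal N + 1.  So in a 16-digit mask the line
   "0000000000000100" has only bit 8 set, i.e. signal 9 (SIGKILL).  A
   sketch exercising the parser with that value:  */

#if 0  /* Illustrative sketch, not built.  */
#include <signal.h>
#include <stdio.h>

int
main (void)
{
  sigset_t sigs;

  sigemptyset (&sigs);
  add_line_to_sigset ("0000000000000100\n", &sigs);
  printf ("SIGKILL set: %d\n", sigismember (&sigs, SIGKILL));  /* 1 */
  return 0;
}
#endif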
4186
4187 /* Find process PID's pending, blocked and ignored signals from
4188 /proc/pid/status and set PENDING, BLOCKED and IGNORED to match. */
4189
4190 void
4191 linux_proc_pending_signals (int pid, sigset_t *pending,
4192 sigset_t *blocked, sigset_t *ignored)
4193 {
4194 FILE *procfile;
4195 char buffer[PATH_MAX], fname[PATH_MAX];
4196 struct cleanup *cleanup;
4197
4198 sigemptyset (pending);
4199 sigemptyset (blocked);
4200 sigemptyset (ignored);
4201 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4202 procfile = gdb_fopen_cloexec (fname, "r");
4203 if (procfile == NULL)
4204 error (_("Could not open %s"), fname);
4205 cleanup = make_cleanup_fclose (procfile);
4206
4207 while (fgets (buffer, PATH_MAX, procfile) != NULL)
4208 {
4209 /* Normal queued signals are on the SigPnd line in the status
4210 file. However, 2.6 kernels also have a "shared" pending
4211 queue for delivering signals to a thread group, so check for
4212 a ShdPnd line also.
4213
4214 Unfortunately some Red Hat kernels include the shared pending
4215 queue but not the ShdPnd status field. */
4216
4217 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4218 add_line_to_sigset (buffer + 8, pending);
4219 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4220 add_line_to_sigset (buffer + 8, pending);
4221 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4222 add_line_to_sigset (buffer + 8, blocked);
4223 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4224 add_line_to_sigset (buffer + 8, ignored);
4225 }
4226
4227 do_cleanups (cleanup);
4228 }
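
/* A sketch of how a caller might consume the three sets (NSIG and
   sigismember are standard; show_proc_signals is hypothetical):  */

#if 0  /* Illustrative sketch, not built.  */
#include <signal.h>
#include <stdio.h>

static void
show_proc_signals (int pid)
{
  sigset_t pending, blocked, ignored;
  int signo;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  for (signo = 1; signo < NSIG; signo++)
    if (sigismember (&pending, signo))
      printf ("signal %d pending\n", signo);
}
#endif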
4229
4230 static enum target_xfer_status
4231 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4232 const char *annex, gdb_byte *readbuf,
4233 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4234 ULONGEST *xfered_len)
4235 {
4236 gdb_assert (object == TARGET_OBJECT_OSDATA);
4237
4238 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4239 if (*xfered_len == 0)
4240 return TARGET_XFER_EOF;
4241 else
4242 return TARGET_XFER_OK;
4243 }
4244
4245 static enum target_xfer_status
4246 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4247 const char *annex, gdb_byte *readbuf,
4248 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4249 ULONGEST *xfered_len)
4250 {
4251 enum target_xfer_status xfer;
4252
4253 if (object == TARGET_OBJECT_AUXV)
4254 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4255 offset, len, xfered_len);
4256
4257 if (object == TARGET_OBJECT_OSDATA)
4258 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4259 offset, len, xfered_len);
4260
4261 if (object == TARGET_OBJECT_SPU)
4262 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4263 offset, len, xfered_len);
4264
4265 /* GDB calculates all addresses in the possibly larger width of a
4266 ULONGEST, so the address must be masked to the inferior's width
4267 before its final use by either linux_proc_xfer_partial or
4268 inf_ptrace_xfer_partial (e.g. a 32-bit inferior's sign-extended 0xffffffff80001000 masks to 0x80001000).
4269 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4270
4271 if (object == TARGET_OBJECT_MEMORY)
4272 {
4273 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
4274
4275 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4276 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4277 }
4278
4279 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4280 offset, len, xfered_len);
4281 if (xfer != TARGET_XFER_EOF)
4282 return xfer;
4283
4284 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4285 offset, len, xfered_len);
4286 }
4287
4288 static void
4289 cleanup_target_stop (void *arg)
4290 {
4291 ptid_t *ptid = (ptid_t *) arg;
4292
4293 gdb_assert (arg != NULL);
4294
4295 /* Unpause all */
4296 target_resume (*ptid, 0, GDB_SIGNAL_0);
4297 }
4298
4299 static VEC(static_tracepoint_marker_p) *
4300 linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4301 const char *strid)
4302 {
4303 char s[IPA_CMD_BUF_SIZE];
4304 struct cleanup *old_chain;
4305 int pid = ptid_get_pid (inferior_ptid);
4306 VEC(static_tracepoint_marker_p) *markers = NULL;
4307 struct static_tracepoint_marker *marker = NULL;
4308 char *p = s;
4309 ptid_t ptid = ptid_build (pid, 0, 0);
4310
4311 /* Pause all */
4312 target_stop (ptid);
4313
4314 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4315 s[sizeof ("qTfSTM")] = 0;
4316
4317 agent_run_command (pid, s, strlen (s) + 1);
4318
4319 old_chain = make_cleanup (free_current_marker, &marker);
4320 make_cleanup (cleanup_target_stop, &ptid);
4321
4322 while (*p++ == 'm')
4323 {
4324 if (marker == NULL)
4325 marker = XCNEW (struct static_tracepoint_marker);
4326
4327 do
4328 {
4329 parse_static_tracepoint_marker_definition (p, &p, marker);
4330
4331 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4332 {
4333 VEC_safe_push (static_tracepoint_marker_p,
4334 markers, marker);
4335 marker = NULL;
4336 }
4337 else
4338 {
4339 release_static_tracepoint_marker (marker);
4340 memset (marker, 0, sizeof (*marker));
4341 }
4342 }
4343 while (*p++ == ','); /* comma-separated list */
4344
4345 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4346 s[sizeof ("qTsSTM")] = 0;
4347 agent_run_command (pid, s, strlen (s) + 1);
4348 p = s;
4349 }
4350
4351 do_cleanups (old_chain);
4352
4353 return markers;
4354 }
4355
4356 /* Create a prototype generic GNU/Linux target. The client can override
4357 it with local methods. */
4358
4359 static void
4360 linux_target_install_ops (struct target_ops *t)
4361 {
4362 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4363 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4364 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4365 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4366 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4367 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4368 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4369 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4370 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4371 t->to_post_attach = linux_child_post_attach;
4372 t->to_follow_fork = linux_child_follow_fork;
4373
4374 super_xfer_partial = t->to_xfer_partial;
4375 t->to_xfer_partial = linux_xfer_partial;
4376
4377 t->to_static_tracepoint_markers_by_strid
4378 = linux_child_static_tracepoint_markers_by_strid;
4379 }
4380
4381 struct target_ops *
4382 linux_target (void)
4383 {
4384 struct target_ops *t;
4385
4386 t = inf_ptrace_target ();
4387 linux_target_install_ops (t);
4388
4389 return t;
4390 }
4391
4392 struct target_ops *
4393 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4394 {
4395 struct target_ops *t;
4396
4397 t = inf_ptrace_trad_target (register_u_offset);
4398 linux_target_install_ops (t);
4399
4400 return t;
4401 }
4402
4403 /* target_is_async_p implementation. */
4404
4405 static int
4406 linux_nat_is_async_p (struct target_ops *ops)
4407 {
4408 /* NOTE: palves 2008-03-21: We're only async when the user requests
4409 it explicitly with the "set target-async" command.
4410 Someday, Linux will always be async. */
4411 return target_async_permitted;
4412 }
4413
4414 /* target_can_async_p implementation. */
4415
4416 static int
4417 linux_nat_can_async_p (struct target_ops *ops)
4418 {
4419 /* NOTE: palves 2008-03-21: We're only async when the user requests
4420 it explicitly with the "set target-async" command.
4421 Someday, linux will always be async. */
4422 return target_async_permitted;
4423 }
4424
4425 static int
4426 linux_nat_supports_non_stop (struct target_ops *self)
4427 {
4428 return 1;
4429 }
4430
4431 /* True if we want to support multi-process. To be removed when GDB
4432 supports multi-exec. */
4433
4434 int linux_multi_process = 1;
4435
4436 static int
4437 linux_nat_supports_multi_process (struct target_ops *self)
4438 {
4439 return linux_multi_process;
4440 }
4441
4442 static int
4443 linux_nat_supports_disable_randomization (struct target_ops *self)
4444 {
4445 #ifdef HAVE_PERSONALITY
4446 return 1;
4447 #else
4448 return 0;
4449 #endif
4450 }
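
/* When HAVE_PERSONALITY is available, disabling randomization boils
   down to OR-ing ADDR_NO_RANDOMIZE into the current personality before
   the exec.  A sketch of that sequence (disable_aslr is a hypothetical
   helper; personality (0xffffffff) is the documented query idiom):  */

#if 0  /* Illustrative sketch, not built.  */
#include <sys/personality.h>

static int
disable_aslr (void)
{
  int old = personality (0xffffffff);  /* Query without changing.  */

  if (old == -1)
    return -1;
  if (personality (old | ADDR_NO_RANDOMIZE) == -1)
    return -1;
  return old;  /* Previous value, so it can be restored.  */
}
#endif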
4451
4452 static int async_terminal_is_ours = 1;
4453
4454 /* target_terminal_inferior implementation.
4455
4456 This is a wrapper around child_terminal_inferior to add async support. */
4457
4458 static void
4459 linux_nat_terminal_inferior (struct target_ops *self)
4460 {
4461 if (!target_is_async_p ())
4462 {
4463 /* Async mode is disabled. */
4464 child_terminal_inferior (self);
4465 return;
4466 }
4467
4468 child_terminal_inferior (self);
4469
4470 /* Calls to target_terminal_*() are meant to be idempotent. */
4471 if (!async_terminal_is_ours)
4472 return;
4473
4474 delete_file_handler (input_fd);
4475 async_terminal_is_ours = 0;
4476 set_sigint_trap ();
4477 }
4478
4479 /* target_terminal_ours implementation.
4480
4481 This is a wrapper around child_terminal_ours to add async support (and
4482 implement the target_terminal_ours vs target_terminal_ours_for_output
4483 distinction). child_terminal_ours is currently no different than
4484 child_terminal_ours_for_output.
4485 We leave target_terminal_ours_for_output alone, leaving it to
4486 child_terminal_ours_for_output. */
4487
4488 static void
4489 linux_nat_terminal_ours (struct target_ops *self)
4490 {
4491 if (!target_is_async_p ())
4492 {
4493 /* Async mode is disabled. */
4494 child_terminal_ours (self);
4495 return;
4496 }
4497
4498 /* GDB should never give the terminal to the inferior if the
4499 inferior is running in the background (run&, continue&, etc.),
4500 but claiming it sure should. */
4501 child_terminal_ours (self);
4502
4503 if (async_terminal_is_ours)
4504 return;
4505
4506 clear_sigint_trap ();
4507 add_file_handler (input_fd, stdin_event_handler, 0);
4508 async_terminal_is_ours = 1;
4509 }
4510
4511 static void (*async_client_callback) (enum inferior_event_type event_type,
4512 void *context);
4513 static void *async_client_context;
4514
4515 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
4516 it notifies the event loop that some child has changed state; in
4517 sync mode it allows linux_nat_wait_1 above to use sigsuspend to
4518 wait for the arrival of a SIGCHLD. */
4519
4520 static void
4521 sigchld_handler (int signo)
4522 {
4523 int old_errno = errno;
4524
4525 if (debug_linux_nat)
4526 ui_file_write_async_safe (gdb_stdlog,
4527 "sigchld\n", sizeof ("sigchld\n") - 1);
4528
4529 if (signo == SIGCHLD
4530 && linux_nat_event_pipe[0] != -1)
4531 async_file_mark (); /* Let the event loop know that there are
4532 events to handle. */
4533
4534 errno = old_errno;
4535 }
4536
4537 /* Callback registered with the target events file descriptor. */
4538
4539 static void
4540 handle_target_event (int error, gdb_client_data client_data)
4541 {
4542 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4543 }
4544
4545 /* Create/destroy the target events pipe. Returns previous state. */
4546
4547 static int
4548 linux_async_pipe (int enable)
4549 {
4550 int previous = (linux_nat_event_pipe[0] != -1);
4551
4552 if (previous != enable)
4553 {
4554 sigset_t prev_mask;
4555
4556 /* Block child signals while we create/destroy the pipe, as
4557 their handler writes to it. */
4558 block_child_signals (&prev_mask);
4559
4560 if (enable)
4561 {
4562 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4563 internal_error (__FILE__, __LINE__,
4564 "creating event pipe failed.");
4565
4566 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4567 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4568 }
4569 else
4570 {
4571 close (linux_nat_event_pipe[0]);
4572 close (linux_nat_event_pipe[1]);
4573 linux_nat_event_pipe[0] = -1;
4574 linux_nat_event_pipe[1] = -1;
4575 }
4576
4577 restore_child_signals_mask (&prev_mask);
4578 }
4579
4580 return previous;
4581 }
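
/* linux_async_pipe is an instance of the classic self-pipe trick: the
   signal handler does nothing but write a byte to a non-blocking pipe,
   and the event loop's select/poll on the read end wakes up.  A
   minimal standalone sketch (self_pipe, chld_handler and
   setup_self_pipe are hypothetical names):  */

#if 0  /* Illustrative sketch, not built.  */
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int self_pipe[2];

static void
chld_handler (int signo)
{
  int saved_errno = errno;
  char c = '+';

  (void) write (self_pipe[1], &c, 1);  /* Async-signal-safe.  */
  errno = saved_errno;
}

static int
setup_self_pipe (void)
{
  struct sigaction sa;

  if (pipe (self_pipe) != 0)
    return -1;
  fcntl (self_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (self_pipe[1], F_SETFL, O_NONBLOCK);

  sa.sa_handler = chld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  return sigaction (SIGCHLD, &sa, NULL);
}
#endif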
4582
4583 /* target_async implementation. */
4584
4585 static void
4586 linux_nat_async (struct target_ops *ops,
4587 void (*callback) (enum inferior_event_type event_type,
4588 void *context),
4589 void *context)
4590 {
4591 if (callback != NULL)
4592 {
4593 async_client_callback = callback;
4594 async_client_context = context;
4595 if (!linux_async_pipe (1))
4596 {
4597 add_file_handler (linux_nat_event_pipe[0],
4598 handle_target_event, NULL);
4599 /* There may be pending events to handle. Tell the event loop
4600 to poll them. */
4601 async_file_mark ();
4602 }
4603 }
4604 else
4605 {
4606 async_client_callback = callback;
4607 async_client_context = context;
4608 delete_file_handler (linux_nat_event_pipe[0]);
4609 linux_async_pipe (0);
4610 }
4611 return;
4612 }
4613
4614 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4615 event came out. */
4616
4617 static int
4618 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4619 {
4620 if (!lwp->stopped)
4621 {
4622 if (debug_linux_nat)
4623 fprintf_unfiltered (gdb_stdlog,
4624 "LNSL: running -> suspending %s\n",
4625 target_pid_to_str (lwp->ptid));
4626
4627
4628 if (lwp->last_resume_kind == resume_stop)
4629 {
4630 if (debug_linux_nat)
4631 fprintf_unfiltered (gdb_stdlog,
4632 "linux-nat: already stopping LWP %ld at "
4633 "GDB's request\n",
4634 ptid_get_lwp (lwp->ptid));
4635 return 0;
4636 }
4637
4638 stop_callback (lwp, NULL);
4639 lwp->last_resume_kind = resume_stop;
4640 }
4641 else
4642 {
4643 /* Already known to be stopped; do nothing. */
4644
4645 if (debug_linux_nat)
4646 {
4647 if (find_thread_ptid (lwp->ptid)->stop_requested)
4648 fprintf_unfiltered (gdb_stdlog,
4649 "LNSL: already stopped/stop_requested %s\n",
4650 target_pid_to_str (lwp->ptid));
4651 else
4652 fprintf_unfiltered (gdb_stdlog,
4653 "LNSL: already stopped/no "
4654 "stop_requested yet %s\n",
4655 target_pid_to_str (lwp->ptid));
4656 }
4657 }
4658 return 0;
4659 }
4660
4661 static void
4662 linux_nat_stop (struct target_ops *self, ptid_t ptid)
4663 {
4664 if (non_stop)
4665 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4666 else
4667 linux_ops->to_stop (linux_ops, ptid);
4668 }
4669
4670 static void
4671 linux_nat_close (struct target_ops *self)
4672 {
4673 /* Unregister from the event loop. */
4674 if (linux_nat_is_async_p (self))
4675 linux_nat_async (self, NULL, NULL);
4676
4677 if (linux_ops->to_close)
4678 linux_ops->to_close (linux_ops);
4679
4680 super_close (self);
4681 }
4682
4683 /* When requests are passed down from the linux-nat layer to the
4684 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4685 used. The address space pointer is stored in the inferior object,
4686 but the common code that is passed such ptid can't tell whether
4687 lwpid is a "main" process id or not (it assumes so). We reverse
4688 look up the "main" process id from the lwp here. */
4689
4690 static struct address_space *
4691 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4692 {
4693 struct lwp_info *lwp;
4694 struct inferior *inf;
4695 int pid;
4696
4697 if (ptid_get_lwp (ptid) == 0)
4698 {
4699 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4700 tgid. */
4701 lwp = find_lwp_pid (ptid);
4702 pid = ptid_get_pid (lwp->ptid);
4703 }
4704 else
4705 {
4706 /* A (pid,lwpid,0) ptid. */
4707 pid = ptid_get_pid (ptid);
4708 }
4709
4710 inf = find_inferior_pid (pid);
4711 gdb_assert (inf != NULL);
4712 return inf->aspace;
4713 }
4714
4715 /* Return the cached value of the processor core for thread PTID. */
4716
4717 static int
4718 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4719 {
4720 struct lwp_info *info = find_lwp_pid (ptid);
4721
4722 if (info)
4723 return info->core;
4724 return -1;
4725 }
4726
4727 void
4728 linux_nat_add_target (struct target_ops *t)
4729 {
4730 /* Save the provided single-threaded target. We save this in a separate
4731 variable because another target we've inherited from (e.g. inf-ptrace)
4732 may have saved a pointer to T; we want to use it for the final
4733 process stratum target. */
4734 linux_ops_saved = *t;
4735 linux_ops = &linux_ops_saved;
4736
4737 /* Override some methods for multithreading. */
4738 t->to_create_inferior = linux_nat_create_inferior;
4739 t->to_attach = linux_nat_attach;
4740 t->to_detach = linux_nat_detach;
4741 t->to_resume = linux_nat_resume;
4742 t->to_wait = linux_nat_wait;
4743 t->to_pass_signals = linux_nat_pass_signals;
4744 t->to_xfer_partial = linux_nat_xfer_partial;
4745 t->to_kill = linux_nat_kill;
4746 t->to_mourn_inferior = linux_nat_mourn_inferior;
4747 t->to_thread_alive = linux_nat_thread_alive;
4748 t->to_pid_to_str = linux_nat_pid_to_str;
4749 t->to_thread_name = linux_nat_thread_name;
4750 t->to_has_thread_control = tc_schedlock;
4751 t->to_thread_address_space = linux_nat_thread_address_space;
4752 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4753 t->to_stopped_data_address = linux_nat_stopped_data_address;
4754
4755 t->to_can_async_p = linux_nat_can_async_p;
4756 t->to_is_async_p = linux_nat_is_async_p;
4757 t->to_supports_non_stop = linux_nat_supports_non_stop;
4758 t->to_async = linux_nat_async;
4759 t->to_terminal_inferior = linux_nat_terminal_inferior;
4760 t->to_terminal_ours = linux_nat_terminal_ours;
4761
4762 super_close = t->to_close;
4763 t->to_close = linux_nat_close;
4764
4765 /* Methods for non-stop support. */
4766 t->to_stop = linux_nat_stop;
4767
4768 t->to_supports_multi_process = linux_nat_supports_multi_process;
4769
4770 t->to_supports_disable_randomization
4771 = linux_nat_supports_disable_randomization;
4772
4773 t->to_core_of_thread = linux_nat_core_of_thread;
4774
4775 /* We don't change the stratum; this target will sit at
4776 process_stratum and thread_db will set at thread_stratum. This
4777 is a little strange, since this is a multi-threaded-capable
4778 target, but we want to be on the stack below thread_db, and we
4779 also want to be used for single-threaded processes. */
4780
4781 add_target (t);
4782 }
4783
4784 /* Register a method to call whenever a new thread is attached. */
4785 void
4786 linux_nat_set_new_thread (struct target_ops *t,
4787 void (*new_thread) (struct lwp_info *))
4788 {
4789 /* Save the pointer. We only support a single registered instance
4790 of the GNU/Linux native target, so we do not need to map this to
4791 T. */
4792 linux_nat_new_thread = new_thread;
4793 }
4794
4795 /* See declaration in linux-nat.h. */
4796
4797 void
4798 linux_nat_set_new_fork (struct target_ops *t,
4799 linux_nat_new_fork_ftype *new_fork)
4800 {
4801 /* Save the pointer. */
4802 linux_nat_new_fork = new_fork;
4803 }
4804
4805 /* See declaration in linux-nat.h. */
4806
4807 void
4808 linux_nat_set_forget_process (struct target_ops *t,
4809 linux_nat_forget_process_ftype *fn)
4810 {
4811 /* Save the pointer. */
4812 linux_nat_forget_process_hook = fn;
4813 }
4814
4815 /* See declaration in linux-nat.h. */
4816
4817 void
4818 linux_nat_forget_process (pid_t pid)
4819 {
4820 if (linux_nat_forget_process_hook != NULL)
4821 linux_nat_forget_process_hook (pid);
4822 }
4823
4824 /* Register a method that converts a siginfo object between the layout
4825 that ptrace returns, and the layout in the architecture of the
4826 inferior. */
4827 void
4828 linux_nat_set_siginfo_fixup (struct target_ops *t,
4829 int (*siginfo_fixup) (siginfo_t *,
4830 gdb_byte *,
4831 int))
4832 {
4833 /* Save the pointer. */
4834 linux_nat_siginfo_fixup = siginfo_fixup;
4835 }
4836
4837 /* Register a method to call prior to resuming a thread. */
4838
4839 void
4840 linux_nat_set_prepare_to_resume (struct target_ops *t,
4841 void (*prepare_to_resume) (struct lwp_info *))
4842 {
4843 /* Save the pointer. */
4844 linux_nat_prepare_to_resume = prepare_to_resume;
4845 }
4846
4847 /* See linux-nat.h. */
4848
4849 int
4850 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4851 {
4852 int pid;
4853
4854 pid = ptid_get_lwp (ptid);
4855 if (pid == 0)
4856 pid = ptid_get_pid (ptid);
4857
4858 errno = 0;
4859 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4860 if (errno != 0)
4861 {
4862 memset (siginfo, 0, sizeof (*siginfo));
4863 return 0;
4864 }
4865 return 1;
4866 }
4867
4868 /* Provide a prototype to silence -Wmissing-prototypes. */
4869 extern initialize_file_ftype _initialize_linux_nat;
4870
4871 void
4872 _initialize_linux_nat (void)
4873 {
4874 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4875 &debug_linux_nat, _("\
4876 Set debugging of GNU/Linux lwp module."), _("\
4877 Show debugging of GNU/Linux lwp module."), _("\
4878 Enables printf debugging output."),
4879 NULL,
4880 show_debug_linux_nat,
4881 &setdebuglist, &showdebuglist);
4882
4883 /* Save this mask as the default. */
4884 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4885
4886 /* Install a SIGCHLD handler. */
4887 sigchld_action.sa_handler = sigchld_handler;
4888 sigemptyset (&sigchld_action.sa_mask);
4889 sigchld_action.sa_flags = SA_RESTART;
4890
4891 /* Make it the default. */
4892 sigaction (SIGCHLD, &sigchld_action, NULL);
4893
4894 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4895 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4896 sigdelset (&suspend_mask, SIGCHLD);
4897
4898 sigemptyset (&blocked_mask);
4899
4900 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
4901 support read-only process state. */
4902 linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
4903 | PTRACE_O_TRACEVFORKDONE
4904 | PTRACE_O_TRACEVFORK
4905 | PTRACE_O_TRACEFORK
4906 | PTRACE_O_TRACEEXEC);
4907 }
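
/* The flags registered above are ultimately applied to each attached
   LWP with PTRACE_SETOPTIONS.  A sketch of that call in isolation
   (enable_event_reporting is a hypothetical helper; depending on the
   libc, the PTRACE_O_* constants may come from <linux/ptrace.h>
   instead):  */

#if 0  /* Illustrative sketch, not built.  */
#include <sys/ptrace.h>
#include <sys/types.h>

static int
enable_event_reporting (pid_t pid)
{
  long opts = (PTRACE_O_TRACESYSGOOD
	       | PTRACE_O_TRACEFORK
	       | PTRACE_O_TRACEVFORK
	       | PTRACE_O_TRACEVFORKDONE
	       | PTRACE_O_TRACEEXEC);

  /* PID must already be a stopped tracee.  */
  return ptrace (PTRACE_SETOPTIONS, pid, (void *) 0, (void *) opts);
}
#endif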
4908 \f
4909
4910 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4911 the GNU/Linux Threads library and therefore doesn't really belong
4912 here. */
4913
4914 /* Read variable NAME in the target and return its value if found.
4915 Otherwise return zero. It is assumed that the type of the variable
4916 is `int'. */
4917
4918 static int
4919 get_signo (const char *name)
4920 {
4921 struct bound_minimal_symbol ms;
4922 int signo;
4923
4924 ms = lookup_minimal_symbol (name, NULL, NULL);
4925 if (ms.minsym == NULL)
4926 return 0;
4927
4928 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4929 sizeof (signo)) != 0)
4930 return 0;
4931
4932 return signo;
4933 }
4934
4935 /* Return the set of signals used by the threads library in *SET. */
4936
4937 void
4938 lin_thread_get_thread_signals (sigset_t *set)
4939 {
4940 struct sigaction action;
4941 int restart, cancel;
4942
4943 sigemptyset (&blocked_mask);
4944 sigemptyset (set);
4945
4946 restart = get_signo ("__pthread_sig_restart");
4947 cancel = get_signo ("__pthread_sig_cancel");
4948
4949 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4950 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4951 not provide any way for the debugger to query the signal numbers -
4952 fortunately they don't change! */
4953
4954 if (restart == 0)
4955 restart = __SIGRTMIN;
4956
4957 if (cancel == 0)
4958 cancel = __SIGRTMIN + 1;
4959
4960 sigaddset (set, restart);
4961 sigaddset (set, cancel);
4962
4963 /* The GNU/Linux Threads library makes terminating threads send a
4964 special "cancel" signal instead of SIGCHLD. Make sure we catch
4965 those (to prevent them from terminating GDB itself, which is
4966 likely to be their default action) and treat them the same way as
4967 SIGCHLD. */
4968
4969 action.sa_handler = sigchld_handler;
4970 sigemptyset (&action.sa_mask);
4971 action.sa_flags = SA_RESTART;
4972 sigaction (cancel, &action, NULL);
4973
4974 /* We block the "cancel" signal throughout this code ... */
4975 sigaddset (&blocked_mask, cancel);
4976 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4977
4978 /* ... except during a sigsuspend. */
4979 sigdelset (&suspend_mask, cancel);
4980 }