2012-02-27 Pedro Alves <palves@redhat.com>
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2012 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "target.h"
23 #include "gdb_string.h"
24 #include "gdb_wait.h"
25 #include "gdb_assert.h"
26 #ifdef HAVE_TKILL_SYSCALL
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #endif
30 #include <sys/ptrace.h>
31 #include "linux-nat.h"
32 #include "linux-ptrace.h"
33 #include "linux-procfs.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-ptrace.h"
40 #include "auxv.h"
41 #include <sys/param.h> /* for MAXPATHLEN */
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include "gdbthread.h" /* for struct thread_info etc. */
48 #include "gdb_stat.h" /* for struct stat */
49 #include <fcntl.h> /* for O_RDONLY */
50 #include "inf-loop.h"
51 #include "event-loop.h"
52 #include "event-top.h"
53 #include <pwd.h>
54 #include <sys/types.h>
55 #include "gdb_dirent.h"
56 #include "xml-support.h"
57 #include "terminal.h"
58 #include <sys/vfs.h>
59 #include "solib.h"
60 #include "linux-osdata.h"
61 #include "linux-tdep.h"
62 #include "symfile.h"
63
64 #ifndef SPUFS_MAGIC
65 #define SPUFS_MAGIC 0x23c9b64e
66 #endif
67
68 #ifdef HAVE_PERSONALITY
69 # include <sys/personality.h>
70 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
71 # define ADDR_NO_RANDOMIZE 0x0040000
72 # endif
73 #endif /* HAVE_PERSONALITY */
74
75 /* This comment documents high-level logic of this file.
76
77 Waiting for events in sync mode
78 ===============================
79
80 When waiting for an event in a specific thread, we just use waitpid, passing
81 the specific pid, and not passing WNOHANG.
82
83 When waiting for an event in all threads, waitpid is not quite good. Prior to
84 version 2.4, Linux can either wait for event in main thread, or in secondary
85 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
86 miss an event. The solution is to use non-blocking waitpid, together with
87 sigsuspend. First, we use non-blocking waitpid to get an event in the main
88 process, if any. Second, we use non-blocking waitpid with the __WCLONED
89 flag to check for events in cloned processes. If nothing is found, we use
90 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
91 happened to a child process -- and SIGCHLD will be delivered both for events
92 in main debugged process and in cloned processes. As soon as we know there's
93 an event, we get back to calling nonblocking waitpid with and without
94 __WCLONED.
95
96 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
97 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
98 blocked, the signal becomes pending and sigsuspend immediately
99 notices it and returns.
100
101 Waiting for events in async mode
102 ================================
103
104 In async mode, GDB should always be ready to handle both user input
105 and target events, so neither blocking waitpid nor sigsuspend are
106 viable options. Instead, we should asynchronously notify the GDB main
107 event loop whenever there's an unprocessed event from the target. We
108 detect asynchronous target events by handling SIGCHLD signals. To
109 notify the event loop about target events, the self-pipe trick is used
110 --- a pipe is registered as waitable event source in the event loop,
111 the event loop select/poll's on the read end of this pipe (as well on
112 other event sources, e.g., stdin), and the SIGCHLD handler writes a
113 byte to this pipe. This is more portable than relying on
114 pselect/ppoll, since on kernels that lack those syscalls, libc
115 emulates them with select/poll+sigprocmask, and that is racy
116 (a.k.a. plain broken).
117
118 Obviously, if we fail to notify the event loop if there's a target
119 event, it's bad. OTOH, if we notify the event loop when there's no
120 event from the target, linux_nat_wait will detect that there's no real
121 event to report, and return event of type TARGET_WAITKIND_IGNORE.
122 This is mostly harmless, but it will waste time and is better avoided.
123
124 The main design point is that every time GDB is outside linux-nat.c,
125 we have a SIGCHLD handler installed that is called when something
126 happens to the target and notifies the GDB event loop. Whenever GDB
127 core decides to handle the event, and calls into linux-nat.c, we
128 process things as in sync mode, except that the we never block in
129 sigsuspend.
130
131 While processing an event, we may end up momentarily blocked in
132 waitpid calls. Those waitpid calls, while blocking, are guarantied to
133 return quickly. E.g., in all-stop mode, before reporting to the core
134 that an LWP hit a breakpoint, all LWPs are stopped by sending them
135 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
136 Note that this is different from blocking indefinitely waiting for the
137 next event --- here, we're already handling an event.
138
139 Use of signals
140 ==============
141
142 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
143 signal is not entirely significant; we just need for a signal to be delivered,
144 so that we can intercept it. SIGSTOP's advantage is that it can not be
145 blocked. A disadvantage is that it is not a real-time signal, so it can only
146 be queued once; we do not keep track of other sources of SIGSTOP.
147
148 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
149 use them, because they have special behavior when the signal is generated -
150 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
151 kills the entire thread group.
152
153 A delivered SIGSTOP would stop the entire thread group, not just the thread we
154 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
155 cancel it (by PTRACE_CONT without passing SIGSTOP).
156
157 We could use a real-time signal instead. This would solve those problems; we
158 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
159 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
160 generates it, and there are races with trying to find a signal that is not
161 blocked. */
162
163 #ifndef O_LARGEFILE
164 #define O_LARGEFILE 0
165 #endif
166
167 /* Unlike other extended result codes, WSTOPSIG (status) on
168 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
169 instead SIGTRAP with bit 7 set. */
170 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
171
172 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
173 the use of the multi-threaded target. */
174 static struct target_ops *linux_ops;
175 static struct target_ops linux_ops_saved;
176
177 /* The method to call, if any, when a new thread is attached. */
178 static void (*linux_nat_new_thread) (struct lwp_info *);
179
180 /* Hook to call prior to resuming a thread. */
181 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
182
183 /* The method to call, if any, when the siginfo object needs to be
184 converted between the layout returned by ptrace, and the layout in
185 the architecture of the inferior. */
186 static int (*linux_nat_siginfo_fixup) (struct siginfo *,
187 gdb_byte *,
188 int);
189
190 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
191 Called by our to_xfer_partial. */
192 static LONGEST (*super_xfer_partial) (struct target_ops *,
193 enum target_object,
194 const char *, gdb_byte *,
195 const gdb_byte *,
196 ULONGEST, LONGEST);
197
198 static int debug_linux_nat;
199 static void
200 show_debug_linux_nat (struct ui_file *file, int from_tty,
201 struct cmd_list_element *c, const char *value)
202 {
203 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
204 value);
205 }
206
207 struct simple_pid_list
208 {
209 int pid;
210 int status;
211 struct simple_pid_list *next;
212 };
213 struct simple_pid_list *stopped_pids;
214
215 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
216 can not be used, 1 if it can. */
217
218 static int linux_supports_tracefork_flag = -1;
219
220 /* This variable is a tri-state flag: -1 for unknown, 0 if
221 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
222
223 static int linux_supports_tracesysgood_flag = -1;
224
225 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
226 PTRACE_O_TRACEVFORKDONE. */
227
228 static int linux_supports_tracevforkdone_flag = -1;
229
230 /* Stores the current used ptrace() options. */
231 static int current_ptrace_options = 0;
232
233 /* Async mode support. */
234
235 /* The read/write ends of the pipe registered as waitable file in the
236 event loop. */
237 static int linux_nat_event_pipe[2] = { -1, -1 };
238
239 /* Flush the event pipe. */
240
241 static void
242 async_file_flush (void)
243 {
244 int ret;
245 char buf;
246
247 do
248 {
249 ret = read (linux_nat_event_pipe[0], &buf, 1);
250 }
251 while (ret >= 0 || (ret == -1 && errno == EINTR));
252 }
253
254 /* Put something (anything, doesn't matter what, or how much) in event
255 pipe, so that the select/poll in the event-loop realizes we have
256 something to process. */
257
258 static void
259 async_file_mark (void)
260 {
261 int ret;
262
263 /* It doesn't really matter what the pipe contains, as long we end
264 up with something in it. Might as well flush the previous
265 left-overs. */
266 async_file_flush ();
267
268 do
269 {
270 ret = write (linux_nat_event_pipe[1], "+", 1);
271 }
272 while (ret == -1 && errno == EINTR);
273
274 /* Ignore EAGAIN. If the pipe is full, the event loop will already
275 be awakened anyway. */
276 }
277
278 static void linux_nat_async (void (*callback)
279 (enum inferior_event_type event_type,
280 void *context),
281 void *context);
282 static int kill_lwp (int lwpid, int signo);
283
284 static int stop_callback (struct lwp_info *lp, void *data);
285
286 static void block_child_signals (sigset_t *prev_mask);
287 static void restore_child_signals_mask (sigset_t *prev_mask);
288
289 struct lwp_info;
290 static struct lwp_info *add_lwp (ptid_t ptid);
291 static void purge_lwp_list (int pid);
292 static void delete_lwp (ptid_t ptid);
293 static struct lwp_info *find_lwp_pid (ptid_t ptid);
294
295 \f
296 /* Trivial list manipulation functions to keep track of a list of
297 new stopped processes. */
298 static void
299 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
300 {
301 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
302
303 new_pid->pid = pid;
304 new_pid->status = status;
305 new_pid->next = *listp;
306 *listp = new_pid;
307 }
308
309 static int
310 in_pid_list_p (struct simple_pid_list *list, int pid)
311 {
312 struct simple_pid_list *p;
313
314 for (p = list; p != NULL; p = p->next)
315 if (p->pid == pid)
316 return 1;
317 return 0;
318 }
319
320 static int
321 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
322 {
323 struct simple_pid_list **p;
324
325 for (p = listp; *p != NULL; p = &(*p)->next)
326 if ((*p)->pid == pid)
327 {
328 struct simple_pid_list *next = (*p)->next;
329
330 *statusp = (*p)->status;
331 xfree (*p);
332 *p = next;
333 return 1;
334 }
335 return 0;
336 }
337
338 \f
339 /* A helper function for linux_test_for_tracefork, called after fork (). */
340
341 static void
342 linux_tracefork_child (void)
343 {
344 ptrace (PTRACE_TRACEME, 0, 0, 0);
345 kill (getpid (), SIGSTOP);
346 fork ();
347 _exit (0);
348 }
349
350 /* Wrapper function for waitpid which handles EINTR. */
351
352 static int
353 my_waitpid (int pid, int *statusp, int flags)
354 {
355 int ret;
356
357 do
358 {
359 ret = waitpid (pid, statusp, flags);
360 }
361 while (ret == -1 && errno == EINTR);
362
363 return ret;
364 }
365
366 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
367
368 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
369 we know that the feature is not available. This may change the tracing
370 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
371
372 However, if it succeeds, we don't know for sure that the feature is
373 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
374 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
375 fork tracing, and let it fork. If the process exits, we assume that we
376 can't use TRACEFORK; if we get the fork notification, and we can extract
377 the new child's PID, then we assume that we can. */
378
379 static void
380 linux_test_for_tracefork (int original_pid)
381 {
382 int child_pid, ret, status;
383 long second_pid;
384 sigset_t prev_mask;
385
386 /* We don't want those ptrace calls to be interrupted. */
387 block_child_signals (&prev_mask);
388
389 linux_supports_tracefork_flag = 0;
390 linux_supports_tracevforkdone_flag = 0;
391
392 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
393 if (ret != 0)
394 {
395 restore_child_signals_mask (&prev_mask);
396 return;
397 }
398
399 child_pid = fork ();
400 if (child_pid == -1)
401 perror_with_name (("fork"));
402
403 if (child_pid == 0)
404 linux_tracefork_child ();
405
406 ret = my_waitpid (child_pid, &status, 0);
407 if (ret == -1)
408 perror_with_name (("waitpid"));
409 else if (ret != child_pid)
410 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
411 if (! WIFSTOPPED (status))
412 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
413 status);
414
415 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
416 if (ret != 0)
417 {
418 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
419 if (ret != 0)
420 {
421 warning (_("linux_test_for_tracefork: failed to kill child"));
422 restore_child_signals_mask (&prev_mask);
423 return;
424 }
425
426 ret = my_waitpid (child_pid, &status, 0);
427 if (ret != child_pid)
428 warning (_("linux_test_for_tracefork: failed "
429 "to wait for killed child"));
430 else if (!WIFSIGNALED (status))
431 warning (_("linux_test_for_tracefork: unexpected "
432 "wait status 0x%x from killed child"), status);
433
434 restore_child_signals_mask (&prev_mask);
435 return;
436 }
437
438 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
439 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
440 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
441 linux_supports_tracevforkdone_flag = (ret == 0);
442
443 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
444 if (ret != 0)
445 warning (_("linux_test_for_tracefork: failed to resume child"));
446
447 ret = my_waitpid (child_pid, &status, 0);
448
449 if (ret == child_pid && WIFSTOPPED (status)
450 && status >> 16 == PTRACE_EVENT_FORK)
451 {
452 second_pid = 0;
453 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
454 if (ret == 0 && second_pid != 0)
455 {
456 int second_status;
457
458 linux_supports_tracefork_flag = 1;
459 my_waitpid (second_pid, &second_status, 0);
460 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
461 if (ret != 0)
462 warning (_("linux_test_for_tracefork: "
463 "failed to kill second child"));
464 my_waitpid (second_pid, &status, 0);
465 }
466 }
467 else
468 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
469 "(%d, status 0x%x)"), ret, status);
470
471 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
472 if (ret != 0)
473 warning (_("linux_test_for_tracefork: failed to kill child"));
474 my_waitpid (child_pid, &status, 0);
475
476 restore_child_signals_mask (&prev_mask);
477 }
478
479 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
480
481 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
482 we know that the feature is not available. This may change the tracing
483 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
484
485 static void
486 linux_test_for_tracesysgood (int original_pid)
487 {
488 int ret;
489 sigset_t prev_mask;
490
491 /* We don't want those ptrace calls to be interrupted. */
492 block_child_signals (&prev_mask);
493
494 linux_supports_tracesysgood_flag = 0;
495
496 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
497 if (ret != 0)
498 goto out;
499
500 linux_supports_tracesysgood_flag = 1;
501 out:
502 restore_child_signals_mask (&prev_mask);
503 }
504
505 /* Determine wether we support PTRACE_O_TRACESYSGOOD option available.
506 This function also sets linux_supports_tracesysgood_flag. */
507
508 static int
509 linux_supports_tracesysgood (int pid)
510 {
511 if (linux_supports_tracesysgood_flag == -1)
512 linux_test_for_tracesysgood (pid);
513 return linux_supports_tracesysgood_flag;
514 }
515
516 /* Return non-zero iff we have tracefork functionality available.
517 This function also sets linux_supports_tracefork_flag. */
518
519 static int
520 linux_supports_tracefork (int pid)
521 {
522 if (linux_supports_tracefork_flag == -1)
523 linux_test_for_tracefork (pid);
524 return linux_supports_tracefork_flag;
525 }
526
527 static int
528 linux_supports_tracevforkdone (int pid)
529 {
530 if (linux_supports_tracefork_flag == -1)
531 linux_test_for_tracefork (pid);
532 return linux_supports_tracevforkdone_flag;
533 }
534
535 static void
536 linux_enable_tracesysgood (ptid_t ptid)
537 {
538 int pid = ptid_get_lwp (ptid);
539
540 if (pid == 0)
541 pid = ptid_get_pid (ptid);
542
543 if (linux_supports_tracesysgood (pid) == 0)
544 return;
545
546 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
547
548 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
549 }
550
551 \f
552 void
553 linux_enable_event_reporting (ptid_t ptid)
554 {
555 int pid = ptid_get_lwp (ptid);
556
557 if (pid == 0)
558 pid = ptid_get_pid (ptid);
559
560 if (! linux_supports_tracefork (pid))
561 return;
562
563 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
564 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
565
566 if (linux_supports_tracevforkdone (pid))
567 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
568
569 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
570 read-only process state. */
571
572 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
573 }
574
575 static void
576 linux_child_post_attach (int pid)
577 {
578 linux_enable_event_reporting (pid_to_ptid (pid));
579 linux_enable_tracesysgood (pid_to_ptid (pid));
580 }
581
582 static void
583 linux_child_post_startup_inferior (ptid_t ptid)
584 {
585 linux_enable_event_reporting (ptid);
586 linux_enable_tracesysgood (ptid);
587 }
588
589 /* Return the number of known LWPs in the tgid given by PID. */
590
591 static int
592 num_lwps (int pid)
593 {
594 int count = 0;
595 struct lwp_info *lp;
596
597 for (lp = lwp_list; lp; lp = lp->next)
598 if (ptid_get_pid (lp->ptid) == pid)
599 count++;
600
601 return count;
602 }
603
604 /* Call delete_lwp with prototype compatible for make_cleanup. */
605
606 static void
607 delete_lwp_cleanup (void *lp_voidp)
608 {
609 struct lwp_info *lp = lp_voidp;
610
611 delete_lwp (lp->ptid);
612 }
613
614 static int
615 linux_child_follow_fork (struct target_ops *ops, int follow_child)
616 {
617 sigset_t prev_mask;
618 int has_vforked;
619 int parent_pid, child_pid;
620
621 block_child_signals (&prev_mask);
622
623 has_vforked = (inferior_thread ()->pending_follow.kind
624 == TARGET_WAITKIND_VFORKED);
625 parent_pid = ptid_get_lwp (inferior_ptid);
626 if (parent_pid == 0)
627 parent_pid = ptid_get_pid (inferior_ptid);
628 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
629
630 if (!detach_fork)
631 linux_enable_event_reporting (pid_to_ptid (child_pid));
632
633 if (has_vforked
634 && !non_stop /* Non-stop always resumes both branches. */
635 && (!target_is_async_p () || sync_execution)
636 && !(follow_child || detach_fork || sched_multi))
637 {
638 /* The parent stays blocked inside the vfork syscall until the
639 child execs or exits. If we don't let the child run, then
640 the parent stays blocked. If we're telling the parent to run
641 in the foreground, the user will not be able to ctrl-c to get
642 back the terminal, effectively hanging the debug session. */
643 fprintf_filtered (gdb_stderr, _("\
644 Can not resume the parent process over vfork in the foreground while\n\
645 holding the child stopped. Try \"set detach-on-fork\" or \
646 \"set schedule-multiple\".\n"));
647 /* FIXME output string > 80 columns. */
648 return 1;
649 }
650
651 if (! follow_child)
652 {
653 struct lwp_info *child_lp = NULL;
654
655 /* We're already attached to the parent, by default. */
656
657 /* Detach new forked process? */
658 if (detach_fork)
659 {
660 struct cleanup *old_chain;
661
662 /* Before detaching from the child, remove all breakpoints
663 from it. If we forked, then this has already been taken
664 care of by infrun.c. If we vforked however, any
665 breakpoint inserted in the parent is visible in the
666 child, even those added while stopped in a vfork
667 catchpoint. This will remove the breakpoints from the
668 parent also, but they'll be reinserted below. */
669 if (has_vforked)
670 {
671 /* keep breakpoints list in sync. */
672 remove_breakpoints_pid (GET_PID (inferior_ptid));
673 }
674
675 if (info_verbose || debug_linux_nat)
676 {
677 target_terminal_ours ();
678 fprintf_filtered (gdb_stdlog,
679 "Detaching after fork from "
680 "child process %d.\n",
681 child_pid);
682 }
683
684 old_chain = save_inferior_ptid ();
685 inferior_ptid = ptid_build (child_pid, child_pid, 0);
686
687 child_lp = add_lwp (inferior_ptid);
688 child_lp->stopped = 1;
689 child_lp->last_resume_kind = resume_stop;
690 make_cleanup (delete_lwp_cleanup, child_lp);
691
692 /* CHILD_LP has new PID, therefore linux_nat_new_thread is not called for it.
693 See i386_inferior_data_get for the Linux kernel specifics.
694 Ensure linux_nat_prepare_to_resume will reset the hardware debug
695 registers. It is done by the linux_nat_new_thread call, which is
696 being skipped in add_lwp above for the first lwp of a pid. */
697 gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
698 if (linux_nat_new_thread != NULL)
699 linux_nat_new_thread (child_lp);
700
701 if (linux_nat_prepare_to_resume != NULL)
702 linux_nat_prepare_to_resume (child_lp);
703 ptrace (PTRACE_DETACH, child_pid, 0, 0);
704
705 do_cleanups (old_chain);
706 }
707 else
708 {
709 struct inferior *parent_inf, *child_inf;
710 struct cleanup *old_chain;
711
712 /* Add process to GDB's tables. */
713 child_inf = add_inferior (child_pid);
714
715 parent_inf = current_inferior ();
716 child_inf->attach_flag = parent_inf->attach_flag;
717 copy_terminal_info (child_inf, parent_inf);
718
719 old_chain = save_inferior_ptid ();
720 save_current_program_space ();
721
722 inferior_ptid = ptid_build (child_pid, child_pid, 0);
723 add_thread (inferior_ptid);
724 child_lp = add_lwp (inferior_ptid);
725 child_lp->stopped = 1;
726 child_lp->last_resume_kind = resume_stop;
727 child_inf->symfile_flags = SYMFILE_NO_READ;
728
729 /* If this is a vfork child, then the address-space is
730 shared with the parent. */
731 if (has_vforked)
732 {
733 child_inf->pspace = parent_inf->pspace;
734 child_inf->aspace = parent_inf->aspace;
735
736 /* The parent will be frozen until the child is done
737 with the shared region. Keep track of the
738 parent. */
739 child_inf->vfork_parent = parent_inf;
740 child_inf->pending_detach = 0;
741 parent_inf->vfork_child = child_inf;
742 parent_inf->pending_detach = 0;
743 }
744 else
745 {
746 child_inf->aspace = new_address_space ();
747 child_inf->pspace = add_program_space (child_inf->aspace);
748 child_inf->removable = 1;
749 set_current_program_space (child_inf->pspace);
750 clone_program_space (child_inf->pspace, parent_inf->pspace);
751
752 /* Let the shared library layer (solib-svr4) learn about
753 this new process, relocate the cloned exec, pull in
754 shared libraries, and install the solib event
755 breakpoint. If a "cloned-VM" event was propagated
756 better throughout the core, this wouldn't be
757 required. */
758 solib_create_inferior_hook (0);
759 }
760
761 /* Let the thread_db layer learn about this new process. */
762 check_for_thread_db ();
763
764 do_cleanups (old_chain);
765 }
766
767 if (has_vforked)
768 {
769 struct lwp_info *parent_lp;
770 struct inferior *parent_inf;
771
772 parent_inf = current_inferior ();
773
774 /* If we detached from the child, then we have to be careful
775 to not insert breakpoints in the parent until the child
776 is done with the shared memory region. However, if we're
777 staying attached to the child, then we can and should
778 insert breakpoints, so that we can debug it. A
779 subsequent child exec or exit is enough to know when does
780 the child stops using the parent's address space. */
781 parent_inf->waiting_for_vfork_done = detach_fork;
782 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
783
784 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
785 gdb_assert (linux_supports_tracefork_flag >= 0);
786
787 if (linux_supports_tracevforkdone (0))
788 {
789 if (debug_linux_nat)
790 fprintf_unfiltered (gdb_stdlog,
791 "LCFF: waiting for VFORK_DONE on %d\n",
792 parent_pid);
793 parent_lp->stopped = 1;
794
795 /* We'll handle the VFORK_DONE event like any other
796 event, in target_wait. */
797 }
798 else
799 {
800 /* We can't insert breakpoints until the child has
801 finished with the shared memory region. We need to
802 wait until that happens. Ideal would be to just
803 call:
804 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
805 - waitpid (parent_pid, &status, __WALL);
806 However, most architectures can't handle a syscall
807 being traced on the way out if it wasn't traced on
808 the way in.
809
810 We might also think to loop, continuing the child
811 until it exits or gets a SIGTRAP. One problem is
812 that the child might call ptrace with PTRACE_TRACEME.
813
814 There's no simple and reliable way to figure out when
815 the vforked child will be done with its copy of the
816 shared memory. We could step it out of the syscall,
817 two instructions, let it go, and then single-step the
818 parent once. When we have hardware single-step, this
819 would work; with software single-step it could still
820 be made to work but we'd have to be able to insert
821 single-step breakpoints in the child, and we'd have
822 to insert -just- the single-step breakpoint in the
823 parent. Very awkward.
824
825 In the end, the best we can do is to make sure it
826 runs for a little while. Hopefully it will be out of
827 range of any breakpoints we reinsert. Usually this
828 is only the single-step breakpoint at vfork's return
829 point. */
830
831 if (debug_linux_nat)
832 fprintf_unfiltered (gdb_stdlog,
833 "LCFF: no VFORK_DONE "
834 "support, sleeping a bit\n");
835
836 usleep (10000);
837
838 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
839 and leave it pending. The next linux_nat_resume call
840 will notice a pending event, and bypasses actually
841 resuming the inferior. */
842 parent_lp->status = 0;
843 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
844 parent_lp->stopped = 1;
845
846 /* If we're in async mode, need to tell the event loop
847 there's something here to process. */
848 if (target_can_async_p ())
849 async_file_mark ();
850 }
851 }
852 }
853 else
854 {
855 struct inferior *parent_inf, *child_inf;
856 struct lwp_info *child_lp;
857 struct program_space *parent_pspace;
858
859 if (info_verbose || debug_linux_nat)
860 {
861 target_terminal_ours ();
862 if (has_vforked)
863 fprintf_filtered (gdb_stdlog,
864 _("Attaching after process %d "
865 "vfork to child process %d.\n"),
866 parent_pid, child_pid);
867 else
868 fprintf_filtered (gdb_stdlog,
869 _("Attaching after process %d "
870 "fork to child process %d.\n"),
871 parent_pid, child_pid);
872 }
873
874 /* Add the new inferior first, so that the target_detach below
875 doesn't unpush the target. */
876
877 child_inf = add_inferior (child_pid);
878
879 parent_inf = current_inferior ();
880 child_inf->attach_flag = parent_inf->attach_flag;
881 copy_terminal_info (child_inf, parent_inf);
882
883 parent_pspace = parent_inf->pspace;
884
885 /* If we're vforking, we want to hold on to the parent until the
886 child exits or execs. At child exec or exit time we can
887 remove the old breakpoints from the parent and detach or
888 resume debugging it. Otherwise, detach the parent now; we'll
889 want to reuse it's program/address spaces, but we can't set
890 them to the child before removing breakpoints from the
891 parent, otherwise, the breakpoints module could decide to
892 remove breakpoints from the wrong process (since they'd be
893 assigned to the same address space). */
894
895 if (has_vforked)
896 {
897 gdb_assert (child_inf->vfork_parent == NULL);
898 gdb_assert (parent_inf->vfork_child == NULL);
899 child_inf->vfork_parent = parent_inf;
900 child_inf->pending_detach = 0;
901 parent_inf->vfork_child = child_inf;
902 parent_inf->pending_detach = detach_fork;
903 parent_inf->waiting_for_vfork_done = 0;
904 }
905 else if (detach_fork)
906 target_detach (NULL, 0);
907
908 /* Note that the detach above makes PARENT_INF dangling. */
909
910 /* Add the child thread to the appropriate lists, and switch to
911 this new thread, before cloning the program space, and
912 informing the solib layer about this new process. */
913
914 inferior_ptid = ptid_build (child_pid, child_pid, 0);
915 add_thread (inferior_ptid);
916 child_lp = add_lwp (inferior_ptid);
917 child_lp->stopped = 1;
918 child_lp->last_resume_kind = resume_stop;
919
920 /* If this is a vfork child, then the address-space is shared
921 with the parent. If we detached from the parent, then we can
922 reuse the parent's program/address spaces. */
923 if (has_vforked || detach_fork)
924 {
925 child_inf->pspace = parent_pspace;
926 child_inf->aspace = child_inf->pspace->aspace;
927 }
928 else
929 {
930 child_inf->aspace = new_address_space ();
931 child_inf->pspace = add_program_space (child_inf->aspace);
932 child_inf->removable = 1;
933 child_inf->symfile_flags = SYMFILE_NO_READ;
934 set_current_program_space (child_inf->pspace);
935 clone_program_space (child_inf->pspace, parent_pspace);
936
937 /* Let the shared library layer (solib-svr4) learn about
938 this new process, relocate the cloned exec, pull in
939 shared libraries, and install the solib event breakpoint.
940 If a "cloned-VM" event was propagated better throughout
941 the core, this wouldn't be required. */
942 solib_create_inferior_hook (0);
943 }
944
945 /* Let the thread_db layer learn about this new process. */
946 check_for_thread_db ();
947 }
948
949 restore_child_signals_mask (&prev_mask);
950 return 0;
951 }
952
953 \f
954 static int
955 linux_child_insert_fork_catchpoint (int pid)
956 {
957 return !linux_supports_tracefork (pid);
958 }
959
960 static int
961 linux_child_remove_fork_catchpoint (int pid)
962 {
963 return 0;
964 }
965
966 static int
967 linux_child_insert_vfork_catchpoint (int pid)
968 {
969 return !linux_supports_tracefork (pid);
970 }
971
972 static int
973 linux_child_remove_vfork_catchpoint (int pid)
974 {
975 return 0;
976 }
977
978 static int
979 linux_child_insert_exec_catchpoint (int pid)
980 {
981 return !linux_supports_tracefork (pid);
982 }
983
984 static int
985 linux_child_remove_exec_catchpoint (int pid)
986 {
987 return 0;
988 }
989
990 static int
991 linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
992 int table_size, int *table)
993 {
994 if (!linux_supports_tracesysgood (pid))
995 return 1;
996
997 /* On GNU/Linux, we ignore the arguments. It means that we only
998 enable the syscall catchpoints, but do not disable them.
999
1000 Also, we do not use the `table' information because we do not
1001 filter system calls here. We let GDB do the logic for us. */
1002 return 0;
1003 }
1004
1005 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
1006 are processes sharing the same VM space. A multi-threaded process
1007 is basically a group of such processes. However, such a grouping
1008 is almost entirely a user-space issue; the kernel doesn't enforce
1009 such a grouping at all (this might change in the future). In
1010 general, we'll rely on the threads library (i.e. the GNU/Linux
1011 Threads library) to provide such a grouping.
1012
1013 It is perfectly well possible to write a multi-threaded application
1014 without the assistance of a threads library, by using the clone
1015 system call directly. This module should be able to give some
1016 rudimentary support for debugging such applications if developers
1017 specify the CLONE_PTRACE flag in the clone system call, and are
1018 using the Linux kernel 2.4 or above.
1019
1020 Note that there are some peculiarities in GNU/Linux that affect
1021 this code:
1022
1023 - In general one should specify the __WCLONE flag to waitpid in
1024 order to make it report events for any of the cloned processes
1025 (and leave it out for the initial process). However, if a cloned
1026 process has exited the exit status is only reported if the
1027 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
1028 we cannot use it since GDB must work on older systems too.
1029
1030 - When a traced, cloned process exits and is waited for by the
1031 debugger, the kernel reassigns it to the original parent and
1032 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
1033 library doesn't notice this, which leads to the "zombie problem":
1034 When debugged a multi-threaded process that spawns a lot of
1035 threads will run out of processes, even if the threads exit,
1036 because the "zombies" stay around. */
1037
1038 /* List of known LWPs. */
1039 struct lwp_info *lwp_list;
1040 \f
1041
1042 /* Original signal mask. */
1043 static sigset_t normal_mask;
1044
1045 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1046 _initialize_linux_nat. */
1047 static sigset_t suspend_mask;
1048
1049 /* Signals to block to make that sigsuspend work. */
1050 static sigset_t blocked_mask;
1051
1052 /* SIGCHLD action. */
1053 struct sigaction sigchld_action;
1054
1055 /* Block child signals (SIGCHLD and linux threads signals), and store
1056 the previous mask in PREV_MASK. */
1057
1058 static void
1059 block_child_signals (sigset_t *prev_mask)
1060 {
1061 /* Make sure SIGCHLD is blocked. */
1062 if (!sigismember (&blocked_mask, SIGCHLD))
1063 sigaddset (&blocked_mask, SIGCHLD);
1064
1065 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1066 }
1067
1068 /* Restore child signals mask, previously returned by
1069 block_child_signals. */
1070
1071 static void
1072 restore_child_signals_mask (sigset_t *prev_mask)
1073 {
1074 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1075 }
1076
1077 /* Mask of signals to pass directly to the inferior. */
1078 static sigset_t pass_mask;
1079
1080 /* Update signals to pass to the inferior. */
1081 static void
1082 linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1083 {
1084 int signo;
1085
1086 sigemptyset (&pass_mask);
1087
1088 for (signo = 1; signo < NSIG; signo++)
1089 {
1090 int target_signo = target_signal_from_host (signo);
1091 if (target_signo < numsigs && pass_signals[target_signo])
1092 sigaddset (&pass_mask, signo);
1093 }
1094 }
1095
1096 \f
1097
1098 /* Prototypes for local functions. */
1099 static int stop_wait_callback (struct lwp_info *lp, void *data);
1100 static int linux_thread_alive (ptid_t ptid);
1101 static char *linux_child_pid_to_exec_file (int pid);
1102
1103 \f
1104 /* Convert wait status STATUS to a string. Used for printing debug
1105 messages only. */
1106
1107 static char *
1108 status_to_str (int status)
1109 {
1110 static char buf[64];
1111
1112 if (WIFSTOPPED (status))
1113 {
1114 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1115 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1116 strsignal (SIGTRAP));
1117 else
1118 snprintf (buf, sizeof (buf), "%s (stopped)",
1119 strsignal (WSTOPSIG (status)));
1120 }
1121 else if (WIFSIGNALED (status))
1122 snprintf (buf, sizeof (buf), "%s (terminated)",
1123 strsignal (WTERMSIG (status)));
1124 else
1125 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1126
1127 return buf;
1128 }
1129
1130 /* Destroy and free LP. */
1131
1132 static void
1133 lwp_free (struct lwp_info *lp)
1134 {
1135 xfree (lp->arch_private);
1136 xfree (lp);
1137 }
1138
1139 /* Remove all LWPs belong to PID from the lwp list. */
1140
1141 static void
1142 purge_lwp_list (int pid)
1143 {
1144 struct lwp_info *lp, *lpprev, *lpnext;
1145
1146 lpprev = NULL;
1147
1148 for (lp = lwp_list; lp; lp = lpnext)
1149 {
1150 lpnext = lp->next;
1151
1152 if (ptid_get_pid (lp->ptid) == pid)
1153 {
1154 if (lp == lwp_list)
1155 lwp_list = lp->next;
1156 else
1157 lpprev->next = lp->next;
1158
1159 lwp_free (lp);
1160 }
1161 else
1162 lpprev = lp;
1163 }
1164 }
1165
1166 /* Add the LWP specified by PID to the list. Return a pointer to the
1167 structure describing the new LWP. The LWP should already be stopped
1168 (with an exception for the very first LWP). */
1169
1170 static struct lwp_info *
1171 add_lwp (ptid_t ptid)
1172 {
1173 struct lwp_info *lp;
1174
1175 gdb_assert (is_lwp (ptid));
1176
1177 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1178
1179 memset (lp, 0, sizeof (struct lwp_info));
1180
1181 lp->last_resume_kind = resume_continue;
1182 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1183
1184 lp->ptid = ptid;
1185 lp->core = -1;
1186
1187 lp->next = lwp_list;
1188 lwp_list = lp;
1189
1190 /* Let the arch specific bits know about this new thread. Current
1191 clients of this callback take the opportunity to install
1192 watchpoints in the new thread. Don't do this for the first
1193 thread though. If we're spawning a child ("run"), the thread
1194 executes the shell wrapper first, and we shouldn't touch it until
1195 it execs the program we want to debug. For "attach", it'd be
1196 okay to call the callback, but it's not necessary, because
1197 watchpoints can't yet have been inserted into the inferior. */
1198 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
1199 linux_nat_new_thread (lp);
1200
1201 return lp;
1202 }
1203
1204 /* Remove the LWP specified by PID from the list. */
1205
1206 static void
1207 delete_lwp (ptid_t ptid)
1208 {
1209 struct lwp_info *lp, *lpprev;
1210
1211 lpprev = NULL;
1212
1213 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1214 if (ptid_equal (lp->ptid, ptid))
1215 break;
1216
1217 if (!lp)
1218 return;
1219
1220 if (lpprev)
1221 lpprev->next = lp->next;
1222 else
1223 lwp_list = lp->next;
1224
1225 lwp_free (lp);
1226 }
1227
1228 /* Return a pointer to the structure describing the LWP corresponding
1229 to PID. If no corresponding LWP could be found, return NULL. */
1230
1231 static struct lwp_info *
1232 find_lwp_pid (ptid_t ptid)
1233 {
1234 struct lwp_info *lp;
1235 int lwp;
1236
1237 if (is_lwp (ptid))
1238 lwp = GET_LWP (ptid);
1239 else
1240 lwp = GET_PID (ptid);
1241
1242 for (lp = lwp_list; lp; lp = lp->next)
1243 if (lwp == GET_LWP (lp->ptid))
1244 return lp;
1245
1246 return NULL;
1247 }
1248
1249 /* Call CALLBACK with its second argument set to DATA for every LWP in
1250 the list. If CALLBACK returns 1 for a particular LWP, return a
1251 pointer to the structure describing that LWP immediately.
1252 Otherwise return NULL. */
1253
1254 struct lwp_info *
1255 iterate_over_lwps (ptid_t filter,
1256 int (*callback) (struct lwp_info *, void *),
1257 void *data)
1258 {
1259 struct lwp_info *lp, *lpnext;
1260
1261 for (lp = lwp_list; lp; lp = lpnext)
1262 {
1263 lpnext = lp->next;
1264
1265 if (ptid_match (lp->ptid, filter))
1266 {
1267 if ((*callback) (lp, data))
1268 return lp;
1269 }
1270 }
1271
1272 return NULL;
1273 }
1274
1275 /* Iterate like iterate_over_lwps does except when forking-off a child call
1276 CALLBACK with CALLBACK_DATA specifically only for that new child PID. */
1277
1278 void
1279 linux_nat_iterate_watchpoint_lwps
1280 (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
1281 {
1282 int inferior_pid = ptid_get_pid (inferior_ptid);
1283 struct inferior *inf = current_inferior ();
1284
1285 if (inf->pid == inferior_pid)
1286 {
1287 /* Iterate all the threads of the current inferior. Without specifying
1288 INFERIOR_PID it would iterate all threads of all inferiors, which is
1289 inappropriate for watchpoints. */
1290
1291 iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
1292 }
1293 else
1294 {
1295 /* Detaching a new child PID temporarily present in INFERIOR_PID. */
1296
1297 struct lwp_info *child_lp;
1298 struct cleanup *old_chain;
1299 pid_t child_pid = GET_PID (inferior_ptid);
1300 ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);
1301
1302 gdb_assert (!is_lwp (inferior_ptid));
1303 gdb_assert (find_lwp_pid (child_ptid) == NULL);
1304 child_lp = add_lwp (child_ptid);
1305 child_lp->stopped = 1;
1306 child_lp->last_resume_kind = resume_stop;
1307 old_chain = make_cleanup (delete_lwp_cleanup, child_lp);
1308
1309 callback (child_lp, callback_data);
1310
1311 do_cleanups (old_chain);
1312 }
1313 }
1314
1315 /* Update our internal state when changing from one checkpoint to
1316 another indicated by NEW_PTID. We can only switch single-threaded
1317 applications, so we only create one new LWP, and the previous list
1318 is discarded. */
1319
1320 void
1321 linux_nat_switch_fork (ptid_t new_ptid)
1322 {
1323 struct lwp_info *lp;
1324
1325 purge_lwp_list (GET_PID (inferior_ptid));
1326
1327 lp = add_lwp (new_ptid);
1328 lp->stopped = 1;
1329
1330 /* This changes the thread's ptid while preserving the gdb thread
1331 num. Also changes the inferior pid, while preserving the
1332 inferior num. */
1333 thread_change_ptid (inferior_ptid, new_ptid);
1334
1335 /* We've just told GDB core that the thread changed target id, but,
1336 in fact, it really is a different thread, with different register
1337 contents. */
1338 registers_changed ();
1339 }
1340
1341 /* Handle the exit of a single thread LP. */
1342
1343 static void
1344 exit_lwp (struct lwp_info *lp)
1345 {
1346 struct thread_info *th = find_thread_ptid (lp->ptid);
1347
1348 if (th)
1349 {
1350 if (print_thread_events)
1351 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1352
1353 delete_thread (lp->ptid);
1354 }
1355
1356 delete_lwp (lp->ptid);
1357 }
1358
1359 /* Wait for the LWP specified by LP, which we have just attached to.
1360 Returns a wait status for that LWP, to cache. */
1361
1362 static int
1363 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1364 int *signalled)
1365 {
1366 pid_t new_pid, pid = GET_LWP (ptid);
1367 int status;
1368
1369 if (linux_proc_pid_is_stopped (pid))
1370 {
1371 if (debug_linux_nat)
1372 fprintf_unfiltered (gdb_stdlog,
1373 "LNPAW: Attaching to a stopped process\n");
1374
1375 /* The process is definitely stopped. It is in a job control
1376 stop, unless the kernel predates the TASK_STOPPED /
1377 TASK_TRACED distinction, in which case it might be in a
1378 ptrace stop. Make sure it is in a ptrace stop; from there we
1379 can kill it, signal it, et cetera.
1380
1381 First make sure there is a pending SIGSTOP. Since we are
1382 already attached, the process can not transition from stopped
1383 to running without a PTRACE_CONT; so we know this signal will
1384 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1385 probably already in the queue (unless this kernel is old
1386 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1387 is not an RT signal, it can only be queued once. */
1388 kill_lwp (pid, SIGSTOP);
1389
1390 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1391 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1392 ptrace (PTRACE_CONT, pid, 0, 0);
1393 }
1394
1395 /* Make sure the initial process is stopped. The user-level threads
1396 layer might want to poke around in the inferior, and that won't
1397 work if things haven't stabilized yet. */
1398 new_pid = my_waitpid (pid, &status, 0);
1399 if (new_pid == -1 && errno == ECHILD)
1400 {
1401 if (first)
1402 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1403
1404 /* Try again with __WCLONE to check cloned processes. */
1405 new_pid = my_waitpid (pid, &status, __WCLONE);
1406 *cloned = 1;
1407 }
1408
1409 gdb_assert (pid == new_pid);
1410
1411 if (!WIFSTOPPED (status))
1412 {
1413 /* The pid we tried to attach has apparently just exited. */
1414 if (debug_linux_nat)
1415 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1416 pid, status_to_str (status));
1417 return status;
1418 }
1419
1420 if (WSTOPSIG (status) != SIGSTOP)
1421 {
1422 *signalled = 1;
1423 if (debug_linux_nat)
1424 fprintf_unfiltered (gdb_stdlog,
1425 "LNPAW: Received %s after attaching\n",
1426 status_to_str (status));
1427 }
1428
1429 return status;
1430 }
1431
1432 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1433 the new LWP could not be attached, or 1 if we're already auto
1434 attached to this thread, but haven't processed the
1435 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1436 its existance, without considering it an error. */
1437
1438 int
1439 lin_lwp_attach_lwp (ptid_t ptid)
1440 {
1441 struct lwp_info *lp;
1442 sigset_t prev_mask;
1443 int lwpid;
1444
1445 gdb_assert (is_lwp (ptid));
1446
1447 block_child_signals (&prev_mask);
1448
1449 lp = find_lwp_pid (ptid);
1450 lwpid = GET_LWP (ptid);
1451
1452 /* We assume that we're already attached to any LWP that has an id
1453 equal to the overall process id, and to any LWP that is already
1454 in our list of LWPs. If we're not seeing exit events from threads
1455 and we've had PID wraparound since we last tried to stop all threads,
1456 this assumption might be wrong; fortunately, this is very unlikely
1457 to happen. */
1458 if (lwpid != GET_PID (ptid) && lp == NULL)
1459 {
1460 int status, cloned = 0, signalled = 0;
1461
1462 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1463 {
1464 if (linux_supports_tracefork_flag)
1465 {
1466 /* If we haven't stopped all threads when we get here,
1467 we may have seen a thread listed in thread_db's list,
1468 but not processed the PTRACE_EVENT_CLONE yet. If
1469 that's the case, ignore this new thread, and let
1470 normal event handling discover it later. */
1471 if (in_pid_list_p (stopped_pids, lwpid))
1472 {
1473 /* We've already seen this thread stop, but we
1474 haven't seen the PTRACE_EVENT_CLONE extended
1475 event yet. */
1476 restore_child_signals_mask (&prev_mask);
1477 return 0;
1478 }
1479 else
1480 {
1481 int new_pid;
1482 int status;
1483
1484 /* See if we've got a stop for this new child
1485 pending. If so, we're already attached. */
1486 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1487 if (new_pid == -1 && errno == ECHILD)
1488 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1489 if (new_pid != -1)
1490 {
1491 if (WIFSTOPPED (status))
1492 add_to_pid_list (&stopped_pids, lwpid, status);
1493
1494 restore_child_signals_mask (&prev_mask);
1495 return 1;
1496 }
1497 }
1498 }
1499
1500 /* If we fail to attach to the thread, issue a warning,
1501 but continue. One way this can happen is if thread
1502 creation is interrupted; as of Linux kernel 2.6.19, a
1503 bug may place threads in the thread list and then fail
1504 to create them. */
1505 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1506 safe_strerror (errno));
1507 restore_child_signals_mask (&prev_mask);
1508 return -1;
1509 }
1510
1511 if (debug_linux_nat)
1512 fprintf_unfiltered (gdb_stdlog,
1513 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1514 target_pid_to_str (ptid));
1515
1516 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1517 if (!WIFSTOPPED (status))
1518 {
1519 restore_child_signals_mask (&prev_mask);
1520 return 1;
1521 }
1522
1523 lp = add_lwp (ptid);
1524 lp->stopped = 1;
1525 lp->cloned = cloned;
1526 lp->signalled = signalled;
1527 if (WSTOPSIG (status) != SIGSTOP)
1528 {
1529 lp->resumed = 1;
1530 lp->status = status;
1531 }
1532
1533 target_post_attach (GET_LWP (lp->ptid));
1534
1535 if (debug_linux_nat)
1536 {
1537 fprintf_unfiltered (gdb_stdlog,
1538 "LLAL: waitpid %s received %s\n",
1539 target_pid_to_str (ptid),
1540 status_to_str (status));
1541 }
1542 }
1543 else
1544 {
1545 /* We assume that the LWP representing the original process is
1546 already stopped. Mark it as stopped in the data structure
1547 that the GNU/linux ptrace layer uses to keep track of
1548 threads. Note that this won't have already been done since
1549 the main thread will have, we assume, been stopped by an
1550 attach from a different layer. */
1551 if (lp == NULL)
1552 lp = add_lwp (ptid);
1553 lp->stopped = 1;
1554 }
1555
1556 lp->last_resume_kind = resume_stop;
1557 restore_child_signals_mask (&prev_mask);
1558 return 0;
1559 }
1560
1561 static void
1562 linux_nat_create_inferior (struct target_ops *ops,
1563 char *exec_file, char *allargs, char **env,
1564 int from_tty)
1565 {
1566 #ifdef HAVE_PERSONALITY
1567 int personality_orig = 0, personality_set = 0;
1568 #endif /* HAVE_PERSONALITY */
1569
1570 /* The fork_child mechanism is synchronous and calls target_wait, so
1571 we have to mask the async mode. */
1572
1573 #ifdef HAVE_PERSONALITY
1574 if (disable_randomization)
1575 {
1576 errno = 0;
1577 personality_orig = personality (0xffffffff);
1578 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1579 {
1580 personality_set = 1;
1581 personality (personality_orig | ADDR_NO_RANDOMIZE);
1582 }
1583 if (errno != 0 || (personality_set
1584 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1585 warning (_("Error disabling address space randomization: %s"),
1586 safe_strerror (errno));
1587 }
1588 #endif /* HAVE_PERSONALITY */
1589
1590 /* Make sure we report all signals during startup. */
1591 linux_nat_pass_signals (0, NULL);
1592
1593 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1594
1595 #ifdef HAVE_PERSONALITY
1596 if (personality_set)
1597 {
1598 errno = 0;
1599 personality (personality_orig);
1600 if (errno != 0)
1601 warning (_("Error restoring address space randomization: %s"),
1602 safe_strerror (errno));
1603 }
1604 #endif /* HAVE_PERSONALITY */
1605 }
1606
1607 static void
1608 linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
1609 {
1610 struct lwp_info *lp;
1611 int status;
1612 ptid_t ptid;
1613
1614 /* Make sure we report all signals during attach. */
1615 linux_nat_pass_signals (0, NULL);
1616
1617 linux_ops->to_attach (ops, args, from_tty);
1618
1619 /* The ptrace base target adds the main thread with (pid,0,0)
1620 format. Decorate it with lwp info. */
1621 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1622 thread_change_ptid (inferior_ptid, ptid);
1623
1624 /* Add the initial process as the first LWP to the list. */
1625 lp = add_lwp (ptid);
1626
1627 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1628 &lp->signalled);
1629 if (!WIFSTOPPED (status))
1630 {
1631 if (WIFEXITED (status))
1632 {
1633 int exit_code = WEXITSTATUS (status);
1634
1635 target_terminal_ours ();
1636 target_mourn_inferior ();
1637 if (exit_code == 0)
1638 error (_("Unable to attach: program exited normally."));
1639 else
1640 error (_("Unable to attach: program exited with code %d."),
1641 exit_code);
1642 }
1643 else if (WIFSIGNALED (status))
1644 {
1645 enum target_signal signo;
1646
1647 target_terminal_ours ();
1648 target_mourn_inferior ();
1649
1650 signo = target_signal_from_host (WTERMSIG (status));
1651 error (_("Unable to attach: program terminated with signal "
1652 "%s, %s."),
1653 target_signal_to_name (signo),
1654 target_signal_to_string (signo));
1655 }
1656
1657 internal_error (__FILE__, __LINE__,
1658 _("unexpected status %d for PID %ld"),
1659 status, (long) GET_LWP (ptid));
1660 }
1661
1662 lp->stopped = 1;
1663
1664 /* Save the wait status to report later. */
1665 lp->resumed = 1;
1666 if (debug_linux_nat)
1667 fprintf_unfiltered (gdb_stdlog,
1668 "LNA: waitpid %ld, saving status %s\n",
1669 (long) GET_PID (lp->ptid), status_to_str (status));
1670
1671 lp->status = status;
1672
1673 if (target_can_async_p ())
1674 target_async (inferior_event_handler, 0);
1675 }
1676
1677 /* Get pending status of LP. */
1678 static int
1679 get_pending_status (struct lwp_info *lp, int *status)
1680 {
1681 enum target_signal signo = TARGET_SIGNAL_0;
1682
1683 /* If we paused threads momentarily, we may have stored pending
1684 events in lp->status or lp->waitstatus (see stop_wait_callback),
1685 and GDB core hasn't seen any signal for those threads.
1686 Otherwise, the last signal reported to the core is found in the
1687 thread object's stop_signal.
1688
1689 There's a corner case that isn't handled here at present. Only
1690 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1691 stop_signal make sense as a real signal to pass to the inferior.
1692 Some catchpoint related events, like
1693 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1694 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1695 those traps are debug API (ptrace in our case) related and
1696 induced; the inferior wouldn't see them if it wasn't being
1697 traced. Hence, we should never pass them to the inferior, even
1698 when set to pass state. Since this corner case isn't handled by
1699 infrun.c when proceeding with a signal, for consistency, neither
1700 do we handle it here (or elsewhere in the file we check for
1701 signal pass state). Normally SIGTRAP isn't set to pass state, so
1702 this is really a corner case. */
1703
1704 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1705 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1706 else if (lp->status)
1707 signo = target_signal_from_host (WSTOPSIG (lp->status));
1708 else if (non_stop && !is_executing (lp->ptid))
1709 {
1710 struct thread_info *tp = find_thread_ptid (lp->ptid);
1711
1712 signo = tp->suspend.stop_signal;
1713 }
1714 else if (!non_stop)
1715 {
1716 struct target_waitstatus last;
1717 ptid_t last_ptid;
1718
1719 get_last_target_status (&last_ptid, &last);
1720
1721 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1722 {
1723 struct thread_info *tp = find_thread_ptid (lp->ptid);
1724
1725 signo = tp->suspend.stop_signal;
1726 }
1727 }
1728
1729 *status = 0;
1730
1731 if (signo == TARGET_SIGNAL_0)
1732 {
1733 if (debug_linux_nat)
1734 fprintf_unfiltered (gdb_stdlog,
1735 "GPT: lwp %s has no pending signal\n",
1736 target_pid_to_str (lp->ptid));
1737 }
1738 else if (!signal_pass_state (signo))
1739 {
1740 if (debug_linux_nat)
1741 fprintf_unfiltered (gdb_stdlog,
1742 "GPT: lwp %s had signal %s, "
1743 "but it is in no pass state\n",
1744 target_pid_to_str (lp->ptid),
1745 target_signal_to_string (signo));
1746 }
1747 else
1748 {
1749 *status = W_STOPCODE (target_signal_to_host (signo));
1750
1751 if (debug_linux_nat)
1752 fprintf_unfiltered (gdb_stdlog,
1753 "GPT: lwp %s has pending signal %s\n",
1754 target_pid_to_str (lp->ptid),
1755 target_signal_to_string (signo));
1756 }
1757
1758 return 0;
1759 }
1760
1761 static int
1762 detach_callback (struct lwp_info *lp, void *data)
1763 {
1764 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1765
1766 if (debug_linux_nat && lp->status)
1767 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1768 strsignal (WSTOPSIG (lp->status)),
1769 target_pid_to_str (lp->ptid));
1770
1771 /* If there is a pending SIGSTOP, get rid of it. */
1772 if (lp->signalled)
1773 {
1774 if (debug_linux_nat)
1775 fprintf_unfiltered (gdb_stdlog,
1776 "DC: Sending SIGCONT to %s\n",
1777 target_pid_to_str (lp->ptid));
1778
1779 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1780 lp->signalled = 0;
1781 }
1782
1783 /* We don't actually detach from the LWP that has an id equal to the
1784 overall process id just yet. */
1785 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1786 {
1787 int status = 0;
1788
1789 /* Pass on any pending signal for this LWP. */
1790 get_pending_status (lp, &status);
1791
1792 if (linux_nat_prepare_to_resume != NULL)
1793 linux_nat_prepare_to_resume (lp);
1794 errno = 0;
1795 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1796 WSTOPSIG (status)) < 0)
1797 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1798 safe_strerror (errno));
1799
1800 if (debug_linux_nat)
1801 fprintf_unfiltered (gdb_stdlog,
1802 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1803 target_pid_to_str (lp->ptid),
1804 strsignal (WSTOPSIG (status)));
1805
1806 delete_lwp (lp->ptid);
1807 }
1808
1809 return 0;
1810 }
1811
1812 static void
1813 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1814 {
1815 int pid;
1816 int status;
1817 struct lwp_info *main_lwp;
1818
1819 pid = GET_PID (inferior_ptid);
1820
1821 if (target_can_async_p ())
1822 linux_nat_async (NULL, 0);
1823
1824 /* Stop all threads before detaching. ptrace requires that the
1825 thread is stopped to sucessfully detach. */
1826 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1827 /* ... and wait until all of them have reported back that
1828 they're no longer running. */
1829 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1830
1831 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1832
1833 /* Only the initial process should be left right now. */
1834 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1835
1836 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1837
1838 /* Pass on any pending signal for the last LWP. */
1839 if ((args == NULL || *args == '\0')
1840 && get_pending_status (main_lwp, &status) != -1
1841 && WIFSTOPPED (status))
1842 {
1843 /* Put the signal number in ARGS so that inf_ptrace_detach will
1844 pass it along with PTRACE_DETACH. */
1845 args = alloca (8);
1846 sprintf (args, "%d", (int) WSTOPSIG (status));
1847 if (debug_linux_nat)
1848 fprintf_unfiltered (gdb_stdlog,
1849 "LND: Sending signal %s to %s\n",
1850 args,
1851 target_pid_to_str (main_lwp->ptid));
1852 }
1853
1854 if (linux_nat_prepare_to_resume != NULL)
1855 linux_nat_prepare_to_resume (main_lwp);
1856 delete_lwp (main_lwp->ptid);
1857
1858 if (forks_exist_p ())
1859 {
1860 /* Multi-fork case. The current inferior_ptid is being detached
1861 from, but there are other viable forks to debug. Detach from
1862 the current fork, and context-switch to the first
1863 available. */
1864 linux_fork_detach (args, from_tty);
1865
1866 if (non_stop && target_can_async_p ())
1867 target_async (inferior_event_handler, 0);
1868 }
1869 else
1870 linux_ops->to_detach (ops, args, from_tty);
1871 }
1872
1873 /* Resume LP. */
1874
1875 static void
1876 resume_lwp (struct lwp_info *lp, int step)
1877 {
1878 if (lp->stopped)
1879 {
1880 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1881
1882 if (inf->vfork_child != NULL)
1883 {
1884 if (debug_linux_nat)
1885 fprintf_unfiltered (gdb_stdlog,
1886 "RC: Not resuming %s (vfork parent)\n",
1887 target_pid_to_str (lp->ptid));
1888 }
1889 else if (lp->status == 0
1890 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1891 {
1892 if (debug_linux_nat)
1893 fprintf_unfiltered (gdb_stdlog,
1894 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1895 target_pid_to_str (lp->ptid));
1896
1897 if (linux_nat_prepare_to_resume != NULL)
1898 linux_nat_prepare_to_resume (lp);
1899 linux_ops->to_resume (linux_ops,
1900 pid_to_ptid (GET_LWP (lp->ptid)),
1901 step, TARGET_SIGNAL_0);
1902 lp->stopped = 0;
1903 lp->step = step;
1904 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1905 lp->stopped_by_watchpoint = 0;
1906 }
1907 else
1908 {
1909 if (debug_linux_nat)
1910 fprintf_unfiltered (gdb_stdlog,
1911 "RC: Not resuming sibling %s (has pending)\n",
1912 target_pid_to_str (lp->ptid));
1913 }
1914 }
1915 else
1916 {
1917 if (debug_linux_nat)
1918 fprintf_unfiltered (gdb_stdlog,
1919 "RC: Not resuming sibling %s (not stopped)\n",
1920 target_pid_to_str (lp->ptid));
1921 }
1922 }
1923
1924 static int
1925 resume_callback (struct lwp_info *lp, void *data)
1926 {
1927 resume_lwp (lp, 0);
1928 return 0;
1929 }
1930
1931 static int
1932 resume_clear_callback (struct lwp_info *lp, void *data)
1933 {
1934 lp->resumed = 0;
1935 lp->last_resume_kind = resume_stop;
1936 return 0;
1937 }
1938
1939 static int
1940 resume_set_callback (struct lwp_info *lp, void *data)
1941 {
1942 lp->resumed = 1;
1943 lp->last_resume_kind = resume_continue;
1944 return 0;
1945 }
1946
1947 static void
1948 linux_nat_resume (struct target_ops *ops,
1949 ptid_t ptid, int step, enum target_signal signo)
1950 {
1951 sigset_t prev_mask;
1952 struct lwp_info *lp;
1953 int resume_many;
1954
1955 if (debug_linux_nat)
1956 fprintf_unfiltered (gdb_stdlog,
1957 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1958 step ? "step" : "resume",
1959 target_pid_to_str (ptid),
1960 (signo != TARGET_SIGNAL_0
1961 ? strsignal (target_signal_to_host (signo)) : "0"),
1962 target_pid_to_str (inferior_ptid));
1963
1964 block_child_signals (&prev_mask);
1965
1966 /* A specific PTID means `step only this process id'. */
1967 resume_many = (ptid_equal (minus_one_ptid, ptid)
1968 || ptid_is_pid (ptid));
1969
1970 /* Mark the lwps we're resuming as resumed. */
1971 iterate_over_lwps (ptid, resume_set_callback, NULL);
1972
1973 /* See if it's the current inferior that should be handled
1974 specially. */
1975 if (resume_many)
1976 lp = find_lwp_pid (inferior_ptid);
1977 else
1978 lp = find_lwp_pid (ptid);
1979 gdb_assert (lp != NULL);
1980
1981 /* Remember if we're stepping. */
1982 lp->step = step;
1983 lp->last_resume_kind = step ? resume_step : resume_continue;
1984
1985 /* If we have a pending wait status for this thread, there is no
1986 point in resuming the process. But first make sure that
1987 linux_nat_wait won't preemptively handle the event - we
1988 should never take this short-circuit if we are going to
1989 leave LP running, since we have skipped resuming all the
1990 other threads. This bit of code needs to be synchronized
1991 with linux_nat_wait. */
1992
1993 if (lp->status && WIFSTOPPED (lp->status))
1994 {
1995 if (!lp->step
1996 && WSTOPSIG (lp->status)
1997 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1998 {
1999 if (debug_linux_nat)
2000 fprintf_unfiltered (gdb_stdlog,
2001 "LLR: Not short circuiting for ignored "
2002 "status 0x%x\n", lp->status);
2003
2004 /* FIXME: What should we do if we are supposed to continue
2005 this thread with a signal? */
2006 gdb_assert (signo == TARGET_SIGNAL_0);
2007 signo = target_signal_from_host (WSTOPSIG (lp->status));
2008 lp->status = 0;
2009 }
2010 }
2011
2012 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2013 {
2014 /* FIXME: What should we do if we are supposed to continue
2015 this thread with a signal? */
2016 gdb_assert (signo == TARGET_SIGNAL_0);
2017
2018 if (debug_linux_nat)
2019 fprintf_unfiltered (gdb_stdlog,
2020 "LLR: Short circuiting for status 0x%x\n",
2021 lp->status);
2022
2023 restore_child_signals_mask (&prev_mask);
2024 if (target_can_async_p ())
2025 {
2026 target_async (inferior_event_handler, 0);
2027 /* Tell the event loop we have something to process. */
2028 async_file_mark ();
2029 }
2030 return;
2031 }
2032
2033 /* Mark LWP as not stopped to prevent it from being continued by
2034 resume_callback. */
2035 lp->stopped = 0;
2036
2037 if (resume_many)
2038 iterate_over_lwps (ptid, resume_callback, NULL);
2039
2040 /* Convert to something the lower layer understands. */
2041 ptid = pid_to_ptid (GET_LWP (lp->ptid));
2042
2043 if (linux_nat_prepare_to_resume != NULL)
2044 linux_nat_prepare_to_resume (lp);
2045 linux_ops->to_resume (linux_ops, ptid, step, signo);
2046 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2047 lp->stopped_by_watchpoint = 0;
2048
2049 if (debug_linux_nat)
2050 fprintf_unfiltered (gdb_stdlog,
2051 "LLR: %s %s, %s (resume event thread)\n",
2052 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2053 target_pid_to_str (ptid),
2054 (signo != TARGET_SIGNAL_0
2055 ? strsignal (target_signal_to_host (signo)) : "0"));
2056
2057 restore_child_signals_mask (&prev_mask);
2058 if (target_can_async_p ())
2059 target_async (inferior_event_handler, 0);
2060 }
2061
2062 /* Send a signal to an LWP. */
2063
2064 static int
2065 kill_lwp (int lwpid, int signo)
2066 {
2067 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2068 fails, then we are not using nptl threads and we should be using kill. */
2069
2070 #ifdef HAVE_TKILL_SYSCALL
2071 {
2072 static int tkill_failed;
2073
2074 if (!tkill_failed)
2075 {
2076 int ret;
2077
2078 errno = 0;
2079 ret = syscall (__NR_tkill, lwpid, signo);
2080 if (errno != ENOSYS)
2081 return ret;
2082 tkill_failed = 1;
2083 }
2084 }
2085 #endif
2086
2087 return kill (lwpid, signo);
2088 }
2089
2090 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2091 event, check if the core is interested in it: if not, ignore the
2092 event, and keep waiting; otherwise, we need to toggle the LWP's
2093 syscall entry/exit status, since the ptrace event itself doesn't
2094 indicate it, and report the trap to higher layers. */
2095
2096 static int
2097 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2098 {
2099 struct target_waitstatus *ourstatus = &lp->waitstatus;
2100 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2101 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2102
2103 if (stopping)
2104 {
2105 /* If we're stopping threads, there's a SIGSTOP pending, which
2106 makes it so that the LWP reports an immediate syscall return,
2107 followed by the SIGSTOP. Skip seeing that "return" using
2108 PTRACE_CONT directly, and let stop_wait_callback collect the
2109 SIGSTOP. Later when the thread is resumed, a new syscall
2110 entry event. If we didn't do this (and returned 0), we'd
2111 leave a syscall entry pending, and our caller, by using
2112 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2113 itself. Later, when the user re-resumes this LWP, we'd see
2114 another syscall entry event and we'd mistake it for a return.
2115
2116 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2117 (leaving immediately with LWP->signalled set, without issuing
2118 a PTRACE_CONT), it would still be problematic to leave this
2119 syscall enter pending, as later when the thread is resumed,
2120 it would then see the same syscall exit mentioned above,
2121 followed by the delayed SIGSTOP, while the syscall didn't
2122 actually get to execute. It seems it would be even more
2123 confusing to the user. */
2124
2125 if (debug_linux_nat)
2126 fprintf_unfiltered (gdb_stdlog,
2127 "LHST: ignoring syscall %d "
2128 "for LWP %ld (stopping threads), "
2129 "resuming with PTRACE_CONT for SIGSTOP\n",
2130 syscall_number,
2131 GET_LWP (lp->ptid));
2132
2133 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2134 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2135 return 1;
2136 }
2137
2138 if (catch_syscall_enabled ())
2139 {
2140 /* Always update the entry/return state, even if this particular
2141 syscall isn't interesting to the core now. In async mode,
2142 the user could install a new catchpoint for this syscall
2143 between syscall enter/return, and we'll need to know to
2144 report a syscall return if that happens. */
2145 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2146 ? TARGET_WAITKIND_SYSCALL_RETURN
2147 : TARGET_WAITKIND_SYSCALL_ENTRY);
2148
2149 if (catching_syscall_number (syscall_number))
2150 {
2151 /* Alright, an event to report. */
2152 ourstatus->kind = lp->syscall_state;
2153 ourstatus->value.syscall_number = syscall_number;
2154
2155 if (debug_linux_nat)
2156 fprintf_unfiltered (gdb_stdlog,
2157 "LHST: stopping for %s of syscall %d"
2158 " for LWP %ld\n",
2159 lp->syscall_state
2160 == TARGET_WAITKIND_SYSCALL_ENTRY
2161 ? "entry" : "return",
2162 syscall_number,
2163 GET_LWP (lp->ptid));
2164 return 0;
2165 }
2166
2167 if (debug_linux_nat)
2168 fprintf_unfiltered (gdb_stdlog,
2169 "LHST: ignoring %s of syscall %d "
2170 "for LWP %ld\n",
2171 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2172 ? "entry" : "return",
2173 syscall_number,
2174 GET_LWP (lp->ptid));
2175 }
2176 else
2177 {
2178 /* If we had been syscall tracing, and hence used PT_SYSCALL
2179 before on this LWP, it could happen that the user removes all
2180 syscall catchpoints before we get to process this event.
2181 There are two noteworthy issues here:
2182
2183 - When stopped at a syscall entry event, resuming with
2184 PT_STEP still resumes executing the syscall and reports a
2185 syscall return.
2186
2187 - Only PT_SYSCALL catches syscall enters. If we last
2188 single-stepped this thread, then this event can't be a
2189 syscall enter. If we last single-stepped this thread, this
2190 has to be a syscall exit.
2191
2192 The points above mean that the next resume, be it PT_STEP or
2193 PT_CONTINUE, can not trigger a syscall trace event. */
2194 if (debug_linux_nat)
2195 fprintf_unfiltered (gdb_stdlog,
2196 "LHST: caught syscall event "
2197 "with no syscall catchpoints."
2198 " %d for LWP %ld, ignoring\n",
2199 syscall_number,
2200 GET_LWP (lp->ptid));
2201 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2202 }
2203
2204 /* The core isn't interested in this event. For efficiency, avoid
2205 stopping all threads only to have the core resume them all again.
2206 Since we're not stopping threads, if we're still syscall tracing
2207 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2208 subsequent syscall. Simply resume using the inf-ptrace layer,
2209 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2210
2211 /* Note that gdbarch_get_syscall_number may access registers, hence
2212 fill a regcache. */
2213 registers_changed ();
2214 if (linux_nat_prepare_to_resume != NULL)
2215 linux_nat_prepare_to_resume (lp);
2216 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2217 lp->step, TARGET_SIGNAL_0);
2218 return 1;
2219 }
2220
2221 /* Handle a GNU/Linux extended wait response. If we see a clone
2222 event, we need to add the new LWP to our list (and not report the
2223 trap to higher layers). This function returns non-zero if the
2224 event should be ignored and we should wait again. If STOPPING is
2225 true, the new LWP remains stopped, otherwise it is continued. */
2226
2227 static int
2228 linux_handle_extended_wait (struct lwp_info *lp, int status,
2229 int stopping)
2230 {
2231 int pid = GET_LWP (lp->ptid);
2232 struct target_waitstatus *ourstatus = &lp->waitstatus;
2233 int event = status >> 16;
2234
2235 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2236 || event == PTRACE_EVENT_CLONE)
2237 {
2238 unsigned long new_pid;
2239 int ret;
2240
2241 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2242
2243 /* If we haven't already seen the new PID stop, wait for it now. */
2244 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2245 {
2246 /* The new child has a pending SIGSTOP. We can't affect it until it
2247 hits the SIGSTOP, but we're already attached. */
2248 ret = my_waitpid (new_pid, &status,
2249 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2250 if (ret == -1)
2251 perror_with_name (_("waiting for new child"));
2252 else if (ret != new_pid)
2253 internal_error (__FILE__, __LINE__,
2254 _("wait returned unexpected PID %d"), ret);
2255 else if (!WIFSTOPPED (status))
2256 internal_error (__FILE__, __LINE__,
2257 _("wait returned unexpected status 0x%x"), status);
2258 }
2259
2260 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2261
2262 if (event == PTRACE_EVENT_FORK
2263 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2264 {
2265 /* Handle checkpointing by linux-fork.c here as a special
2266 case. We don't want the follow-fork-mode or 'catch fork'
2267 to interfere with this. */
2268
2269 /* This won't actually modify the breakpoint list, but will
2270 physically remove the breakpoints from the child. */
2271 detach_breakpoints (new_pid);
2272
2273 /* Retain child fork in ptrace (stopped) state. */
2274 if (!find_fork_pid (new_pid))
2275 add_fork (new_pid);
2276
2277 /* Report as spurious, so that infrun doesn't want to follow
2278 this fork. We're actually doing an infcall in
2279 linux-fork.c. */
2280 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2281 linux_enable_event_reporting (pid_to_ptid (new_pid));
2282
2283 /* Report the stop to the core. */
2284 return 0;
2285 }
2286
2287 if (event == PTRACE_EVENT_FORK)
2288 ourstatus->kind = TARGET_WAITKIND_FORKED;
2289 else if (event == PTRACE_EVENT_VFORK)
2290 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2291 else
2292 {
2293 struct lwp_info *new_lp;
2294
2295 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2296
2297 if (debug_linux_nat)
2298 fprintf_unfiltered (gdb_stdlog,
2299 "LHEW: Got clone event "
2300 "from LWP %d, new child is LWP %ld\n",
2301 pid, new_pid);
2302
2303 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2304 new_lp->cloned = 1;
2305 new_lp->stopped = 1;
2306
2307 if (WSTOPSIG (status) != SIGSTOP)
2308 {
2309 /* This can happen if someone starts sending signals to
2310 the new thread before it gets a chance to run, which
2311 have a lower number than SIGSTOP (e.g. SIGUSR1).
2312 This is an unlikely case, and harder to handle for
2313 fork / vfork than for clone, so we do not try - but
2314 we handle it for clone events here. We'll send
2315 the other signal on to the thread below. */
2316
2317 new_lp->signalled = 1;
2318 }
2319 else
2320 {
2321 struct thread_info *tp;
2322
2323 /* When we stop for an event in some other thread, and
2324 pull the thread list just as this thread has cloned,
2325 we'll have seen the new thread in the thread_db list
2326 before handling the CLONE event (glibc's
2327 pthread_create adds the new thread to the thread list
2328 before clone'ing, and has the kernel fill in the
2329 thread's tid on the clone call with
2330 CLONE_PARENT_SETTID). If that happened, and the core
2331 had requested the new thread to stop, we'll have
2332 killed it with SIGSTOP. But since SIGSTOP is not an
2333 RT signal, it can only be queued once. We need to be
2334 careful to not resume the LWP if we wanted it to
2335 stop. In that case, we'll leave the SIGSTOP pending.
2336 It will later be reported as TARGET_SIGNAL_0. */
2337 tp = find_thread_ptid (new_lp->ptid);
2338 if (tp != NULL && tp->stop_requested)
2339 new_lp->last_resume_kind = resume_stop;
2340 else
2341 status = 0;
2342 }
2343
2344 if (non_stop)
2345 {
2346 /* Add the new thread to GDB's lists as soon as possible
2347 so that:
2348
2349 1) the frontend doesn't have to wait for a stop to
2350 display them, and,
2351
2352 2) we tag it with the correct running state. */
2353
2354 /* If the thread_db layer is active, let it know about
2355 this new thread, and add it to GDB's list. */
2356 if (!thread_db_attach_lwp (new_lp->ptid))
2357 {
2358 /* We're not using thread_db. Add it to GDB's
2359 list. */
2360 target_post_attach (GET_LWP (new_lp->ptid));
2361 add_thread (new_lp->ptid);
2362 }
2363
2364 if (!stopping)
2365 {
2366 set_running (new_lp->ptid, 1);
2367 set_executing (new_lp->ptid, 1);
2368 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2369 resume_stop. */
2370 new_lp->last_resume_kind = resume_continue;
2371 }
2372 }
2373
2374 if (status != 0)
2375 {
2376 /* We created NEW_LP so it cannot yet contain STATUS. */
2377 gdb_assert (new_lp->status == 0);
2378
2379 /* Save the wait status to report later. */
2380 if (debug_linux_nat)
2381 fprintf_unfiltered (gdb_stdlog,
2382 "LHEW: waitpid of new LWP %ld, "
2383 "saving status %s\n",
2384 (long) GET_LWP (new_lp->ptid),
2385 status_to_str (status));
2386 new_lp->status = status;
2387 }
2388
2389 /* Note the need to use the low target ops to resume, to
2390 handle resuming with PT_SYSCALL if we have syscall
2391 catchpoints. */
2392 if (!stopping)
2393 {
2394 new_lp->resumed = 1;
2395
2396 if (status == 0)
2397 {
2398 gdb_assert (new_lp->last_resume_kind == resume_continue);
2399 if (debug_linux_nat)
2400 fprintf_unfiltered (gdb_stdlog,
2401 "LHEW: resuming new LWP %ld\n",
2402 GET_LWP (new_lp->ptid));
2403 if (linux_nat_prepare_to_resume != NULL)
2404 linux_nat_prepare_to_resume (new_lp);
2405 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2406 0, TARGET_SIGNAL_0);
2407 new_lp->stopped = 0;
2408 }
2409 }
2410
2411 if (debug_linux_nat)
2412 fprintf_unfiltered (gdb_stdlog,
2413 "LHEW: resuming parent LWP %d\n", pid);
2414 if (linux_nat_prepare_to_resume != NULL)
2415 linux_nat_prepare_to_resume (lp);
2416 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2417 0, TARGET_SIGNAL_0);
2418
2419 return 1;
2420 }
2421
2422 return 0;
2423 }
2424
2425 if (event == PTRACE_EVENT_EXEC)
2426 {
2427 if (debug_linux_nat)
2428 fprintf_unfiltered (gdb_stdlog,
2429 "LHEW: Got exec event from LWP %ld\n",
2430 GET_LWP (lp->ptid));
2431
2432 ourstatus->kind = TARGET_WAITKIND_EXECD;
2433 ourstatus->value.execd_pathname
2434 = xstrdup (linux_child_pid_to_exec_file (pid));
2435
2436 return 0;
2437 }
2438
2439 if (event == PTRACE_EVENT_VFORK_DONE)
2440 {
2441 if (current_inferior ()->waiting_for_vfork_done)
2442 {
2443 if (debug_linux_nat)
2444 fprintf_unfiltered (gdb_stdlog,
2445 "LHEW: Got expected PTRACE_EVENT_"
2446 "VFORK_DONE from LWP %ld: stopping\n",
2447 GET_LWP (lp->ptid));
2448
2449 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2450 return 0;
2451 }
2452
2453 if (debug_linux_nat)
2454 fprintf_unfiltered (gdb_stdlog,
2455 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2456 "from LWP %ld: resuming\n",
2457 GET_LWP (lp->ptid));
2458 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2459 return 1;
2460 }
2461
2462 internal_error (__FILE__, __LINE__,
2463 _("unknown ptrace event %d"), event);
2464 }
2465
2466 /* Return non-zero if LWP is a zombie. */
2467
2468 static int
2469 linux_lwp_is_zombie (long lwp)
2470 {
2471 char buffer[MAXPATHLEN];
2472 FILE *procfile;
2473 int retval;
2474 int have_state;
2475
2476 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
2477 procfile = fopen (buffer, "r");
2478 if (procfile == NULL)
2479 {
2480 warning (_("unable to open /proc file '%s'"), buffer);
2481 return 0;
2482 }
2483
2484 have_state = 0;
2485 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2486 if (strncmp (buffer, "State:", 6) == 0)
2487 {
2488 have_state = 1;
2489 break;
2490 }
2491 retval = (have_state
2492 && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
2493 fclose (procfile);
2494 return retval;
2495 }
2496
2497 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2498 exited. */
2499
2500 static int
2501 wait_lwp (struct lwp_info *lp)
2502 {
2503 pid_t pid;
2504 int status = 0;
2505 int thread_dead = 0;
2506 sigset_t prev_mask;
2507
2508 gdb_assert (!lp->stopped);
2509 gdb_assert (lp->status == 0);
2510
2511 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2512 block_child_signals (&prev_mask);
2513
2514 for (;;)
2515 {
2516 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2517 was right and we should just call sigsuspend. */
2518
2519 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2520 if (pid == -1 && errno == ECHILD)
2521 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2522 if (pid == -1 && errno == ECHILD)
2523 {
2524 /* The thread has previously exited. We need to delete it
2525 now because, for some vendor 2.4 kernels with NPTL
2526 support backported, there won't be an exit event unless
2527 it is the main thread. 2.6 kernels will report an exit
2528 event for each thread that exits, as expected. */
2529 thread_dead = 1;
2530 if (debug_linux_nat)
2531 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2532 target_pid_to_str (lp->ptid));
2533 }
2534 if (pid != 0)
2535 break;
2536
2537 /* Bugs 10970, 12702.
2538 Thread group leader may have exited in which case we'll lock up in
2539 waitpid if there are other threads, even if they are all zombies too.
2540 Basically, we're not supposed to use waitpid this way.
2541 __WCLONE is not applicable for the leader so we can't use that.
2542 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2543 process; it gets ESRCH both for the zombie and for running processes.
2544
2545 As a workaround, check if we're waiting for the thread group leader and
2546 if it's a zombie, and avoid calling waitpid if it is.
2547
2548 This is racy, what if the tgl becomes a zombie right after we check?
2549 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2550 waiting waitpid but the linux_lwp_is_zombie is safe this way. */
2551
2552 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2553 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
2554 {
2555 thread_dead = 1;
2556 if (debug_linux_nat)
2557 fprintf_unfiltered (gdb_stdlog,
2558 "WL: Thread group leader %s vanished.\n",
2559 target_pid_to_str (lp->ptid));
2560 break;
2561 }
2562
2563 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2564 get invoked despite our caller had them intentionally blocked by
2565 block_child_signals. This is sensitive only to the loop of
2566 linux_nat_wait_1 and there if we get called my_waitpid gets called
2567 again before it gets to sigsuspend so we can safely let the handlers
2568 get executed here. */
2569
2570 sigsuspend (&suspend_mask);
2571 }
2572
2573 restore_child_signals_mask (&prev_mask);
2574
2575 if (!thread_dead)
2576 {
2577 gdb_assert (pid == GET_LWP (lp->ptid));
2578
2579 if (debug_linux_nat)
2580 {
2581 fprintf_unfiltered (gdb_stdlog,
2582 "WL: waitpid %s received %s\n",
2583 target_pid_to_str (lp->ptid),
2584 status_to_str (status));
2585 }
2586
2587 /* Check if the thread has exited. */
2588 if (WIFEXITED (status) || WIFSIGNALED (status))
2589 {
2590 thread_dead = 1;
2591 if (debug_linux_nat)
2592 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2593 target_pid_to_str (lp->ptid));
2594 }
2595 }
2596
2597 if (thread_dead)
2598 {
2599 exit_lwp (lp);
2600 return 0;
2601 }
2602
2603 gdb_assert (WIFSTOPPED (status));
2604
2605 /* Handle GNU/Linux's syscall SIGTRAPs. */
2606 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2607 {
2608 /* No longer need the sysgood bit. The ptrace event ends up
2609 recorded in lp->waitstatus if we care for it. We can carry
2610 on handling the event like a regular SIGTRAP from here
2611 on. */
2612 status = W_STOPCODE (SIGTRAP);
2613 if (linux_handle_syscall_trap (lp, 1))
2614 return wait_lwp (lp);
2615 }
2616
2617 /* Handle GNU/Linux's extended waitstatus for trace events. */
2618 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2619 {
2620 if (debug_linux_nat)
2621 fprintf_unfiltered (gdb_stdlog,
2622 "WL: Handling extended status 0x%06x\n",
2623 status);
2624 if (linux_handle_extended_wait (lp, status, 1))
2625 return wait_lwp (lp);
2626 }
2627
2628 return status;
2629 }
2630
2631 /* Save the most recent siginfo for LP. This is currently only called
2632 for SIGTRAP; some ports use the si_addr field for
2633 target_stopped_data_address. In the future, it may also be used to
2634 restore the siginfo of requeued signals. */
2635
2636 static void
2637 save_siginfo (struct lwp_info *lp)
2638 {
2639 errno = 0;
2640 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2641 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2642
2643 if (errno != 0)
2644 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2645 }
2646
2647 /* Send a SIGSTOP to LP. */
2648
2649 static int
2650 stop_callback (struct lwp_info *lp, void *data)
2651 {
2652 if (!lp->stopped && !lp->signalled)
2653 {
2654 int ret;
2655
2656 if (debug_linux_nat)
2657 {
2658 fprintf_unfiltered (gdb_stdlog,
2659 "SC: kill %s **<SIGSTOP>**\n",
2660 target_pid_to_str (lp->ptid));
2661 }
2662 errno = 0;
2663 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2664 if (debug_linux_nat)
2665 {
2666 fprintf_unfiltered (gdb_stdlog,
2667 "SC: lwp kill %d %s\n",
2668 ret,
2669 errno ? safe_strerror (errno) : "ERRNO-OK");
2670 }
2671
2672 lp->signalled = 1;
2673 gdb_assert (lp->status == 0);
2674 }
2675
2676 return 0;
2677 }
2678
2679 /* Request a stop on LWP. */
2680
2681 void
2682 linux_stop_lwp (struct lwp_info *lwp)
2683 {
2684 stop_callback (lwp, NULL);
2685 }
2686
2687 /* Return non-zero if LWP PID has a pending SIGINT. */
2688
2689 static int
2690 linux_nat_has_pending_sigint (int pid)
2691 {
2692 sigset_t pending, blocked, ignored;
2693
2694 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2695
2696 if (sigismember (&pending, SIGINT)
2697 && !sigismember (&ignored, SIGINT))
2698 return 1;
2699
2700 return 0;
2701 }
2702
2703 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2704
2705 static int
2706 set_ignore_sigint (struct lwp_info *lp, void *data)
2707 {
2708 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2709 flag to consume the next one. */
2710 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2711 && WSTOPSIG (lp->status) == SIGINT)
2712 lp->status = 0;
2713 else
2714 lp->ignore_sigint = 1;
2715
2716 return 0;
2717 }
2718
2719 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2720 This function is called after we know the LWP has stopped; if the LWP
2721 stopped before the expected SIGINT was delivered, then it will never have
2722 arrived. Also, if the signal was delivered to a shared queue and consumed
2723 by a different thread, it will never be delivered to this LWP. */
2724
2725 static void
2726 maybe_clear_ignore_sigint (struct lwp_info *lp)
2727 {
2728 if (!lp->ignore_sigint)
2729 return;
2730
2731 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2732 {
2733 if (debug_linux_nat)
2734 fprintf_unfiltered (gdb_stdlog,
2735 "MCIS: Clearing bogus flag for %s\n",
2736 target_pid_to_str (lp->ptid));
2737 lp->ignore_sigint = 0;
2738 }
2739 }
2740
2741 /* Fetch the possible triggered data watchpoint info and store it in
2742 LP.
2743
2744 On some archs, like x86, that use debug registers to set
2745 watchpoints, it's possible that the way to know which watched
2746 address trapped, is to check the register that is used to select
2747 which address to watch. Problem is, between setting the watchpoint
2748 and reading back which data address trapped, the user may change
2749 the set of watchpoints, and, as a consequence, GDB changes the
2750 debug registers in the inferior. To avoid reading back a stale
2751 stopped-data-address when that happens, we cache in LP the fact
2752 that a watchpoint trapped, and the corresponding data address, as
2753 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2754 registers meanwhile, we have the cached data we can rely on. */
2755
2756 static void
2757 save_sigtrap (struct lwp_info *lp)
2758 {
2759 struct cleanup *old_chain;
2760
2761 if (linux_ops->to_stopped_by_watchpoint == NULL)
2762 {
2763 lp->stopped_by_watchpoint = 0;
2764 return;
2765 }
2766
2767 old_chain = save_inferior_ptid ();
2768 inferior_ptid = lp->ptid;
2769
2770 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2771
2772 if (lp->stopped_by_watchpoint)
2773 {
2774 if (linux_ops->to_stopped_data_address != NULL)
2775 lp->stopped_data_address_p =
2776 linux_ops->to_stopped_data_address (&current_target,
2777 &lp->stopped_data_address);
2778 else
2779 lp->stopped_data_address_p = 0;
2780 }
2781
2782 do_cleanups (old_chain);
2783 }
2784
2785 /* See save_sigtrap. */
2786
2787 static int
2788 linux_nat_stopped_by_watchpoint (void)
2789 {
2790 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2791
2792 gdb_assert (lp != NULL);
2793
2794 return lp->stopped_by_watchpoint;
2795 }
2796
2797 static int
2798 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2799 {
2800 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2801
2802 gdb_assert (lp != NULL);
2803
2804 *addr_p = lp->stopped_data_address;
2805
2806 return lp->stopped_data_address_p;
2807 }
2808
2809 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2810
2811 static int
2812 sigtrap_is_event (int status)
2813 {
2814 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2815 }
2816
2817 /* SIGTRAP-like events recognizer. */
2818
2819 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2820
2821 /* Check for SIGTRAP-like events in LP. */
2822
2823 static int
2824 linux_nat_lp_status_is_event (struct lwp_info *lp)
2825 {
2826 /* We check for lp->waitstatus in addition to lp->status, because we can
2827 have pending process exits recorded in lp->status
2828 and W_EXITCODE(0,0) == 0. We should probably have an additional
2829 lp->status_p flag. */
2830
2831 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2832 && linux_nat_status_is_event (lp->status));
2833 }
2834
2835 /* Set alternative SIGTRAP-like events recognizer. If
2836 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2837 applied. */
2838
2839 void
2840 linux_nat_set_status_is_event (struct target_ops *t,
2841 int (*status_is_event) (int status))
2842 {
2843 linux_nat_status_is_event = status_is_event;
2844 }
2845
2846 /* Wait until LP is stopped. */
2847
2848 static int
2849 stop_wait_callback (struct lwp_info *lp, void *data)
2850 {
2851 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2852
2853 /* If this is a vfork parent, bail out, it is not going to report
2854 any SIGSTOP until the vfork is done with. */
2855 if (inf->vfork_child != NULL)
2856 return 0;
2857
2858 if (!lp->stopped)
2859 {
2860 int status;
2861
2862 status = wait_lwp (lp);
2863 if (status == 0)
2864 return 0;
2865
2866 if (lp->ignore_sigint && WIFSTOPPED (status)
2867 && WSTOPSIG (status) == SIGINT)
2868 {
2869 lp->ignore_sigint = 0;
2870
2871 errno = 0;
2872 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2873 if (debug_linux_nat)
2874 fprintf_unfiltered (gdb_stdlog,
2875 "PTRACE_CONT %s, 0, 0 (%s) "
2876 "(discarding SIGINT)\n",
2877 target_pid_to_str (lp->ptid),
2878 errno ? safe_strerror (errno) : "OK");
2879
2880 return stop_wait_callback (lp, NULL);
2881 }
2882
2883 maybe_clear_ignore_sigint (lp);
2884
2885 if (WSTOPSIG (status) != SIGSTOP)
2886 {
2887 if (linux_nat_status_is_event (status))
2888 {
2889 /* If a LWP other than the LWP that we're reporting an
2890 event for has hit a GDB breakpoint (as opposed to
2891 some random trap signal), then just arrange for it to
2892 hit it again later. We don't keep the SIGTRAP status
2893 and don't forward the SIGTRAP signal to the LWP. We
2894 will handle the current event, eventually we will
2895 resume all LWPs, and this one will get its breakpoint
2896 trap again.
2897
2898 If we do not do this, then we run the risk that the
2899 user will delete or disable the breakpoint, but the
2900 thread will have already tripped on it. */
2901
2902 /* Save the trap's siginfo in case we need it later. */
2903 save_siginfo (lp);
2904
2905 save_sigtrap (lp);
2906
2907 /* Now resume this LWP and get the SIGSTOP event. */
2908 errno = 0;
2909 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2910 if (debug_linux_nat)
2911 {
2912 fprintf_unfiltered (gdb_stdlog,
2913 "PTRACE_CONT %s, 0, 0 (%s)\n",
2914 target_pid_to_str (lp->ptid),
2915 errno ? safe_strerror (errno) : "OK");
2916
2917 fprintf_unfiltered (gdb_stdlog,
2918 "SWC: Candidate SIGTRAP event in %s\n",
2919 target_pid_to_str (lp->ptid));
2920 }
2921 /* Hold this event/waitstatus while we check to see if
2922 there are any more (we still want to get that SIGSTOP). */
2923 stop_wait_callback (lp, NULL);
2924
2925 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2926 there's another event, throw it back into the
2927 queue. */
2928 if (lp->status)
2929 {
2930 if (debug_linux_nat)
2931 fprintf_unfiltered (gdb_stdlog,
2932 "SWC: kill %s, %s\n",
2933 target_pid_to_str (lp->ptid),
2934 status_to_str ((int) status));
2935 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
2936 }
2937
2938 /* Save the sigtrap event. */
2939 lp->status = status;
2940 return 0;
2941 }
2942 else
2943 {
2944 /* The thread was stopped with a signal other than
2945 SIGSTOP, and didn't accidentally trip a breakpoint. */
2946
2947 if (debug_linux_nat)
2948 {
2949 fprintf_unfiltered (gdb_stdlog,
2950 "SWC: Pending event %s in %s\n",
2951 status_to_str ((int) status),
2952 target_pid_to_str (lp->ptid));
2953 }
2954 /* Now resume this LWP and get the SIGSTOP event. */
2955 errno = 0;
2956 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2957 if (debug_linux_nat)
2958 fprintf_unfiltered (gdb_stdlog,
2959 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2960 target_pid_to_str (lp->ptid),
2961 errno ? safe_strerror (errno) : "OK");
2962
2963 /* Hold this event/waitstatus while we check to see if
2964 there are any more (we still want to get that SIGSTOP). */
2965 stop_wait_callback (lp, NULL);
2966
2967 /* If the lp->status field is still empty, use it to
2968 hold this event. If not, then this event must be
2969 returned to the event queue of the LWP. */
2970 if (lp->status)
2971 {
2972 if (debug_linux_nat)
2973 {
2974 fprintf_unfiltered (gdb_stdlog,
2975 "SWC: kill %s, %s\n",
2976 target_pid_to_str (lp->ptid),
2977 status_to_str ((int) status));
2978 }
2979 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2980 }
2981 else
2982 lp->status = status;
2983 return 0;
2984 }
2985 }
2986 else
2987 {
2988 /* We caught the SIGSTOP that we intended to catch, so
2989 there's no SIGSTOP pending. */
2990 lp->stopped = 1;
2991 lp->signalled = 0;
2992 }
2993 }
2994
2995 return 0;
2996 }
2997
2998 /* Return non-zero if LP has a wait status pending. */
2999
3000 static int
3001 status_callback (struct lwp_info *lp, void *data)
3002 {
3003 /* Only report a pending wait status if we pretend that this has
3004 indeed been resumed. */
3005 if (!lp->resumed)
3006 return 0;
3007
3008 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3009 {
3010 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
3011 or a pending process exit. Note that `W_EXITCODE(0,0) ==
3012 0', so a clean process exit can not be stored pending in
3013 lp->status, it is indistinguishable from
3014 no-pending-status. */
3015 return 1;
3016 }
3017
3018 if (lp->status != 0)
3019 return 1;
3020
3021 return 0;
3022 }
3023
3024 /* Return non-zero if LP isn't stopped. */
3025
3026 static int
3027 running_callback (struct lwp_info *lp, void *data)
3028 {
3029 return (!lp->stopped
3030 || ((lp->status != 0
3031 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3032 && lp->resumed));
3033 }
3034
3035 /* Count the LWP's that have had events. */
3036
3037 static int
3038 count_events_callback (struct lwp_info *lp, void *data)
3039 {
3040 int *count = data;
3041
3042 gdb_assert (count != NULL);
3043
3044 /* Count only resumed LWPs that have a SIGTRAP event pending. */
3045 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3046 (*count)++;
3047
3048 return 0;
3049 }
3050
3051 /* Select the LWP (if any) that is currently being single-stepped. */
3052
3053 static int
3054 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3055 {
3056 if (lp->last_resume_kind == resume_step
3057 && lp->status != 0)
3058 return 1;
3059 else
3060 return 0;
3061 }
3062
3063 /* Select the Nth LWP that has had a SIGTRAP event. */
3064
3065 static int
3066 select_event_lwp_callback (struct lwp_info *lp, void *data)
3067 {
3068 int *selector = data;
3069
3070 gdb_assert (selector != NULL);
3071
3072 /* Select only resumed LWPs that have a SIGTRAP event pending. */
3073 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3074 if ((*selector)-- == 0)
3075 return 1;
3076
3077 return 0;
3078 }
3079
3080 static int
3081 cancel_breakpoint (struct lwp_info *lp)
3082 {
3083 /* Arrange for a breakpoint to be hit again later. We don't keep
3084 the SIGTRAP status and don't forward the SIGTRAP signal to the
3085 LWP. We will handle the current event, eventually we will resume
3086 this LWP, and this breakpoint will trap again.
3087
3088 If we do not do this, then we run the risk that the user will
3089 delete or disable the breakpoint, but the LWP will have already
3090 tripped on it. */
3091
3092 struct regcache *regcache = get_thread_regcache (lp->ptid);
3093 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3094 CORE_ADDR pc;
3095
3096 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3097 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3098 {
3099 if (debug_linux_nat)
3100 fprintf_unfiltered (gdb_stdlog,
3101 "CB: Push back breakpoint for %s\n",
3102 target_pid_to_str (lp->ptid));
3103
3104 /* Back up the PC if necessary. */
3105 if (gdbarch_decr_pc_after_break (gdbarch))
3106 regcache_write_pc (regcache, pc);
3107
3108 return 1;
3109 }
3110 return 0;
3111 }
3112
3113 static int
3114 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3115 {
3116 struct lwp_info *event_lp = data;
3117
3118 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3119 if (lp == event_lp)
3120 return 0;
3121
3122 /* If a LWP other than the LWP that we're reporting an event for has
3123 hit a GDB breakpoint (as opposed to some random trap signal),
3124 then just arrange for it to hit it again later. We don't keep
3125 the SIGTRAP status and don't forward the SIGTRAP signal to the
3126 LWP. We will handle the current event, eventually we will resume
3127 all LWPs, and this one will get its breakpoint trap again.
3128
3129 If we do not do this, then we run the risk that the user will
3130 delete or disable the breakpoint, but the LWP will have already
3131 tripped on it. */
3132
3133 if (linux_nat_lp_status_is_event (lp)
3134 && cancel_breakpoint (lp))
3135 /* Throw away the SIGTRAP. */
3136 lp->status = 0;
3137
3138 return 0;
3139 }
3140
3141 /* Select one LWP out of those that have events pending. */
3142
3143 static void
3144 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3145 {
3146 int num_events = 0;
3147 int random_selector;
3148 struct lwp_info *event_lp;
3149
3150 /* Record the wait status for the original LWP. */
3151 (*orig_lp)->status = *status;
3152
3153 /* Give preference to any LWP that is being single-stepped. */
3154 event_lp = iterate_over_lwps (filter,
3155 select_singlestep_lwp_callback, NULL);
3156 if (event_lp != NULL)
3157 {
3158 if (debug_linux_nat)
3159 fprintf_unfiltered (gdb_stdlog,
3160 "SEL: Select single-step %s\n",
3161 target_pid_to_str (event_lp->ptid));
3162 }
3163 else
3164 {
3165 /* No single-stepping LWP. Select one at random, out of those
3166 which have had SIGTRAP events. */
3167
3168 /* First see how many SIGTRAP events we have. */
3169 iterate_over_lwps (filter, count_events_callback, &num_events);
3170
3171 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3172 random_selector = (int)
3173 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3174
3175 if (debug_linux_nat && num_events > 1)
3176 fprintf_unfiltered (gdb_stdlog,
3177 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3178 num_events, random_selector);
3179
3180 event_lp = iterate_over_lwps (filter,
3181 select_event_lwp_callback,
3182 &random_selector);
3183 }
3184
3185 if (event_lp != NULL)
3186 {
3187 /* Switch the event LWP. */
3188 *orig_lp = event_lp;
3189 *status = event_lp->status;
3190 }
3191
3192 /* Flush the wait status for the event LWP. */
3193 (*orig_lp)->status = 0;
3194 }
3195
3196 /* Return non-zero if LP has been resumed. */
3197
3198 static int
3199 resumed_callback (struct lwp_info *lp, void *data)
3200 {
3201 return lp->resumed;
3202 }
3203
3204 /* Stop an active thread, verify it still exists, then resume it. If
3205 the thread ends up with a pending status, then it is not resumed,
3206 and *DATA (really a pointer to int), is set. */
3207
3208 static int
3209 stop_and_resume_callback (struct lwp_info *lp, void *data)
3210 {
3211 int *new_pending_p = data;
3212
3213 if (!lp->stopped)
3214 {
3215 ptid_t ptid = lp->ptid;
3216
3217 stop_callback (lp, NULL);
3218 stop_wait_callback (lp, NULL);
3219
3220 /* Resume if the lwp still exists, and the core wanted it
3221 running. */
3222 lp = find_lwp_pid (ptid);
3223 if (lp != NULL)
3224 {
3225 if (lp->last_resume_kind == resume_stop
3226 && lp->status == 0)
3227 {
3228 /* The core wanted the LWP to stop. Even if it stopped
3229 cleanly (with SIGSTOP), leave the event pending. */
3230 if (debug_linux_nat)
3231 fprintf_unfiltered (gdb_stdlog,
3232 "SARC: core wanted LWP %ld stopped "
3233 "(leaving SIGSTOP pending)\n",
3234 GET_LWP (lp->ptid));
3235 lp->status = W_STOPCODE (SIGSTOP);
3236 }
3237
3238 if (lp->status == 0)
3239 {
3240 if (debug_linux_nat)
3241 fprintf_unfiltered (gdb_stdlog,
3242 "SARC: re-resuming LWP %ld\n",
3243 GET_LWP (lp->ptid));
3244 resume_lwp (lp, lp->step);
3245 }
3246 else
3247 {
3248 if (debug_linux_nat)
3249 fprintf_unfiltered (gdb_stdlog,
3250 "SARC: not re-resuming LWP %ld "
3251 "(has pending)\n",
3252 GET_LWP (lp->ptid));
3253 if (new_pending_p)
3254 *new_pending_p = 1;
3255 }
3256 }
3257 }
3258 return 0;
3259 }
3260
3261 /* Check if we should go on and pass this event to common code.
3262 Return the affected lwp if we are, or NULL otherwise. If we stop
3263 all lwps temporarily, we may end up with new pending events in some
3264 other lwp. In that case set *NEW_PENDING_P to true. */
3265
3266 static struct lwp_info *
3267 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3268 {
3269 struct lwp_info *lp;
3270
3271 *new_pending_p = 0;
3272
3273 lp = find_lwp_pid (pid_to_ptid (lwpid));
3274
3275 /* Check for stop events reported by a process we didn't already
3276 know about - anything not already in our LWP list.
3277
3278 If we're expecting to receive stopped processes after
3279 fork, vfork, and clone events, then we'll just add the
3280 new one to our list and go back to waiting for the event
3281 to be reported - the stopped process might be returned
3282 from waitpid before or after the event is.
3283
3284 But note the case of a non-leader thread exec'ing after the
3285 leader having exited, and gone from our lists. The non-leader
3286 thread changes its tid to the tgid. */
3287
3288 if (WIFSTOPPED (status) && lp == NULL
3289 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3290 {
3291 /* A multi-thread exec after we had seen the leader exiting. */
3292 if (debug_linux_nat)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "LLW: Re-adding thread group leader LWP %d.\n",
3295 lwpid);
3296
3297 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3298 lp->stopped = 1;
3299 lp->resumed = 1;
3300 add_thread (lp->ptid);
3301 }
3302
3303 if (WIFSTOPPED (status) && !lp)
3304 {
3305 add_to_pid_list (&stopped_pids, lwpid, status);
3306 return NULL;
3307 }
3308
3309 /* Make sure we don't report an event for the exit of an LWP not in
3310 our list, i.e. not part of the current process. This can happen
3311 if we detach from a program we originally forked and then it
3312 exits. */
3313 if (!WIFSTOPPED (status) && !lp)
3314 return NULL;
3315
3316 /* Handle GNU/Linux's syscall SIGTRAPs. */
3317 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3318 {
3319 /* No longer need the sysgood bit. The ptrace event ends up
3320 recorded in lp->waitstatus if we care for it. We can carry
3321 on handling the event like a regular SIGTRAP from here
3322 on. */
3323 status = W_STOPCODE (SIGTRAP);
3324 if (linux_handle_syscall_trap (lp, 0))
3325 return NULL;
3326 }
3327
3328 /* Handle GNU/Linux's extended waitstatus for trace events. */
3329 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3330 {
3331 if (debug_linux_nat)
3332 fprintf_unfiltered (gdb_stdlog,
3333 "LLW: Handling extended status 0x%06x\n",
3334 status);
3335 if (linux_handle_extended_wait (lp, status, 0))
3336 return NULL;
3337 }
3338
3339 if (linux_nat_status_is_event (status))
3340 {
3341 /* Save the trap's siginfo in case we need it later. */
3342 save_siginfo (lp);
3343
3344 save_sigtrap (lp);
3345 }
3346
3347 /* Check if the thread has exited. */
3348 if ((WIFEXITED (status) || WIFSIGNALED (status))
3349 && num_lwps (GET_PID (lp->ptid)) > 1)
3350 {
3351 /* If this is the main thread, we must stop all threads and verify
3352 if they are still alive. This is because in the nptl thread model
3353 on Linux 2.4, there is no signal issued for exiting LWPs
3354 other than the main thread. We only get the main thread exit
3355 signal once all child threads have already exited. If we
3356 stop all the threads and use the stop_wait_callback to check
3357 if they have exited we can determine whether this signal
3358 should be ignored or whether it means the end of the debugged
3359 application, regardless of which threading model is being
3360 used. */
3361 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3362 {
3363 lp->stopped = 1;
3364 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3365 stop_and_resume_callback, new_pending_p);
3366 }
3367
3368 if (debug_linux_nat)
3369 fprintf_unfiltered (gdb_stdlog,
3370 "LLW: %s exited.\n",
3371 target_pid_to_str (lp->ptid));
3372
3373 if (num_lwps (GET_PID (lp->ptid)) > 1)
3374 {
3375 /* If there is at least one more LWP, then the exit signal
3376 was not the end of the debugged application and should be
3377 ignored. */
3378 exit_lwp (lp);
3379 return NULL;
3380 }
3381 }
3382
3383 /* Check if the current LWP has previously exited. In the nptl
3384 thread model, LWPs other than the main thread do not issue
3385 signals when they exit so we must check whenever the thread has
3386 stopped. A similar check is made in stop_wait_callback(). */
3387 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3388 {
3389 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3390
3391 if (debug_linux_nat)
3392 fprintf_unfiltered (gdb_stdlog,
3393 "LLW: %s exited.\n",
3394 target_pid_to_str (lp->ptid));
3395
3396 exit_lwp (lp);
3397
3398 /* Make sure there is at least one thread running. */
3399 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3400
3401 /* Discard the event. */
3402 return NULL;
3403 }
3404
3405 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3406 an attempt to stop an LWP. */
3407 if (lp->signalled
3408 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3409 {
3410 if (debug_linux_nat)
3411 fprintf_unfiltered (gdb_stdlog,
3412 "LLW: Delayed SIGSTOP caught for %s.\n",
3413 target_pid_to_str (lp->ptid));
3414
3415 lp->signalled = 0;
3416
3417 if (lp->last_resume_kind != resume_stop)
3418 {
3419 /* This is a delayed SIGSTOP. */
3420
3421 registers_changed ();
3422
3423 if (linux_nat_prepare_to_resume != NULL)
3424 linux_nat_prepare_to_resume (lp);
3425 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3426 lp->step, TARGET_SIGNAL_0);
3427 if (debug_linux_nat)
3428 fprintf_unfiltered (gdb_stdlog,
3429 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3430 lp->step ?
3431 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3432 target_pid_to_str (lp->ptid));
3433
3434 lp->stopped = 0;
3435 gdb_assert (lp->resumed);
3436
3437 /* Discard the event. */
3438 return NULL;
3439 }
3440 }
3441
3442 /* Make sure we don't report a SIGINT that we have already displayed
3443 for another thread. */
3444 if (lp->ignore_sigint
3445 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3446 {
3447 if (debug_linux_nat)
3448 fprintf_unfiltered (gdb_stdlog,
3449 "LLW: Delayed SIGINT caught for %s.\n",
3450 target_pid_to_str (lp->ptid));
3451
3452 /* This is a delayed SIGINT. */
3453 lp->ignore_sigint = 0;
3454
3455 registers_changed ();
3456 if (linux_nat_prepare_to_resume != NULL)
3457 linux_nat_prepare_to_resume (lp);
3458 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3459 lp->step, TARGET_SIGNAL_0);
3460 if (debug_linux_nat)
3461 fprintf_unfiltered (gdb_stdlog,
3462 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3463 lp->step ?
3464 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3465 target_pid_to_str (lp->ptid));
3466
3467 lp->stopped = 0;
3468 gdb_assert (lp->resumed);
3469
3470 /* Discard the event. */
3471 return NULL;
3472 }
3473
3474 /* An interesting event. */
3475 gdb_assert (lp);
3476 lp->status = status;
3477 return lp;
3478 }
3479
3480 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3481 their exits until all other threads in the group have exited. */
3482
3483 static void
3484 check_zombie_leaders (void)
3485 {
3486 struct inferior *inf;
3487
3488 ALL_INFERIORS (inf)
3489 {
3490 struct lwp_info *leader_lp;
3491
3492 if (inf->pid == 0)
3493 continue;
3494
3495 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3496 if (leader_lp != NULL
3497 /* Check if there are other threads in the group, as we may
3498 have raced with the inferior simply exiting. */
3499 && num_lwps (inf->pid) > 1
3500 && linux_lwp_is_zombie (inf->pid))
3501 {
3502 if (debug_linux_nat)
3503 fprintf_unfiltered (gdb_stdlog,
3504 "CZL: Thread group leader %d zombie "
3505 "(it exited, or another thread execd).\n",
3506 inf->pid);
3507
3508 /* A leader zombie can mean one of two things:
3509
3510 - It exited, and there's an exit status pending
3511 available, or only the leader exited (not the whole
3512 program). In the latter case, we can't waitpid the
3513 leader's exit status until all other threads are gone.
3514
3515 - There are 3 or more threads in the group, and a thread
3516 other than the leader exec'd. On an exec, the Linux
3517 kernel destroys all other threads (except the execing
3518 one) in the thread group, and resets the execing thread's
3519 tid to the tgid. No exit notification is sent for the
3520 execing thread -- from the ptracer's perspective, it
3521 appears as though the execing thread just vanishes.
3522 Until we reap all other threads except the leader and the
3523 execing thread, the leader will be zombie, and the
3524 execing thread will be in `D (disc sleep)'. As soon as
3525 all other threads are reaped, the execing thread changes
3526 it's tid to the tgid, and the previous (zombie) leader
3527 vanishes, giving place to the "new" leader. We could try
3528 distinguishing the exit and exec cases, by waiting once
3529 more, and seeing if something comes out, but it doesn't
3530 sound useful. The previous leader _does_ go away, and
3531 we'll re-add the new one once we see the exec event
3532 (which is just the same as what would happen if the
3533 previous leader did exit voluntarily before some other
3534 thread execs). */
3535
3536 if (debug_linux_nat)
3537 fprintf_unfiltered (gdb_stdlog,
3538 "CZL: Thread group leader %d vanished.\n",
3539 inf->pid);
3540 exit_lwp (leader_lp);
3541 }
3542 }
3543 }
3544
3545 static ptid_t
3546 linux_nat_wait_1 (struct target_ops *ops,
3547 ptid_t ptid, struct target_waitstatus *ourstatus,
3548 int target_options)
3549 {
3550 static sigset_t prev_mask;
3551 enum resume_kind last_resume_kind;
3552 struct lwp_info *lp;
3553 int status;
3554
3555 if (debug_linux_nat)
3556 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3557
3558 /* The first time we get here after starting a new inferior, we may
3559 not have added it to the LWP list yet - this is the earliest
3560 moment at which we know its PID. */
3561 if (ptid_is_pid (inferior_ptid))
3562 {
3563 /* Upgrade the main thread's ptid. */
3564 thread_change_ptid (inferior_ptid,
3565 BUILD_LWP (GET_PID (inferior_ptid),
3566 GET_PID (inferior_ptid)));
3567
3568 lp = add_lwp (inferior_ptid);
3569 lp->resumed = 1;
3570 }
3571
3572 /* Make sure SIGCHLD is blocked. */
3573 block_child_signals (&prev_mask);
3574
3575 retry:
3576 lp = NULL;
3577 status = 0;
3578
3579 /* First check if there is a LWP with a wait status pending. */
3580 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3581 {
3582 /* Any LWP in the PTID group that's been resumed will do. */
3583 lp = iterate_over_lwps (ptid, status_callback, NULL);
3584 if (lp)
3585 {
3586 if (debug_linux_nat && lp->status)
3587 fprintf_unfiltered (gdb_stdlog,
3588 "LLW: Using pending wait status %s for %s.\n",
3589 status_to_str (lp->status),
3590 target_pid_to_str (lp->ptid));
3591 }
3592 }
3593 else if (is_lwp (ptid))
3594 {
3595 if (debug_linux_nat)
3596 fprintf_unfiltered (gdb_stdlog,
3597 "LLW: Waiting for specific LWP %s.\n",
3598 target_pid_to_str (ptid));
3599
3600 /* We have a specific LWP to check. */
3601 lp = find_lwp_pid (ptid);
3602 gdb_assert (lp);
3603
3604 if (debug_linux_nat && lp->status)
3605 fprintf_unfiltered (gdb_stdlog,
3606 "LLW: Using pending wait status %s for %s.\n",
3607 status_to_str (lp->status),
3608 target_pid_to_str (lp->ptid));
3609
3610 /* We check for lp->waitstatus in addition to lp->status,
3611 because we can have pending process exits recorded in
3612 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3613 an additional lp->status_p flag. */
3614 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3615 lp = NULL;
3616 }
3617
3618 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
3619 {
3620 /* A pending SIGSTOP may interfere with the normal stream of
3621 events. In a typical case where interference is a problem,
3622 we have a SIGSTOP signal pending for LWP A while
3623 single-stepping it, encounter an event in LWP B, and take the
3624 pending SIGSTOP while trying to stop LWP A. After processing
3625 the event in LWP B, LWP A is continued, and we'll never see
3626 the SIGTRAP associated with the last time we were
3627 single-stepping LWP A. */
3628
3629 /* Resume the thread. It should halt immediately returning the
3630 pending SIGSTOP. */
3631 registers_changed ();
3632 if (linux_nat_prepare_to_resume != NULL)
3633 linux_nat_prepare_to_resume (lp);
3634 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3635 lp->step, TARGET_SIGNAL_0);
3636 if (debug_linux_nat)
3637 fprintf_unfiltered (gdb_stdlog,
3638 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3639 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3640 target_pid_to_str (lp->ptid));
3641 lp->stopped = 0;
3642 gdb_assert (lp->resumed);
3643
3644 /* Catch the pending SIGSTOP. */
3645 status = lp->status;
3646 lp->status = 0;
3647
3648 stop_wait_callback (lp, NULL);
3649
3650 /* If the lp->status field isn't empty, we caught another signal
3651 while flushing the SIGSTOP. Return it back to the event
3652 queue of the LWP, as we already have an event to handle. */
3653 if (lp->status)
3654 {
3655 if (debug_linux_nat)
3656 fprintf_unfiltered (gdb_stdlog,
3657 "LLW: kill %s, %s\n",
3658 target_pid_to_str (lp->ptid),
3659 status_to_str (lp->status));
3660 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3661 }
3662
3663 lp->status = status;
3664 }
3665
3666 if (!target_can_async_p ())
3667 {
3668 /* Causes SIGINT to be passed on to the attached process. */
3669 set_sigint_trap ();
3670 }
3671
3672 /* But if we don't find a pending event, we'll have to wait. */
3673
3674 while (lp == NULL)
3675 {
3676 pid_t lwpid;
3677
3678 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3679 quirks:
3680
3681 - If the thread group leader exits while other threads in the
3682 thread group still exist, waitpid(TGID, ...) hangs. That
3683 waitpid won't return an exit status until the other threads
3684 in the group are reapped.
3685
3686 - When a non-leader thread execs, that thread just vanishes
3687 without reporting an exit (so we'd hang if we waited for it
3688 explicitly in that case). The exec event is reported to
3689 the TGID pid. */
3690
3691 errno = 0;
3692 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3693 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3694 lwpid = my_waitpid (-1, &status, WNOHANG);
3695
3696 if (debug_linux_nat)
3697 fprintf_unfiltered (gdb_stdlog,
3698 "LNW: waitpid(-1, ...) returned %d, %s\n",
3699 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3700
3701 if (lwpid > 0)
3702 {
3703 /* If this is true, then we paused LWPs momentarily, and may
3704 now have pending events to handle. */
3705 int new_pending;
3706
3707 if (debug_linux_nat)
3708 {
3709 fprintf_unfiltered (gdb_stdlog,
3710 "LLW: waitpid %ld received %s\n",
3711 (long) lwpid, status_to_str (status));
3712 }
3713
3714 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3715
3716 /* STATUS is now no longer valid, use LP->STATUS instead. */
3717 status = 0;
3718
3719 if (lp && !ptid_match (lp->ptid, ptid))
3720 {
3721 gdb_assert (lp->resumed);
3722
3723 if (debug_linux_nat)
3724 fprintf (stderr,
3725 "LWP %ld got an event %06x, leaving pending.\n",
3726 ptid_get_lwp (lp->ptid), lp->status);
3727
3728 if (WIFSTOPPED (lp->status))
3729 {
3730 if (WSTOPSIG (lp->status) != SIGSTOP)
3731 {
3732 /* Cancel breakpoint hits. The breakpoint may
3733 be removed before we fetch events from this
3734 process to report to the core. It is best
3735 not to assume the moribund breakpoints
3736 heuristic always handles these cases --- it
3737 could be too many events go through to the
3738 core before this one is handled. All-stop
3739 always cancels breakpoint hits in all
3740 threads. */
3741 if (non_stop
3742 && linux_nat_lp_status_is_event (lp)
3743 && cancel_breakpoint (lp))
3744 {
3745 /* Throw away the SIGTRAP. */
3746 lp->status = 0;
3747
3748 if (debug_linux_nat)
3749 fprintf (stderr,
3750 "LLW: LWP %ld hit a breakpoint while"
3751 " waiting for another process;"
3752 " cancelled it\n",
3753 ptid_get_lwp (lp->ptid));
3754 }
3755 lp->stopped = 1;
3756 }
3757 else
3758 {
3759 lp->stopped = 1;
3760 lp->signalled = 0;
3761 }
3762 }
3763 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3764 {
3765 if (debug_linux_nat)
3766 fprintf (stderr,
3767 "Process %ld exited while stopping LWPs\n",
3768 ptid_get_lwp (lp->ptid));
3769
3770 /* This was the last lwp in the process. Since
3771 events are serialized to GDB core, and we can't
3772 report this one right now, but GDB core and the
3773 other target layers will want to be notified
3774 about the exit code/signal, leave the status
3775 pending for the next time we're able to report
3776 it. */
3777
3778 /* Prevent trying to stop this thread again. We'll
3779 never try to resume it because it has a pending
3780 status. */
3781 lp->stopped = 1;
3782
3783 /* Dead LWP's aren't expected to reported a pending
3784 sigstop. */
3785 lp->signalled = 0;
3786
3787 /* Store the pending event in the waitstatus as
3788 well, because W_EXITCODE(0,0) == 0. */
3789 store_waitstatus (&lp->waitstatus, lp->status);
3790 }
3791
3792 /* Keep looking. */
3793 lp = NULL;
3794 }
3795
3796 if (new_pending)
3797 {
3798 /* Some LWP now has a pending event. Go all the way
3799 back to check it. */
3800 goto retry;
3801 }
3802
3803 if (lp)
3804 {
3805 /* We got an event to report to the core. */
3806 break;
3807 }
3808
3809 /* Retry until nothing comes out of waitpid. A single
3810 SIGCHLD can indicate more than one child stopped. */
3811 continue;
3812 }
3813
3814 /* Check for zombie thread group leaders. Those can't be reaped
3815 until all other threads in the thread group are. */
3816 check_zombie_leaders ();
3817
3818 /* If there are no resumed children left, bail. We'd be stuck
3819 forever in the sigsuspend call below otherwise. */
3820 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3821 {
3822 if (debug_linux_nat)
3823 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3824
3825 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3826
3827 if (!target_can_async_p ())
3828 clear_sigint_trap ();
3829
3830 restore_child_signals_mask (&prev_mask);
3831 return minus_one_ptid;
3832 }
3833
3834 /* No interesting event to report to the core. */
3835
3836 if (target_options & TARGET_WNOHANG)
3837 {
3838 if (debug_linux_nat)
3839 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3840
3841 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3842 restore_child_signals_mask (&prev_mask);
3843 return minus_one_ptid;
3844 }
3845
3846 /* We shouldn't end up here unless we want to try again. */
3847 gdb_assert (lp == NULL);
3848
3849 /* Block until we get an event reported with SIGCHLD. */
3850 sigsuspend (&suspend_mask);
3851 }
3852
3853 if (!target_can_async_p ())
3854 clear_sigint_trap ();
3855
3856 gdb_assert (lp);
3857
3858 status = lp->status;
3859 lp->status = 0;
3860
3861 /* Don't report signals that GDB isn't interested in, such as
3862 signals that are neither printed nor stopped upon. Stopping all
3863 threads can be a bit time-consuming so if we want decent
3864 performance with heavily multi-threaded programs, especially when
3865 they're using a high frequency timer, we'd better avoid it if we
3866 can. */
3867
3868 if (WIFSTOPPED (status))
3869 {
3870 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
3871
3872 /* When using hardware single-step, we need to report every signal.
3873 Otherwise, signals in pass_mask may be short-circuited. */
3874 if (!lp->step
3875 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3876 {
3877 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3878 here? It is not clear we should. GDB may not expect
3879 other threads to run. On the other hand, not resuming
3880 newly attached threads may cause an unwanted delay in
3881 getting them running. */
3882 registers_changed ();
3883 if (linux_nat_prepare_to_resume != NULL)
3884 linux_nat_prepare_to_resume (lp);
3885 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3886 lp->step, signo);
3887 if (debug_linux_nat)
3888 fprintf_unfiltered (gdb_stdlog,
3889 "LLW: %s %s, %s (preempt 'handle')\n",
3890 lp->step ?
3891 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3892 target_pid_to_str (lp->ptid),
3893 (signo != TARGET_SIGNAL_0
3894 ? strsignal (target_signal_to_host (signo))
3895 : "0"));
3896 lp->stopped = 0;
3897 goto retry;
3898 }
3899
3900 if (!non_stop)
3901 {
3902 /* Only do the below in all-stop, as we currently use SIGINT
3903 to implement target_stop (see linux_nat_stop) in
3904 non-stop. */
3905 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3906 {
3907 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3908 forwarded to the entire process group, that is, all LWPs
3909 will receive it - unless they're using CLONE_THREAD to
3910 share signals. Since we only want to report it once, we
3911 mark it as ignored for all LWPs except this one. */
3912 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3913 set_ignore_sigint, NULL);
3914 lp->ignore_sigint = 0;
3915 }
3916 else
3917 maybe_clear_ignore_sigint (lp);
3918 }
3919 }
3920
3921 /* This LWP is stopped now. */
3922 lp->stopped = 1;
3923
3924 if (debug_linux_nat)
3925 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3926 status_to_str (status), target_pid_to_str (lp->ptid));
3927
3928 if (!non_stop)
3929 {
3930 /* Now stop all other LWP's ... */
3931 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3932
3933 /* ... and wait until all of them have reported back that
3934 they're no longer running. */
3935 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3936
3937 /* If we're not waiting for a specific LWP, choose an event LWP
3938 from among those that have had events. Giving equal priority
3939 to all LWPs that have had events helps prevent
3940 starvation. */
3941 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3942 select_event_lwp (ptid, &lp, &status);
3943
3944 /* Now that we've selected our final event LWP, cancel any
3945 breakpoints in other LWPs that have hit a GDB breakpoint.
3946 See the comment in cancel_breakpoints_callback to find out
3947 why. */
3948 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3949
3950 /* We'll need this to determine whether to report a SIGSTOP as
3951 TARGET_WAITKIND_0. Need to take a copy because
3952 resume_clear_callback clears it. */
3953 last_resume_kind = lp->last_resume_kind;
3954
3955 /* In all-stop, from the core's perspective, all LWPs are now
3956 stopped until a new resume action is sent over. */
3957 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3958 }
3959 else
3960 {
3961 /* See above. */
3962 last_resume_kind = lp->last_resume_kind;
3963 resume_clear_callback (lp, NULL);
3964 }
3965
3966 if (linux_nat_status_is_event (status))
3967 {
3968 if (debug_linux_nat)
3969 fprintf_unfiltered (gdb_stdlog,
3970 "LLW: trap ptid is %s.\n",
3971 target_pid_to_str (lp->ptid));
3972 }
3973
3974 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3975 {
3976 *ourstatus = lp->waitstatus;
3977 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3978 }
3979 else
3980 store_waitstatus (ourstatus, status);
3981
3982 if (debug_linux_nat)
3983 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3984
3985 restore_child_signals_mask (&prev_mask);
3986
3987 if (last_resume_kind == resume_stop
3988 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3989 && WSTOPSIG (status) == SIGSTOP)
3990 {
3991 /* A thread that has been requested to stop by GDB with
3992 target_stop, and it stopped cleanly, so report as SIG0. The
3993 use of SIGSTOP is an implementation detail. */
3994 ourstatus->value.sig = TARGET_SIGNAL_0;
3995 }
3996
3997 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3998 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3999 lp->core = -1;
4000 else
4001 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
4002
4003 return lp->ptid;
4004 }
4005
4006 /* Resume LWPs that are currently stopped without any pending status
4007 to report, but are resumed from the core's perspective. */
4008
4009 static int
4010 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
4011 {
4012 ptid_t *wait_ptid_p = data;
4013
4014 if (lp->stopped
4015 && lp->resumed
4016 && lp->status == 0
4017 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
4018 {
4019 struct regcache *regcache = get_thread_regcache (lp->ptid);
4020 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4021 CORE_ADDR pc = regcache_read_pc (regcache);
4022
4023 gdb_assert (is_executing (lp->ptid));
4024
4025 /* Don't bother if there's a breakpoint at PC that we'd hit
4026 immediately, and we're not waiting for this LWP. */
4027 if (!ptid_match (lp->ptid, *wait_ptid_p))
4028 {
4029 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
4030 return 0;
4031 }
4032
4033 if (debug_linux_nat)
4034 fprintf_unfiltered (gdb_stdlog,
4035 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
4036 target_pid_to_str (lp->ptid),
4037 paddress (gdbarch, pc),
4038 lp->step);
4039
4040 registers_changed ();
4041 if (linux_nat_prepare_to_resume != NULL)
4042 linux_nat_prepare_to_resume (lp);
4043 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
4044 lp->step, TARGET_SIGNAL_0);
4045 lp->stopped = 0;
4046 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
4047 lp->stopped_by_watchpoint = 0;
4048 }
4049
4050 return 0;
4051 }
4052
4053 static ptid_t
4054 linux_nat_wait (struct target_ops *ops,
4055 ptid_t ptid, struct target_waitstatus *ourstatus,
4056 int target_options)
4057 {
4058 ptid_t event_ptid;
4059
4060 if (debug_linux_nat)
4061 fprintf_unfiltered (gdb_stdlog,
4062 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
4063
4064 /* Flush the async file first. */
4065 if (target_can_async_p ())
4066 async_file_flush ();
4067
4068 /* Resume LWPs that are currently stopped without any pending status
4069 to report, but are resumed from the core's perspective. LWPs get
4070 in this state if we find them stopping at a time we're not
4071 interested in reporting the event (target_wait on a
4072 specific_process, for example, see linux_nat_wait_1), and
4073 meanwhile the event became uninteresting. Don't bother resuming
4074 LWPs we're not going to wait for if they'd stop immediately. */
4075 if (non_stop)
4076 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4077
4078 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
4079
4080 /* If we requested any event, and something came out, assume there
4081 may be more. If we requested a specific lwp or process, also
4082 assume there may be more. */
4083 if (target_can_async_p ()
4084 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4085 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
4086 || !ptid_equal (ptid, minus_one_ptid)))
4087 async_file_mark ();
4088
4089 /* Get ready for the next event. */
4090 if (target_can_async_p ())
4091 target_async (inferior_event_handler, 0);
4092
4093 return event_ptid;
4094 }
4095
4096 static int
4097 kill_callback (struct lwp_info *lp, void *data)
4098 {
4099 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4100
4101 errno = 0;
4102 kill (GET_LWP (lp->ptid), SIGKILL);
4103 if (debug_linux_nat)
4104 fprintf_unfiltered (gdb_stdlog,
4105 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4106 target_pid_to_str (lp->ptid),
4107 errno ? safe_strerror (errno) : "OK");
4108
4109 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4110
4111 errno = 0;
4112 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4113 if (debug_linux_nat)
4114 fprintf_unfiltered (gdb_stdlog,
4115 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4116 target_pid_to_str (lp->ptid),
4117 errno ? safe_strerror (errno) : "OK");
4118
4119 return 0;
4120 }
4121
4122 static int
4123 kill_wait_callback (struct lwp_info *lp, void *data)
4124 {
4125 pid_t pid;
4126
4127 /* We must make sure that there are no pending events (delayed
4128 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4129 program doesn't interfere with any following debugging session. */
4130
4131 /* For cloned processes we must check both with __WCLONE and
4132 without, since the exit status of a cloned process isn't reported
4133 with __WCLONE. */
4134 if (lp->cloned)
4135 {
4136 do
4137 {
4138 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
4139 if (pid != (pid_t) -1)
4140 {
4141 if (debug_linux_nat)
4142 fprintf_unfiltered (gdb_stdlog,
4143 "KWC: wait %s received unknown.\n",
4144 target_pid_to_str (lp->ptid));
4145 /* The Linux kernel sometimes fails to kill a thread
4146 completely after PTRACE_KILL; that goes from the stop
4147 point in do_fork out to the one in
4148 get_signal_to_deliever and waits again. So kill it
4149 again. */
4150 kill_callback (lp, NULL);
4151 }
4152 }
4153 while (pid == GET_LWP (lp->ptid));
4154
4155 gdb_assert (pid == -1 && errno == ECHILD);
4156 }
4157
4158 do
4159 {
4160 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
4161 if (pid != (pid_t) -1)
4162 {
4163 if (debug_linux_nat)
4164 fprintf_unfiltered (gdb_stdlog,
4165 "KWC: wait %s received unk.\n",
4166 target_pid_to_str (lp->ptid));
4167 /* See the call to kill_callback above. */
4168 kill_callback (lp, NULL);
4169 }
4170 }
4171 while (pid == GET_LWP (lp->ptid));
4172
4173 gdb_assert (pid == -1 && errno == ECHILD);
4174 return 0;
4175 }
4176
4177 static void
4178 linux_nat_kill (struct target_ops *ops)
4179 {
4180 struct target_waitstatus last;
4181 ptid_t last_ptid;
4182 int status;
4183
4184 /* If we're stopped while forking and we haven't followed yet,
4185 kill the other task. We need to do this first because the
4186 parent will be sleeping if this is a vfork. */
4187
4188 get_last_target_status (&last_ptid, &last);
4189
4190 if (last.kind == TARGET_WAITKIND_FORKED
4191 || last.kind == TARGET_WAITKIND_VFORKED)
4192 {
4193 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4194 wait (&status);
4195 }
4196
4197 if (forks_exist_p ())
4198 linux_fork_killall ();
4199 else
4200 {
4201 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4202
4203 /* Stop all threads before killing them, since ptrace requires
4204 that the thread is stopped to sucessfully PTRACE_KILL. */
4205 iterate_over_lwps (ptid, stop_callback, NULL);
4206 /* ... and wait until all of them have reported back that
4207 they're no longer running. */
4208 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4209
4210 /* Kill all LWP's ... */
4211 iterate_over_lwps (ptid, kill_callback, NULL);
4212
4213 /* ... and wait until we've flushed all events. */
4214 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4215 }
4216
4217 target_mourn_inferior ();
4218 }
4219
4220 static void
4221 linux_nat_mourn_inferior (struct target_ops *ops)
4222 {
4223 purge_lwp_list (ptid_get_pid (inferior_ptid));
4224
4225 if (! forks_exist_p ())
4226 /* Normal case, no other forks available. */
4227 linux_ops->to_mourn_inferior (ops);
4228 else
4229 /* Multi-fork case. The current inferior_ptid has exited, but
4230 there are other viable forks to debug. Delete the exiting
4231 one and context-switch to the first available. */
4232 linux_fork_mourn_inferior ();
4233 }
4234
4235 /* Convert a native/host siginfo object, into/from the siginfo in the
4236 layout of the inferiors' architecture. */
4237
4238 static void
4239 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4240 {
4241 int done = 0;
4242
4243 if (linux_nat_siginfo_fixup != NULL)
4244 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4245
4246 /* If there was no callback, or the callback didn't do anything,
4247 then just do a straight memcpy. */
4248 if (!done)
4249 {
4250 if (direction == 1)
4251 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4252 else
4253 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4254 }
4255 }
4256
4257 static LONGEST
4258 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4259 const char *annex, gdb_byte *readbuf,
4260 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4261 {
4262 int pid;
4263 struct siginfo siginfo;
4264 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4265
4266 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4267 gdb_assert (readbuf || writebuf);
4268
4269 pid = GET_LWP (inferior_ptid);
4270 if (pid == 0)
4271 pid = GET_PID (inferior_ptid);
4272
4273 if (offset > sizeof (siginfo))
4274 return -1;
4275
4276 errno = 0;
4277 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4278 if (errno != 0)
4279 return -1;
4280
4281 /* When GDB is built as a 64-bit application, ptrace writes into
4282 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4283 inferior with a 64-bit GDB should look the same as debugging it
4284 with a 32-bit GDB, we need to convert it. GDB core always sees
4285 the converted layout, so any read/write will have to be done
4286 post-conversion. */
4287 siginfo_fixup (&siginfo, inf_siginfo, 0);
4288
4289 if (offset + len > sizeof (siginfo))
4290 len = sizeof (siginfo) - offset;
4291
4292 if (readbuf != NULL)
4293 memcpy (readbuf, inf_siginfo + offset, len);
4294 else
4295 {
4296 memcpy (inf_siginfo + offset, writebuf, len);
4297
4298 /* Convert back to ptrace layout before flushing it out. */
4299 siginfo_fixup (&siginfo, inf_siginfo, 1);
4300
4301 errno = 0;
4302 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4303 if (errno != 0)
4304 return -1;
4305 }
4306
4307 return len;
4308 }
4309
4310 static LONGEST
4311 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4312 const char *annex, gdb_byte *readbuf,
4313 const gdb_byte *writebuf,
4314 ULONGEST offset, LONGEST len)
4315 {
4316 struct cleanup *old_chain;
4317 LONGEST xfer;
4318
4319 if (object == TARGET_OBJECT_SIGNAL_INFO)
4320 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4321 offset, len);
4322
4323 /* The target is connected but no live inferior is selected. Pass
4324 this request down to a lower stratum (e.g., the executable
4325 file). */
4326 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4327 return 0;
4328
4329 old_chain = save_inferior_ptid ();
4330
4331 if (is_lwp (inferior_ptid))
4332 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4333
4334 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4335 offset, len);
4336
4337 do_cleanups (old_chain);
4338 return xfer;
4339 }
4340
4341 static int
4342 linux_thread_alive (ptid_t ptid)
4343 {
4344 int err, tmp_errno;
4345
4346 gdb_assert (is_lwp (ptid));
4347
4348 /* Send signal 0 instead of anything ptrace, because ptracing a
4349 running thread errors out claiming that the thread doesn't
4350 exist. */
4351 err = kill_lwp (GET_LWP (ptid), 0);
4352 tmp_errno = errno;
4353 if (debug_linux_nat)
4354 fprintf_unfiltered (gdb_stdlog,
4355 "LLTA: KILL(SIG0) %s (%s)\n",
4356 target_pid_to_str (ptid),
4357 err ? safe_strerror (tmp_errno) : "OK");
4358
4359 if (err != 0)
4360 return 0;
4361
4362 return 1;
4363 }
4364
4365 static int
4366 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4367 {
4368 return linux_thread_alive (ptid);
4369 }
4370
4371 static char *
4372 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4373 {
4374 static char buf[64];
4375
4376 if (is_lwp (ptid)
4377 && (GET_PID (ptid) != GET_LWP (ptid)
4378 || num_lwps (GET_PID (ptid)) > 1))
4379 {
4380 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4381 return buf;
4382 }
4383
4384 return normal_pid_to_str (ptid);
4385 }
4386
4387 static char *
4388 linux_nat_thread_name (struct thread_info *thr)
4389 {
4390 int pid = ptid_get_pid (thr->ptid);
4391 long lwp = ptid_get_lwp (thr->ptid);
4392 #define FORMAT "/proc/%d/task/%ld/comm"
4393 char buf[sizeof (FORMAT) + 30];
4394 FILE *comm_file;
4395 char *result = NULL;
4396
4397 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4398 comm_file = fopen (buf, "r");
4399 if (comm_file)
4400 {
4401 /* Not exported by the kernel, so we define it here. */
4402 #define COMM_LEN 16
4403 static char line[COMM_LEN + 1];
4404
4405 if (fgets (line, sizeof (line), comm_file))
4406 {
4407 char *nl = strchr (line, '\n');
4408
4409 if (nl)
4410 *nl = '\0';
4411 if (*line != '\0')
4412 result = line;
4413 }
4414
4415 fclose (comm_file);
4416 }
4417
4418 #undef COMM_LEN
4419 #undef FORMAT
4420
4421 return result;
4422 }
4423
4424 /* Accepts an integer PID; Returns a string representing a file that
4425 can be opened to get the symbols for the child process. */
4426
4427 static char *
4428 linux_child_pid_to_exec_file (int pid)
4429 {
4430 char *name1, *name2;
4431
4432 name1 = xmalloc (MAXPATHLEN);
4433 name2 = xmalloc (MAXPATHLEN);
4434 make_cleanup (xfree, name1);
4435 make_cleanup (xfree, name2);
4436 memset (name2, 0, MAXPATHLEN);
4437
4438 sprintf (name1, "/proc/%d/exe", pid);
4439 if (readlink (name1, name2, MAXPATHLEN) > 0)
4440 return name2;
4441 else
4442 return name1;
4443 }
4444
4445 /* Records the thread's register state for the corefile note
4446 section. */
4447
4448 static char *
4449 linux_nat_collect_thread_registers (const struct regcache *regcache,
4450 ptid_t ptid, bfd *obfd,
4451 char *note_data, int *note_size,
4452 enum target_signal stop_signal)
4453 {
4454 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4455 const struct regset *regset;
4456 int core_regset_p;
4457 gdb_gregset_t gregs;
4458 gdb_fpregset_t fpregs;
4459
4460 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4461
4462 if (core_regset_p
4463 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4464 sizeof (gregs)))
4465 != NULL && regset->collect_regset != NULL)
4466 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4467 else
4468 fill_gregset (regcache, &gregs, -1);
4469
4470 note_data = (char *) elfcore_write_prstatus
4471 (obfd, note_data, note_size, ptid_get_lwp (ptid),
4472 target_signal_to_host (stop_signal), &gregs);
4473
4474 if (core_regset_p
4475 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4476 sizeof (fpregs)))
4477 != NULL && regset->collect_regset != NULL)
4478 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4479 else
4480 fill_fpregset (regcache, &fpregs, -1);
4481
4482 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4483 &fpregs, sizeof (fpregs));
4484
4485 return note_data;
4486 }
4487
4488 /* Fills the "to_make_corefile_note" target vector. Builds the note
4489 section for a corefile, and returns it in a malloc buffer. */
4490
4491 static char *
4492 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4493 {
4494 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4495 converted to gdbarch_core_regset_sections, this function can go away. */
4496 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4497 linux_nat_collect_thread_registers);
4498 }
4499
4500 /* Implement the to_xfer_partial interface for memory reads using the /proc
4501 filesystem. Because we can use a single read() call for /proc, this
4502 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4503 but it doesn't support writes. */
4504
4505 static LONGEST
4506 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4507 const char *annex, gdb_byte *readbuf,
4508 const gdb_byte *writebuf,
4509 ULONGEST offset, LONGEST len)
4510 {
4511 LONGEST ret;
4512 int fd;
4513 char filename[64];
4514
4515 if (object != TARGET_OBJECT_MEMORY || !readbuf)
4516 return 0;
4517
4518 /* Don't bother for one word. */
4519 if (len < 3 * sizeof (long))
4520 return 0;
4521
4522 /* We could keep this file open and cache it - possibly one per
4523 thread. That requires some juggling, but is even faster. */
4524 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4525 fd = open (filename, O_RDONLY | O_LARGEFILE);
4526 if (fd == -1)
4527 return 0;
4528
4529 /* If pread64 is available, use it. It's faster if the kernel
4530 supports it (only one syscall), and it's 64-bit safe even on
4531 32-bit platforms (for instance, SPARC debugging a SPARC64
4532 application). */
4533 #ifdef HAVE_PREAD64
4534 if (pread64 (fd, readbuf, len, offset) != len)
4535 #else
4536 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4537 #endif
4538 ret = 0;
4539 else
4540 ret = len;
4541
4542 close (fd);
4543 return ret;
4544 }
4545
4546
4547 /* Enumerate spufs IDs for process PID. */
4548 static LONGEST
4549 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4550 {
4551 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4552 LONGEST pos = 0;
4553 LONGEST written = 0;
4554 char path[128];
4555 DIR *dir;
4556 struct dirent *entry;
4557
4558 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4559 dir = opendir (path);
4560 if (!dir)
4561 return -1;
4562
4563 rewinddir (dir);
4564 while ((entry = readdir (dir)) != NULL)
4565 {
4566 struct stat st;
4567 struct statfs stfs;
4568 int fd;
4569
4570 fd = atoi (entry->d_name);
4571 if (!fd)
4572 continue;
4573
4574 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4575 if (stat (path, &st) != 0)
4576 continue;
4577 if (!S_ISDIR (st.st_mode))
4578 continue;
4579
4580 if (statfs (path, &stfs) != 0)
4581 continue;
4582 if (stfs.f_type != SPUFS_MAGIC)
4583 continue;
4584
4585 if (pos >= offset && pos + 4 <= offset + len)
4586 {
4587 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4588 written += 4;
4589 }
4590 pos += 4;
4591 }
4592
4593 closedir (dir);
4594 return written;
4595 }
4596
4597 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4598 object type, using the /proc file system. */
4599 static LONGEST
4600 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4601 const char *annex, gdb_byte *readbuf,
4602 const gdb_byte *writebuf,
4603 ULONGEST offset, LONGEST len)
4604 {
4605 char buf[128];
4606 int fd = 0;
4607 int ret = -1;
4608 int pid = PIDGET (inferior_ptid);
4609
4610 if (!annex)
4611 {
4612 if (!readbuf)
4613 return -1;
4614 else
4615 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4616 }
4617
4618 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4619 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4620 if (fd <= 0)
4621 return -1;
4622
4623 if (offset != 0
4624 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4625 {
4626 close (fd);
4627 return 0;
4628 }
4629
4630 if (writebuf)
4631 ret = write (fd, writebuf, (size_t) len);
4632 else if (readbuf)
4633 ret = read (fd, readbuf, (size_t) len);
4634
4635 close (fd);
4636 return ret;
4637 }
4638
4639
4640 /* Parse LINE as a signal set and add its set bits to SIGS. */
4641
4642 static void
4643 add_line_to_sigset (const char *line, sigset_t *sigs)
4644 {
4645 int len = strlen (line) - 1;
4646 const char *p;
4647 int signum;
4648
4649 if (line[len] != '\n')
4650 error (_("Could not parse signal set: %s"), line);
4651
4652 p = line;
4653 signum = len * 4;
4654 while (len-- > 0)
4655 {
4656 int digit;
4657
4658 if (*p >= '0' && *p <= '9')
4659 digit = *p - '0';
4660 else if (*p >= 'a' && *p <= 'f')
4661 digit = *p - 'a' + 10;
4662 else
4663 error (_("Could not parse signal set: %s"), line);
4664
4665 signum -= 4;
4666
4667 if (digit & 1)
4668 sigaddset (sigs, signum + 1);
4669 if (digit & 2)
4670 sigaddset (sigs, signum + 2);
4671 if (digit & 4)
4672 sigaddset (sigs, signum + 3);
4673 if (digit & 8)
4674 sigaddset (sigs, signum + 4);
4675
4676 p++;
4677 }
4678 }
4679
4680 /* Find process PID's pending signals from /proc/pid/status and set
4681 SIGS to match. */
4682
4683 void
4684 linux_proc_pending_signals (int pid, sigset_t *pending,
4685 sigset_t *blocked, sigset_t *ignored)
4686 {
4687 FILE *procfile;
4688 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4689 struct cleanup *cleanup;
4690
4691 sigemptyset (pending);
4692 sigemptyset (blocked);
4693 sigemptyset (ignored);
4694 sprintf (fname, "/proc/%d/status", pid);
4695 procfile = fopen (fname, "r");
4696 if (procfile == NULL)
4697 error (_("Could not open %s"), fname);
4698 cleanup = make_cleanup_fclose (procfile);
4699
4700 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4701 {
4702 /* Normal queued signals are on the SigPnd line in the status
4703 file. However, 2.6 kernels also have a "shared" pending
4704 queue for delivering signals to a thread group, so check for
4705 a ShdPnd line also.
4706
4707 Unfortunately some Red Hat kernels include the shared pending
4708 queue but not the ShdPnd status field. */
4709
4710 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4711 add_line_to_sigset (buffer + 8, pending);
4712 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4713 add_line_to_sigset (buffer + 8, pending);
4714 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4715 add_line_to_sigset (buffer + 8, blocked);
4716 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4717 add_line_to_sigset (buffer + 8, ignored);
4718 }
4719
4720 do_cleanups (cleanup);
4721 }
4722
4723 static LONGEST
4724 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4725 const char *annex, gdb_byte *readbuf,
4726 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4727 {
4728 gdb_assert (object == TARGET_OBJECT_OSDATA);
4729
4730 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4731 }
4732
4733 static LONGEST
4734 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4735 const char *annex, gdb_byte *readbuf,
4736 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4737 {
4738 LONGEST xfer;
4739
4740 if (object == TARGET_OBJECT_AUXV)
4741 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4742 offset, len);
4743
4744 if (object == TARGET_OBJECT_OSDATA)
4745 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4746 offset, len);
4747
4748 if (object == TARGET_OBJECT_SPU)
4749 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4750 offset, len);
4751
4752 /* GDB calculates all the addresses in possibly larget width of the address.
4753 Address width needs to be masked before its final use - either by
4754 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4755
4756 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4757
4758 if (object == TARGET_OBJECT_MEMORY)
4759 {
4760 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4761
4762 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4763 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4764 }
4765
4766 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4767 offset, len);
4768 if (xfer != 0)
4769 return xfer;
4770
4771 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4772 offset, len);
4773 }
4774
4775 /* Create a prototype generic GNU/Linux target. The client can override
4776 it with local methods. */
4777
4778 static void
4779 linux_target_install_ops (struct target_ops *t)
4780 {
4781 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4782 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4783 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4784 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4785 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4786 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4787 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4788 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4789 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4790 t->to_post_attach = linux_child_post_attach;
4791 t->to_follow_fork = linux_child_follow_fork;
4792 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4793
4794 super_xfer_partial = t->to_xfer_partial;
4795 t->to_xfer_partial = linux_xfer_partial;
4796 }
4797
4798 struct target_ops *
4799 linux_target (void)
4800 {
4801 struct target_ops *t;
4802
4803 t = inf_ptrace_target ();
4804 linux_target_install_ops (t);
4805
4806 return t;
4807 }
4808
4809 struct target_ops *
4810 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4811 {
4812 struct target_ops *t;
4813
4814 t = inf_ptrace_trad_target (register_u_offset);
4815 linux_target_install_ops (t);
4816
4817 return t;
4818 }
4819
4820 /* target_is_async_p implementation. */
4821
4822 static int
4823 linux_nat_is_async_p (void)
4824 {
4825 /* NOTE: palves 2008-03-21: We're only async when the user requests
4826 it explicitly with the "set target-async" command.
4827 Someday, linux will always be async. */
4828 return target_async_permitted;
4829 }
4830
4831 /* target_can_async_p implementation. */
4832
4833 static int
4834 linux_nat_can_async_p (void)
4835 {
4836 /* NOTE: palves 2008-03-21: We're only async when the user requests
4837 it explicitly with the "set target-async" command.
4838 Someday, linux will always be async. */
4839 return target_async_permitted;
4840 }
4841
4842 static int
4843 linux_nat_supports_non_stop (void)
4844 {
4845 return 1;
4846 }
4847
4848 /* True if we want to support multi-process. To be removed when GDB
4849 supports multi-exec. */
4850
4851 int linux_multi_process = 1;
4852
4853 static int
4854 linux_nat_supports_multi_process (void)
4855 {
4856 return linux_multi_process;
4857 }
4858
4859 static int
4860 linux_nat_supports_disable_randomization (void)
4861 {
4862 #ifdef HAVE_PERSONALITY
4863 return 1;
4864 #else
4865 return 0;
4866 #endif
4867 }
4868
4869 static int async_terminal_is_ours = 1;
4870
4871 /* target_terminal_inferior implementation. */
4872
4873 static void
4874 linux_nat_terminal_inferior (void)
4875 {
4876 if (!target_is_async_p ())
4877 {
4878 /* Async mode is disabled. */
4879 terminal_inferior ();
4880 return;
4881 }
4882
4883 terminal_inferior ();
4884
4885 /* Calls to target_terminal_*() are meant to be idempotent. */
4886 if (!async_terminal_is_ours)
4887 return;
4888
4889 delete_file_handler (input_fd);
4890 async_terminal_is_ours = 0;
4891 set_sigint_trap ();
4892 }
4893
4894 /* target_terminal_ours implementation. */
4895
4896 static void
4897 linux_nat_terminal_ours (void)
4898 {
4899 if (!target_is_async_p ())
4900 {
4901 /* Async mode is disabled. */
4902 terminal_ours ();
4903 return;
4904 }
4905
4906 /* GDB should never give the terminal to the inferior if the
4907 inferior is running in the background (run&, continue&, etc.),
4908 but claiming it sure should. */
4909 terminal_ours ();
4910
4911 if (async_terminal_is_ours)
4912 return;
4913
4914 clear_sigint_trap ();
4915 add_file_handler (input_fd, stdin_event_handler, 0);
4916 async_terminal_is_ours = 1;
4917 }
4918
4919 static void (*async_client_callback) (enum inferior_event_type event_type,
4920 void *context);
4921 static void *async_client_context;
4922
4923 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4924 so we notice when any child changes state, and notify the
4925 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4926 above to wait for the arrival of a SIGCHLD. */
4927
4928 static void
4929 sigchld_handler (int signo)
4930 {
4931 int old_errno = errno;
4932
4933 if (debug_linux_nat)
4934 ui_file_write_async_safe (gdb_stdlog,
4935 "sigchld\n", sizeof ("sigchld\n") - 1);
4936
4937 if (signo == SIGCHLD
4938 && linux_nat_event_pipe[0] != -1)
4939 async_file_mark (); /* Let the event loop know that there are
4940 events to handle. */
4941
4942 errno = old_errno;
4943 }
4944
4945 /* Callback registered with the target events file descriptor. */
4946
4947 static void
4948 handle_target_event (int error, gdb_client_data client_data)
4949 {
4950 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4951 }
4952
4953 /* Create/destroy the target events pipe. Returns previous state. */
4954
4955 static int
4956 linux_async_pipe (int enable)
4957 {
4958 int previous = (linux_nat_event_pipe[0] != -1);
4959
4960 if (previous != enable)
4961 {
4962 sigset_t prev_mask;
4963
4964 block_child_signals (&prev_mask);
4965
4966 if (enable)
4967 {
4968 if (pipe (linux_nat_event_pipe) == -1)
4969 internal_error (__FILE__, __LINE__,
4970 "creating event pipe failed.");
4971
4972 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4973 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4974 }
4975 else
4976 {
4977 close (linux_nat_event_pipe[0]);
4978 close (linux_nat_event_pipe[1]);
4979 linux_nat_event_pipe[0] = -1;
4980 linux_nat_event_pipe[1] = -1;
4981 }
4982
4983 restore_child_signals_mask (&prev_mask);
4984 }
4985
4986 return previous;
4987 }
4988
4989 /* target_async implementation. */
4990
4991 static void
4992 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4993 void *context), void *context)
4994 {
4995 if (callback != NULL)
4996 {
4997 async_client_callback = callback;
4998 async_client_context = context;
4999 if (!linux_async_pipe (1))
5000 {
5001 add_file_handler (linux_nat_event_pipe[0],
5002 handle_target_event, NULL);
5003 /* There may be pending events to handle. Tell the event loop
5004 to poll them. */
5005 async_file_mark ();
5006 }
5007 }
5008 else
5009 {
5010 async_client_callback = callback;
5011 async_client_context = context;
5012 delete_file_handler (linux_nat_event_pipe[0]);
5013 linux_async_pipe (0);
5014 }
5015 return;
5016 }
5017
5018 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5019 event came out. */
5020
5021 static int
5022 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
5023 {
5024 if (!lwp->stopped)
5025 {
5026 ptid_t ptid = lwp->ptid;
5027
5028 if (debug_linux_nat)
5029 fprintf_unfiltered (gdb_stdlog,
5030 "LNSL: running -> suspending %s\n",
5031 target_pid_to_str (lwp->ptid));
5032
5033
5034 if (lwp->last_resume_kind == resume_stop)
5035 {
5036 if (debug_linux_nat)
5037 fprintf_unfiltered (gdb_stdlog,
5038 "linux-nat: already stopping LWP %ld at "
5039 "GDB's request\n",
5040 ptid_get_lwp (lwp->ptid));
5041 return 0;
5042 }
5043
5044 stop_callback (lwp, NULL);
5045 lwp->last_resume_kind = resume_stop;
5046 }
5047 else
5048 {
5049 /* Already known to be stopped; do nothing. */
5050
5051 if (debug_linux_nat)
5052 {
5053 if (find_thread_ptid (lwp->ptid)->stop_requested)
5054 fprintf_unfiltered (gdb_stdlog,
5055 "LNSL: already stopped/stop_requested %s\n",
5056 target_pid_to_str (lwp->ptid));
5057 else
5058 fprintf_unfiltered (gdb_stdlog,
5059 "LNSL: already stopped/no "
5060 "stop_requested yet %s\n",
5061 target_pid_to_str (lwp->ptid));
5062 }
5063 }
5064 return 0;
5065 }
5066
5067 static void
5068 linux_nat_stop (ptid_t ptid)
5069 {
5070 if (non_stop)
5071 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5072 else
5073 linux_ops->to_stop (ptid);
5074 }
5075
5076 static void
5077 linux_nat_close (int quitting)
5078 {
5079 /* Unregister from the event loop. */
5080 if (linux_nat_is_async_p ())
5081 linux_nat_async (NULL, 0);
5082
5083 if (linux_ops->to_close)
5084 linux_ops->to_close (quitting);
5085 }
5086
5087 /* When requests are passed down from the linux-nat layer to the
5088 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5089 used. The address space pointer is stored in the inferior object,
5090 but the common code that is passed such ptid can't tell whether
5091 lwpid is a "main" process id or not (it assumes so). We reverse
5092 look up the "main" process id from the lwp here. */
5093
5094 struct address_space *
5095 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5096 {
5097 struct lwp_info *lwp;
5098 struct inferior *inf;
5099 int pid;
5100
5101 pid = GET_LWP (ptid);
5102 if (GET_LWP (ptid) == 0)
5103 {
5104 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5105 tgid. */
5106 lwp = find_lwp_pid (ptid);
5107 pid = GET_PID (lwp->ptid);
5108 }
5109 else
5110 {
5111 /* A (pid,lwpid,0) ptid. */
5112 pid = GET_PID (ptid);
5113 }
5114
5115 inf = find_inferior_pid (pid);
5116 gdb_assert (inf != NULL);
5117 return inf->aspace;
5118 }
5119
5120 int
5121 linux_nat_core_of_thread_1 (ptid_t ptid)
5122 {
5123 struct cleanup *back_to;
5124 char *filename;
5125 FILE *f;
5126 char *content = NULL;
5127 char *p;
5128 char *ts = 0;
5129 int content_read = 0;
5130 int i;
5131 int core;
5132
5133 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5134 GET_PID (ptid), GET_LWP (ptid));
5135 back_to = make_cleanup (xfree, filename);
5136
5137 f = fopen (filename, "r");
5138 if (!f)
5139 {
5140 do_cleanups (back_to);
5141 return -1;
5142 }
5143
5144 make_cleanup_fclose (f);
5145
5146 for (;;)
5147 {
5148 int n;
5149
5150 content = xrealloc (content, content_read + 1024);
5151 n = fread (content + content_read, 1, 1024, f);
5152 content_read += n;
5153 if (n < 1024)
5154 {
5155 content[content_read] = '\0';
5156 break;
5157 }
5158 }
5159
5160 make_cleanup (xfree, content);
5161
5162 p = strchr (content, '(');
5163
5164 /* Skip ")". */
5165 if (p != NULL)
5166 p = strchr (p, ')');
5167 if (p != NULL)
5168 p++;
5169
5170 /* If the first field after program name has index 0, then core number is
5171 the field with index 36. There's no constant for that anywhere. */
5172 if (p != NULL)
5173 p = strtok_r (p, " ", &ts);
5174 for (i = 0; p != NULL && i != 36; ++i)
5175 p = strtok_r (NULL, " ", &ts);
5176
5177 if (p == NULL || sscanf (p, "%d", &core) == 0)
5178 core = -1;
5179
5180 do_cleanups (back_to);
5181
5182 return core;
5183 }
5184
5185 /* Return the cached value of the processor core for thread PTID. */
5186
5187 int
5188 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5189 {
5190 struct lwp_info *info = find_lwp_pid (ptid);
5191
5192 if (info)
5193 return info->core;
5194 return -1;
5195 }
5196
5197 void
5198 linux_nat_add_target (struct target_ops *t)
5199 {
5200 /* Save the provided single-threaded target. We save this in a separate
5201 variable because another target we've inherited from (e.g. inf-ptrace)
5202 may have saved a pointer to T; we want to use it for the final
5203 process stratum target. */
5204 linux_ops_saved = *t;
5205 linux_ops = &linux_ops_saved;
5206
5207 /* Override some methods for multithreading. */
5208 t->to_create_inferior = linux_nat_create_inferior;
5209 t->to_attach = linux_nat_attach;
5210 t->to_detach = linux_nat_detach;
5211 t->to_resume = linux_nat_resume;
5212 t->to_wait = linux_nat_wait;
5213 t->to_pass_signals = linux_nat_pass_signals;
5214 t->to_xfer_partial = linux_nat_xfer_partial;
5215 t->to_kill = linux_nat_kill;
5216 t->to_mourn_inferior = linux_nat_mourn_inferior;
5217 t->to_thread_alive = linux_nat_thread_alive;
5218 t->to_pid_to_str = linux_nat_pid_to_str;
5219 t->to_thread_name = linux_nat_thread_name;
5220 t->to_has_thread_control = tc_schedlock;
5221 t->to_thread_address_space = linux_nat_thread_address_space;
5222 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5223 t->to_stopped_data_address = linux_nat_stopped_data_address;
5224
5225 t->to_can_async_p = linux_nat_can_async_p;
5226 t->to_is_async_p = linux_nat_is_async_p;
5227 t->to_supports_non_stop = linux_nat_supports_non_stop;
5228 t->to_async = linux_nat_async;
5229 t->to_terminal_inferior = linux_nat_terminal_inferior;
5230 t->to_terminal_ours = linux_nat_terminal_ours;
5231 t->to_close = linux_nat_close;
5232
5233 /* Methods for non-stop support. */
5234 t->to_stop = linux_nat_stop;
5235
5236 t->to_supports_multi_process = linux_nat_supports_multi_process;
5237
5238 t->to_supports_disable_randomization
5239 = linux_nat_supports_disable_randomization;
5240
5241 t->to_core_of_thread = linux_nat_core_of_thread;
5242
5243 /* We don't change the stratum; this target will sit at
5244 process_stratum and thread_db will set at thread_stratum. This
5245 is a little strange, since this is a multi-threaded-capable
5246 target, but we want to be on the stack below thread_db, and we
5247 also want to be used for single-threaded processes. */
5248
5249 add_target (t);
5250 }
5251
5252 /* Register a method to call whenever a new thread is attached. */
5253 void
5254 linux_nat_set_new_thread (struct target_ops *t,
5255 void (*new_thread) (struct lwp_info *))
5256 {
5257 /* Save the pointer. We only support a single registered instance
5258 of the GNU/Linux native target, so we do not need to map this to
5259 T. */
5260 linux_nat_new_thread = new_thread;
5261 }
5262
5263 /* Register a method that converts a siginfo object between the layout
5264 that ptrace returns, and the layout in the architecture of the
5265 inferior. */
5266 void
5267 linux_nat_set_siginfo_fixup (struct target_ops *t,
5268 int (*siginfo_fixup) (struct siginfo *,
5269 gdb_byte *,
5270 int))
5271 {
5272 /* Save the pointer. */
5273 linux_nat_siginfo_fixup = siginfo_fixup;
5274 }
5275
5276 /* Register a method to call prior to resuming a thread. */
5277
5278 void
5279 linux_nat_set_prepare_to_resume (struct target_ops *t,
5280 void (*prepare_to_resume) (struct lwp_info *))
5281 {
5282 /* Save the pointer. */
5283 linux_nat_prepare_to_resume = prepare_to_resume;
5284 }
5285
5286 /* Return the saved siginfo associated with PTID. */
5287 struct siginfo *
5288 linux_nat_get_siginfo (ptid_t ptid)
5289 {
5290 struct lwp_info *lp = find_lwp_pid (ptid);
5291
5292 gdb_assert (lp != NULL);
5293
5294 return &lp->siginfo;
5295 }
5296
5297 /* Provide a prototype to silence -Wmissing-prototypes. */
5298 extern initialize_file_ftype _initialize_linux_nat;
5299
5300 void
5301 _initialize_linux_nat (void)
5302 {
5303 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5304 &debug_linux_nat, _("\
5305 Set debugging of GNU/Linux lwp module."), _("\
5306 Show debugging of GNU/Linux lwp module."), _("\
5307 Enables printf debugging output."),
5308 NULL,
5309 show_debug_linux_nat,
5310 &setdebuglist, &showdebuglist);
5311
5312 /* Save this mask as the default. */
5313 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5314
5315 /* Install a SIGCHLD handler. */
5316 sigchld_action.sa_handler = sigchld_handler;
5317 sigemptyset (&sigchld_action.sa_mask);
5318 sigchld_action.sa_flags = SA_RESTART;
5319
5320 /* Make it the default. */
5321 sigaction (SIGCHLD, &sigchld_action, NULL);
5322
5323 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5324 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5325 sigdelset (&suspend_mask, SIGCHLD);
5326
5327 sigemptyset (&blocked_mask);
5328 }
5329 \f
5330
5331 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5332 the GNU/Linux Threads library and therefore doesn't really belong
5333 here. */
5334
5335 /* Read variable NAME in the target and return its value if found.
5336 Otherwise return zero. It is assumed that the type of the variable
5337 is `int'. */
5338
5339 static int
5340 get_signo (const char *name)
5341 {
5342 struct minimal_symbol *ms;
5343 int signo;
5344
5345 ms = lookup_minimal_symbol (name, NULL, NULL);
5346 if (ms == NULL)
5347 return 0;
5348
5349 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5350 sizeof (signo)) != 0)
5351 return 0;
5352
5353 return signo;
5354 }
5355
5356 /* Return the set of signals used by the threads library in *SET. */
5357
5358 void
5359 lin_thread_get_thread_signals (sigset_t *set)
5360 {
5361 struct sigaction action;
5362 int restart, cancel;
5363
5364 sigemptyset (&blocked_mask);
5365 sigemptyset (set);
5366
5367 restart = get_signo ("__pthread_sig_restart");
5368 cancel = get_signo ("__pthread_sig_cancel");
5369
5370 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5371 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5372 not provide any way for the debugger to query the signal numbers -
5373 fortunately they don't change! */
5374
5375 if (restart == 0)
5376 restart = __SIGRTMIN;
5377
5378 if (cancel == 0)
5379 cancel = __SIGRTMIN + 1;
5380
5381 sigaddset (set, restart);
5382 sigaddset (set, cancel);
5383
5384 /* The GNU/Linux Threads library makes terminating threads send a
5385 special "cancel" signal instead of SIGCHLD. Make sure we catch
5386 those (to prevent them from terminating GDB itself, which is
5387 likely to be their default action) and treat them the same way as
5388 SIGCHLD. */
5389
5390 action.sa_handler = sigchld_handler;
5391 sigemptyset (&action.sa_mask);
5392 action.sa_flags = SA_RESTART;
5393 sigaction (cancel, &action, NULL);
5394
5395 /* We block the "cancel" signal throughout this code ... */
5396 sigaddset (&blocked_mask, cancel);
5397 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5398
5399 /* ... except during a sigsuspend. */
5400 sigdelset (&suspend_mask, cancel);
5401 }
This page took 0.199747 seconds and 5 git commands to generate.