/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc. */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc. */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"
#include "linux-osdata.h"
#include "linux-tdep.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.  Prior to
version 2.4, Linux can either wait for events in the main thread, or in
secondary threads.  (2.4 has the __WALL flag).  So, if we use blocking
waitpid, we might miss an event.  The solution is to use non-blocking waitpid,
together with sigsuspend.  First, we use non-blocking waitpid to get an event
in the main process, if any.  Second, we use non-blocking waitpid with the
__WCLONE flag to check for events in cloned processes.  If nothing is found,
we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
something happened to a child process -- and SIGCHLD will be delivered both
for events in the main debugged process and in cloned processes.  As soon as
we know there's an event, we get back to calling non-blocking waitpid with and
without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered, so
that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
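
/* For illustration, a minimal stand-alone sketch of the self-pipe
   trick described above.  All names in it (event_pipe, handle_sigchld)
   are hypothetical and not part of this file; the block is guarded out
   so it does not affect compilation.  */
#if 0
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/select.h>

static int event_pipe[2];

/* SIGCHLD handler: do only async-signal-safe work; just mark the pipe.  */
static void
handle_sigchld (int sig)
{
  write (event_pipe[1], "+", 1);
}

int
main (void)
{
  fd_set readfds;
  char buf;

  pipe (event_pipe);
  /* Non-blocking ends, so a full pipe never wedges the handler.  */
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, handle_sigchld);

  for (;;)
    {
      FD_ZERO (&readfds);
      FD_SET (event_pipe[0], &readfds);
      /* FD_SET other event sources here too, e.g. stdin.  */
      if (select (event_pipe[0] + 1, &readfds, NULL, NULL, NULL) > 0
          && FD_ISSET (event_pipe[0], &readfds))
        {
          /* Drain the pipe, then collect the actual target events
             with non-blocking waitpid.  */
          while (read (event_pipe[0], &buf, 1) > 0)
            ;
        }
    }
}
#endif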

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return plain SIGTRAP,
   but instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
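
/* For illustration: with PTRACE_O_TRACESYSGOOD in effect, a tracer's
   wait loop can tell syscall stops from other traps roughly like so
   (a sketch, not code from this file):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
       ... syscall entry or exit stop ...
     else if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
       ... breakpoint or other trap ...  */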

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the
   event pipe, so that the select/poll in the event-loop realizes we
   have something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}
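
/* These two pair up: the SIGCHLD handler calls async_file_mark to wake
   the event loop, and the wait code calls async_file_flush once it is
   about to collect events with waitpid, per the self-pipe scheme
   described at the top of this file.  */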

static void linux_nat_async (void (*callback)
			     (enum inferior_event_type event_type,
			      void *context),
			     void *context);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
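
/* Usage sketch (hypothetical PID): a stop noticed before its
   PTRACE_EVENT_CLONE arrives is stashed with

     add_to_pid_list (&stopped_pids, lwpid, status);

   and the extended-event code later claims it with

     if (pull_pid_from_list (&stopped_pids, lwpid, &status))
       ... use the stashed wait status ...  */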

\f
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
	   status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  restore_child_signals_mask (&prev_mask);
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed "
		   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected "
		   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: "
		       "failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  struct cleanup *old_chain;

	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep the breakpoints list in sync.  */
	      remove_breakpoints_pid (GET_PID (inferior_ptid));
	    }

	  if (info_verbose || debug_linux_nat)
	    {
	      target_terminal_ours ();
	      fprintf_filtered (gdb_stdlog,
				"Detaching after fork from "
				"child process %d.\n",
				child_pid);
	    }

	  old_chain = save_inferior_ptid ();
	  inferior_ptid = ptid_build (child_pid, child_pid, 0);

	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->last_resume_kind = resume_stop;
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  /* CHILD_LP has a new PID, therefore linux_nat_new_thread is
	     not called for it.  See i386_inferior_data_get for the
	     Linux kernel specifics.  Ensure linux_nat_prepare_to_resume
	     will reset the hardware debug registers.  It is done by the
	     linux_nat_new_thread call, which is being skipped in
	     add_lwp above for the first lwp of a pid.  */
	  gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
	  if (linux_nat_new_thread != NULL)
	    linux_nat_new_thread (child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);
	  ptrace (PTRACE_DETACH, child_pid, 0, 0);

	  do_cleanups (old_chain);
	}
      else
	{
	  struct inferior *parent_inf, *child_inf;
	  struct cleanup *old_chain;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_pid);

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);

	  old_chain = save_inferior_ptid ();
	  save_current_program_space ();

	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
	  add_thread (inferior_ptid);
	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->last_resume_kind = resume_stop;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = add_program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (solib-svr4) learn about
		 this new process, relocate the cloned exec, pull in
		 shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }

	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();

	  do_cleanups (old_chain);
	}

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;

	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork_flag >= 0);

	  if (linux_supports_tracevforkdone (0))
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypass actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_can_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *child_lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
	{
	  target_terminal_ours ();
	  if (has_vforked)
	    fprintf_filtered (gdb_stdlog,
			      _("Attaching after process %d "
				"vfork to child process %d.\n"),
			      parent_pid, child_pid);
	  else
	    fprintf_filtered (gdb_stdlog,
			      _("Attaching after process %d "
				"fork to child process %d.\n"),
			      parent_pid, child_pid);
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse its program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = add_program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (solib-svr4) learn about
	     this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
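
/* For illustration, the first peculiarity above leads to the
   double-waitpid pattern used throughout this file (a sketch;
   my_waitpid is defined above, LWPID is hypothetical):

     pid = my_waitpid (lwpid, &status, WNOHANG);
     if (pid == -1 && errno == ECHILD)
       pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);  */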

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
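
/* For illustration, the sync-mode wait described at the top of this
   file combines these helpers with sigsuspend roughly like so (a
   sketch, not a verbatim excerpt; suspend_mask is defined above):

     sigset_t prev_mask;

     block_child_signals (&prev_mask);
     while (my_waitpid (pid, &status, WNOHANG) <= 0
	    && my_waitpid (pid, &status, __WCLONE | WNOHANG) <= 0)
       sigsuspend (&suspend_mask);	-- wait here for SIGCHLD
     restore_child_signals_mask (&prev_mask);  */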

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = target_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);

\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
	snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
		  strsignal (SIGTRAP));
      else
	snprintf (buf, sizeof (buf), "%s (stopped)",
		  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  lwp_free (lp);
	}
      else
	lpprev = lp;
    }
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  Don't do this for the first
     thread though.  If we're spawning a child ("run"), the thread
     executes the shell wrapper first, and we shouldn't touch it until
     it execs the program we want to debug.  For "attach", it'd be
     okay to call the callback, but it's not necessary, because
     watchpoints can't yet have been inserted into the inferior.  */
  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   int (*callback) (struct lwp_info *, void *),
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data))
	    return lp;
	}
    }

  return NULL;
}

/* Iterate like iterate_over_lwps does, except that when forking off a
   child, call CALLBACK with CALLBACK_DATA specifically only for that
   new child PID.  */

void
linux_nat_iterate_watchpoint_lwps
  (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
{
  int inferior_pid = ptid_get_pid (inferior_ptid);
  struct inferior *inf = current_inferior ();

  if (inf->pid == inferior_pid)
    {
      /* Iterate all the threads of the current inferior.  Without specifying
	 INFERIOR_PID it would iterate all threads of all inferiors, which is
	 inappropriate for watchpoints.  */

      iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
    }
  else
    {
      /* Detaching a new child PID temporarily present in INFERIOR_PID.  */

      struct lwp_info *child_lp;
      struct cleanup *old_chain;
      pid_t child_pid = GET_PID (inferior_ptid);
      ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);

      gdb_assert (!is_lwp (inferior_ptid));
      gdb_assert (find_lwp_pid (child_ptid) == NULL);
      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
      old_chain = make_cleanup (delete_lwp_cleanup, child_lp);

      callback (child_lp, callback_data);

      do_cleanups (old_chain);
    }
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      while (fgets (buf, sizeof (buf), status_file))
	{
	  if (strncmp (buf, "State:", 6) == 0)
	    {
	      have_state = 1;
	      break;
	    }
	}
      if (have_state && strstr (buf, "T (stopped)") != NULL)
	retval = 1;
      fclose (status_file);
    }
  return retval;
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  sigset_t prev_mask;
  int lwpid;

  gdb_assert (is_lwp (ptid));

  block_child_signals (&prev_mask);

  lp = find_lwp_pid (ptid);
  lwpid = GET_LWP (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork_flag)
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  restore_child_signals_mask (&prev_mask);
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);

		      restore_child_signals_mask (&prev_mask);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  restore_child_signals_mask (&prev_mask);
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	{
	  restore_child_signals_mask (&prev_mask);
	  return 1;
	}

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  restore_child_signals_mask (&prev_mask);
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_attach (ops, args, from_tty);

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum target_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = target_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 target_signal_to_name (signo),
		 target_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum target_signal signo = TARGET_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = TARGET_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = target_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == TARGET_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    target_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (target_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    target_signal_to_string (signo));
    }

  return 0;
}
1788
1789 static int
1790 detach_callback (struct lwp_info *lp, void *data)
1791 {
1792 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1793
1794 if (debug_linux_nat && lp->status)
1795 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1796 strsignal (WSTOPSIG (lp->status)),
1797 target_pid_to_str (lp->ptid));
1798
1799 /* If there is a pending SIGSTOP, get rid of it. */
1800 if (lp->signalled)
1801 {
1802 if (debug_linux_nat)
1803 fprintf_unfiltered (gdb_stdlog,
1804 "DC: Sending SIGCONT to %s\n",
1805 target_pid_to_str (lp->ptid));
1806
1807 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1808 lp->signalled = 0;
1809 }
1810
1811 /* We don't actually detach from the LWP that has an id equal to the
1812 overall process id just yet. */
1813 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1814 {
1815 int status = 0;
1816
1817 /* Pass on any pending signal for this LWP. */
1818 get_pending_status (lp, &status);
1819
1820 if (linux_nat_prepare_to_resume != NULL)
1821 linux_nat_prepare_to_resume (lp);
1822 errno = 0;
1823 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1824 WSTOPSIG (status)) < 0)
1825 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1826 safe_strerror (errno));
1827
1828 if (debug_linux_nat)
1829 fprintf_unfiltered (gdb_stdlog,
1830 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1831 target_pid_to_str (lp->ptid),
1832 strsignal (WSTOPSIG (status)));
1833
1834 delete_lwp (lp->ptid);
1835 }
1836
1837 return 0;
1838 }
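
/* A minimal standalone sketch (compiled out; illustrative only) of
   the detach-with-signal idiom used above: the fourth ptrace
   argument to PTRACE_DETACH is interpreted as for PTRACE_CONT, i.e.
   a signal delivered to the thread as it is released, so a signal
   GDB intercepted but never reported back is not silently lost. */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
detach_with_signal (pid_t tid, int pending_sig)
{
  /* PENDING_SIG may be 0, meaning nothing to deliver on detach.  */
  return ptrace (PTRACE_DETACH, tid, (void *) 0,
		 (void *) (long) pending_sig);
}
#endif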
1839
1840 static void
1841 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1842 {
1843 int pid;
1844 int status;
1845 struct lwp_info *main_lwp;
1846
1847 pid = GET_PID (inferior_ptid);
1848
1849 if (target_can_async_p ())
1850 linux_nat_async (NULL, 0);
1851
1852 /* Stop all threads before detaching. ptrace requires that the
1853 thread is stopped to successfully detach. */
1854 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1855 /* ... and wait until all of them have reported back that
1856 they're no longer running. */
1857 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1858
1859 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1860
1861 /* Only the initial process should be left right now. */
1862 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1863
1864 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1865
1866 /* Pass on any pending signal for the last LWP. */
1867 if ((args == NULL || *args == '\0')
1868 && get_pending_status (main_lwp, &status) != -1
1869 && WIFSTOPPED (status))
1870 {
1871 /* Put the signal number in ARGS so that inf_ptrace_detach will
1872 pass it along with PTRACE_DETACH. */
1873 args = alloca (8);
1874 sprintf (args, "%d", (int) WSTOPSIG (status));
1875 if (debug_linux_nat)
1876 fprintf_unfiltered (gdb_stdlog,
1877 "LND: Sending signal %s to %s\n",
1878 args,
1879 target_pid_to_str (main_lwp->ptid));
1880 }
1881
1882 if (linux_nat_prepare_to_resume != NULL)
1883 linux_nat_prepare_to_resume (main_lwp);
1884 delete_lwp (main_lwp->ptid);
1885
1886 if (forks_exist_p ())
1887 {
1888 /* Multi-fork case. The current inferior_ptid is being detached
1889 from, but there are other viable forks to debug. Detach from
1890 the current fork, and context-switch to the first
1891 available. */
1892 linux_fork_detach (args, from_tty);
1893
1894 if (non_stop && target_can_async_p ())
1895 target_async (inferior_event_handler, 0);
1896 }
1897 else
1898 linux_ops->to_detach (ops, args, from_tty);
1899 }
1900
1901 /* Resume LP. */
1902
1903 static void
1904 resume_lwp (struct lwp_info *lp, int step)
1905 {
1906 if (lp->stopped)
1907 {
1908 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1909
1910 if (inf->vfork_child != NULL)
1911 {
1912 if (debug_linux_nat)
1913 fprintf_unfiltered (gdb_stdlog,
1914 "RC: Not resuming %s (vfork parent)\n",
1915 target_pid_to_str (lp->ptid));
1916 }
1917 else if (lp->status == 0
1918 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1919 {
1920 if (debug_linux_nat)
1921 fprintf_unfiltered (gdb_stdlog,
1922 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1923 target_pid_to_str (lp->ptid));
1924
1925 if (linux_nat_prepare_to_resume != NULL)
1926 linux_nat_prepare_to_resume (lp);
1927 linux_ops->to_resume (linux_ops,
1928 pid_to_ptid (GET_LWP (lp->ptid)),
1929 step, TARGET_SIGNAL_0);
1930 lp->stopped = 0;
1931 lp->step = step;
1932 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1933 lp->stopped_by_watchpoint = 0;
1934 }
1935 else
1936 {
1937 if (debug_linux_nat)
1938 fprintf_unfiltered (gdb_stdlog,
1939 "RC: Not resuming sibling %s (has pending)\n",
1940 target_pid_to_str (lp->ptid));
1941 }
1942 }
1943 else
1944 {
1945 if (debug_linux_nat)
1946 fprintf_unfiltered (gdb_stdlog,
1947 "RC: Not resuming sibling %s (not stopped)\n",
1948 target_pid_to_str (lp->ptid));
1949 }
1950 }
1951
1952 static int
1953 resume_callback (struct lwp_info *lp, void *data)
1954 {
1955 resume_lwp (lp, 0);
1956 return 0;
1957 }
1958
1959 static int
1960 resume_clear_callback (struct lwp_info *lp, void *data)
1961 {
1962 lp->resumed = 0;
1963 lp->last_resume_kind = resume_stop;
1964 return 0;
1965 }
1966
1967 static int
1968 resume_set_callback (struct lwp_info *lp, void *data)
1969 {
1970 lp->resumed = 1;
1971 lp->last_resume_kind = resume_continue;
1972 return 0;
1973 }
1974
1975 static void
1976 linux_nat_resume (struct target_ops *ops,
1977 ptid_t ptid, int step, enum target_signal signo)
1978 {
1979 sigset_t prev_mask;
1980 struct lwp_info *lp;
1981 int resume_many;
1982
1983 if (debug_linux_nat)
1984 fprintf_unfiltered (gdb_stdlog,
1985 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1986 step ? "step" : "resume",
1987 target_pid_to_str (ptid),
1988 (signo != TARGET_SIGNAL_0
1989 ? strsignal (target_signal_to_host (signo)) : "0"),
1990 target_pid_to_str (inferior_ptid));
1991
1992 block_child_signals (&prev_mask);
1993
1994 /* A specific PTID means `step only this process id'. */
1995 resume_many = (ptid_equal (minus_one_ptid, ptid)
1996 || ptid_is_pid (ptid));
1997
1998 /* Mark the lwps we're resuming as resumed. */
1999 iterate_over_lwps (ptid, resume_set_callback, NULL);
2000
2001 /* See if it's the current inferior that should be handled
2002 specially. */
2003 if (resume_many)
2004 lp = find_lwp_pid (inferior_ptid);
2005 else
2006 lp = find_lwp_pid (ptid);
2007 gdb_assert (lp != NULL);
2008
2009 /* Remember if we're stepping. */
2010 lp->step = step;
2011 lp->last_resume_kind = step ? resume_step : resume_continue;
2012
2013 /* If we have a pending wait status for this thread, there is no
2014 point in resuming the process. But first make sure that
2015 linux_nat_wait won't preemptively handle the event - we
2016 should never take this short-circuit if we are going to
2017 leave LP running, since we have skipped resuming all the
2018 other threads. This bit of code needs to be synchronized
2019 with linux_nat_wait. */
2020
2021 if (lp->status && WIFSTOPPED (lp->status))
2022 {
2023 if (!lp->step
2024 && WSTOPSIG (lp->status)
2025 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
2026 {
2027 if (debug_linux_nat)
2028 fprintf_unfiltered (gdb_stdlog,
2029 "LLR: Not short circuiting for ignored "
2030 "status 0x%x\n", lp->status);
2031
2032 /* FIXME: What should we do if we are supposed to continue
2033 this thread with a signal? */
2034 gdb_assert (signo == TARGET_SIGNAL_0);
2035 signo = target_signal_from_host (WSTOPSIG (lp->status));
2036 lp->status = 0;
2037 }
2038 }
2039
2040 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2041 {
2042 /* FIXME: What should we do if we are supposed to continue
2043 this thread with a signal? */
2044 gdb_assert (signo == TARGET_SIGNAL_0);
2045
2046 if (debug_linux_nat)
2047 fprintf_unfiltered (gdb_stdlog,
2048 "LLR: Short circuiting for status 0x%x\n",
2049 lp->status);
2050
2051 restore_child_signals_mask (&prev_mask);
2052 if (target_can_async_p ())
2053 {
2054 target_async (inferior_event_handler, 0);
2055 /* Tell the event loop we have something to process. */
2056 async_file_mark ();
2057 }
2058 return;
2059 }
2060
2061 /* Mark LWP as not stopped to prevent it from being continued by
2062 resume_callback. */
2063 lp->stopped = 0;
2064
2065 if (resume_many)
2066 iterate_over_lwps (ptid, resume_callback, NULL);
2067
2068 /* Convert to something the lower layer understands. */
2069 ptid = pid_to_ptid (GET_LWP (lp->ptid));
2070
2071 if (linux_nat_prepare_to_resume != NULL)
2072 linux_nat_prepare_to_resume (lp);
2073 linux_ops->to_resume (linux_ops, ptid, step, signo);
2074 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2075 lp->stopped_by_watchpoint = 0;
2076
2077 if (debug_linux_nat)
2078 fprintf_unfiltered (gdb_stdlog,
2079 "LLR: %s %s, %s (resume event thread)\n",
2080 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2081 target_pid_to_str (ptid),
2082 (signo != TARGET_SIGNAL_0
2083 ? strsignal (target_signal_to_host (signo)) : "0"));
2084
2085 restore_child_signals_mask (&prev_mask);
2086 if (target_can_async_p ())
2087 target_async (inferior_event_handler, 0);
2088 }
2089
2090 /* Send a signal to an LWP. */
2091
2092 static int
2093 kill_lwp (int lwpid, int signo)
2094 {
2095 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2096 fails, then we are not using nptl threads and we should be using kill. */
2097
2098 #ifdef HAVE_TKILL_SYSCALL
2099 {
2100 static int tkill_failed;
2101
2102 if (!tkill_failed)
2103 {
2104 int ret;
2105
2106 errno = 0;
2107 ret = syscall (__NR_tkill, lwpid, signo);
2108 if (errno != ENOSYS)
2109 return ret;
2110 tkill_failed = 1;
2111 }
2112 }
2113 #endif
2114
2115 return kill (lwpid, signo);
2116 }
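
/* A minimal standalone sketch (compiled out; illustrative only) of
   why tkill is preferred above: kill(2) addresses a whole process
   (thread group), while tkill(2) addresses one specific thread,
   which is what we need when aiming SIGSTOP at a single LWP. Old
   kernels lack the syscall, hence the ENOSYS fallback. */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
signal_one_thread (pid_t tid, int sig)
{
  errno = 0;
  if (syscall (__NR_tkill, tid, sig) == 0)
    return 0;
  if (errno != ENOSYS)
    return -1;
  /* Pre-tkill kernel: fall back to addressing the group.  */
  return kill (tid, sig);
}
#endif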
2117
2118 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2119 event, check if the core is interested in it: if not, ignore the
2120 event, and keep waiting; otherwise, we need to toggle the LWP's
2121 syscall entry/exit status, since the ptrace event itself doesn't
2122 indicate it, and report the trap to higher layers. */
2123
2124 static int
2125 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2126 {
2127 struct target_waitstatus *ourstatus = &lp->waitstatus;
2128 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2129 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2130
2131 if (stopping)
2132 {
2133 /* If we're stopping threads, there's a SIGSTOP pending, which
2134 makes it so that the LWP reports an immediate syscall return,
2135 followed by the SIGSTOP. Skip seeing that "return" using
2136 PTRACE_CONT directly, and let stop_wait_callback collect the
2137 SIGSTOP. Later, when the thread is resumed, a new syscall
2138 entry event is reported. If we didn't do this (and returned
2139 0), we'd leave a syscall entry pending, and our caller, by using
2140 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2141 itself. Later, when the user re-resumes this LWP, we'd see
2142 another syscall entry event and we'd mistake it for a return.
2143
2144 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2145 (leaving immediately with LWP->signalled set, without issuing
2146 a PTRACE_CONT), it would still be problematic to leave this
2147 syscall enter pending, as later when the thread is resumed,
2148 it would then see the same syscall exit mentioned above,
2149 followed by the delayed SIGSTOP, while the syscall didn't
2150 actually get to execute. It seems it would be even more
2151 confusing to the user. */
2152
2153 if (debug_linux_nat)
2154 fprintf_unfiltered (gdb_stdlog,
2155 "LHST: ignoring syscall %d "
2156 "for LWP %ld (stopping threads), "
2157 "resuming with PTRACE_CONT for SIGSTOP\n",
2158 syscall_number,
2159 GET_LWP (lp->ptid));
2160
2161 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2162 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2163 return 1;
2164 }
2165
2166 if (catch_syscall_enabled ())
2167 {
2168 /* Always update the entry/return state, even if this particular
2169 syscall isn't interesting to the core now. In async mode,
2170 the user could install a new catchpoint for this syscall
2171 between syscall enter/return, and we'll need to know to
2172 report a syscall return if that happens. */
2173 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2174 ? TARGET_WAITKIND_SYSCALL_RETURN
2175 : TARGET_WAITKIND_SYSCALL_ENTRY);
2176
2177 if (catching_syscall_number (syscall_number))
2178 {
2179 /* Alright, an event to report. */
2180 ourstatus->kind = lp->syscall_state;
2181 ourstatus->value.syscall_number = syscall_number;
2182
2183 if (debug_linux_nat)
2184 fprintf_unfiltered (gdb_stdlog,
2185 "LHST: stopping for %s of syscall %d"
2186 " for LWP %ld\n",
2187 lp->syscall_state
2188 == TARGET_WAITKIND_SYSCALL_ENTRY
2189 ? "entry" : "return",
2190 syscall_number,
2191 GET_LWP (lp->ptid));
2192 return 0;
2193 }
2194
2195 if (debug_linux_nat)
2196 fprintf_unfiltered (gdb_stdlog,
2197 "LHST: ignoring %s of syscall %d "
2198 "for LWP %ld\n",
2199 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2200 ? "entry" : "return",
2201 syscall_number,
2202 GET_LWP (lp->ptid));
2203 }
2204 else
2205 {
2206 /* If we had been syscall tracing, and hence used PT_SYSCALL
2207 before on this LWP, it could happen that the user removes all
2208 syscall catchpoints before we get to process this event.
2209 There are two noteworthy issues here:
2210
2211 - When stopped at a syscall entry event, resuming with
2212 PT_STEP still resumes executing the syscall and reports a
2213 syscall return.
2214
2215 - Only PT_SYSCALL catches syscall enters. If we last
2216 single-stepped this thread, then this event can't be a
2217 syscall enter; since we last single-stepped it, this
2218 has to be a syscall exit.
2219
2220 The points above mean that the next resume, be it PT_STEP or
2221 PT_CONTINUE, cannot trigger a syscall trace event. */
2222 if (debug_linux_nat)
2223 fprintf_unfiltered (gdb_stdlog,
2224 "LHST: caught syscall event "
2225 "with no syscall catchpoints."
2226 " %d for LWP %ld, ignoring\n",
2227 syscall_number,
2228 GET_LWP (lp->ptid));
2229 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2230 }
2231
2232 /* The core isn't interested in this event. For efficiency, avoid
2233 stopping all threads only to have the core resume them all again.
2234 Since we're not stopping threads, if we're still syscall tracing
2235 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2236 subsequent syscall. Simply resume using the inf-ptrace layer,
2237 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2238
2239 /* Note that gdbarch_get_syscall_number may access registers, hence
2240 fill a regcache. */
2241 registers_changed ();
2242 if (linux_nat_prepare_to_resume != NULL)
2243 linux_nat_prepare_to_resume (lp);
2244 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2245 lp->step, TARGET_SIGNAL_0);
2246 return 1;
2247 }
2248
2249 /* Handle a GNU/Linux extended wait response. If we see a clone
2250 event, we need to add the new LWP to our list (and not report the
2251 trap to higher layers). This function returns non-zero if the
2252 event should be ignored and we should wait again. If STOPPING is
2253 true, the new LWP remains stopped, otherwise it is continued. */
2254
2255 static int
2256 linux_handle_extended_wait (struct lwp_info *lp, int status,
2257 int stopping)
2258 {
2259 int pid = GET_LWP (lp->ptid);
2260 struct target_waitstatus *ourstatus = &lp->waitstatus;
2261 int event = status >> 16;
2262
2263 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2264 || event == PTRACE_EVENT_CLONE)
2265 {
2266 unsigned long new_pid;
2267 int ret;
2268
2269 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2270
2271 /* If we haven't already seen the new PID stop, wait for it now. */
2272 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2273 {
2274 /* The new child has a pending SIGSTOP. We can't affect it until it
2275 hits the SIGSTOP, but we're already attached. */
2276 ret = my_waitpid (new_pid, &status,
2277 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2278 if (ret == -1)
2279 perror_with_name (_("waiting for new child"));
2280 else if (ret != new_pid)
2281 internal_error (__FILE__, __LINE__,
2282 _("wait returned unexpected PID %d"), ret);
2283 else if (!WIFSTOPPED (status))
2284 internal_error (__FILE__, __LINE__,
2285 _("wait returned unexpected status 0x%x"), status);
2286 }
2287
2288 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2289
2290 if (event == PTRACE_EVENT_FORK
2291 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2292 {
2293 /* Handle checkpointing by linux-fork.c here as a special
2294 case. We don't want the follow-fork-mode or 'catch fork'
2295 to interfere with this. */
2296
2297 /* This won't actually modify the breakpoint list, but will
2298 physically remove the breakpoints from the child. */
2299 detach_breakpoints (new_pid);
2300
2301 /* Retain child fork in ptrace (stopped) state. */
2302 if (!find_fork_pid (new_pid))
2303 add_fork (new_pid);
2304
2305 /* Report as spurious, so that infrun doesn't want to follow
2306 this fork. We're actually doing an infcall in
2307 linux-fork.c. */
2308 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2309 linux_enable_event_reporting (pid_to_ptid (new_pid));
2310
2311 /* Report the stop to the core. */
2312 return 0;
2313 }
2314
2315 if (event == PTRACE_EVENT_FORK)
2316 ourstatus->kind = TARGET_WAITKIND_FORKED;
2317 else if (event == PTRACE_EVENT_VFORK)
2318 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2319 else
2320 {
2321 struct lwp_info *new_lp;
2322
2323 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2324
2325 if (debug_linux_nat)
2326 fprintf_unfiltered (gdb_stdlog,
2327 "LHEW: Got clone event "
2328 "from LWP %d, new child is LWP %ld\n",
2329 pid, new_pid);
2330
2331 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2332 new_lp->cloned = 1;
2333 new_lp->stopped = 1;
2334
2335 if (WSTOPSIG (status) != SIGSTOP)
2336 {
2337 /* This can happen if someone starts sending signals with
2338 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
2339 thread before it gets a chance to run.
2340 This is an unlikely case, and harder to handle for
2341 fork / vfork than for clone, so we do not try - but
2342 we handle it for clone events here. We'll send
2343 the other signal on to the thread below. */
2344
2345 new_lp->signalled = 1;
2346 }
2347 else
2348 {
2349 struct thread_info *tp;
2350
2351 /* When we stop for an event in some other thread, and
2352 pull the thread list just as this thread has cloned,
2353 we'll have seen the new thread in the thread_db list
2354 before handling the CLONE event (glibc's
2355 pthread_create adds the new thread to the thread list
2356 before clone'ing, and has the kernel fill in the
2357 thread's tid on the clone call with
2358 CLONE_PARENT_SETTID). If that happened, and the core
2359 had requested the new thread to stop, we'll have
2360 killed it with SIGSTOP. But since SIGSTOP is not an
2361 RT signal, it can only be queued once. We need to be
2362 careful to not resume the LWP if we wanted it to
2363 stop. In that case, we'll leave the SIGSTOP pending.
2364 It will later be reported as TARGET_SIGNAL_0. */
2365 tp = find_thread_ptid (new_lp->ptid);
2366 if (tp != NULL && tp->stop_requested)
2367 new_lp->last_resume_kind = resume_stop;
2368 else
2369 status = 0;
2370 }
2371
2372 if (non_stop)
2373 {
2374 /* Add the new thread to GDB's lists as soon as possible
2375 so that:
2376
2377 1) the frontend doesn't have to wait for a stop to
2378 display them, and,
2379
2380 2) we tag it with the correct running state. */
2381
2382 /* If the thread_db layer is active, let it know about
2383 this new thread, and add it to GDB's list. */
2384 if (!thread_db_attach_lwp (new_lp->ptid))
2385 {
2386 /* We're not using thread_db. Add it to GDB's
2387 list. */
2388 target_post_attach (GET_LWP (new_lp->ptid));
2389 add_thread (new_lp->ptid);
2390 }
2391
2392 if (!stopping)
2393 {
2394 set_running (new_lp->ptid, 1);
2395 set_executing (new_lp->ptid, 1);
2396 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2397 resume_stop. */
2398 new_lp->last_resume_kind = resume_continue;
2399 }
2400 }
2401
2402 if (status != 0)
2403 {
2404 /* We created NEW_LP so it cannot yet contain STATUS. */
2405 gdb_assert (new_lp->status == 0);
2406
2407 /* Save the wait status to report later. */
2408 if (debug_linux_nat)
2409 fprintf_unfiltered (gdb_stdlog,
2410 "LHEW: waitpid of new LWP %ld, "
2411 "saving status %s\n",
2412 (long) GET_LWP (new_lp->ptid),
2413 status_to_str (status));
2414 new_lp->status = status;
2415 }
2416
2417 /* Note the need to use the low target ops to resume, to
2418 handle resuming with PT_SYSCALL if we have syscall
2419 catchpoints. */
2420 if (!stopping)
2421 {
2422 new_lp->resumed = 1;
2423
2424 if (status == 0)
2425 {
2426 gdb_assert (new_lp->last_resume_kind == resume_continue);
2427 if (debug_linux_nat)
2428 fprintf_unfiltered (gdb_stdlog,
2429 "LHEW: resuming new LWP %ld\n",
2430 GET_LWP (new_lp->ptid));
2431 if (linux_nat_prepare_to_resume != NULL)
2432 linux_nat_prepare_to_resume (new_lp);
2433 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2434 0, TARGET_SIGNAL_0);
2435 new_lp->stopped = 0;
2436 }
2437 }
2438
2439 if (debug_linux_nat)
2440 fprintf_unfiltered (gdb_stdlog,
2441 "LHEW: resuming parent LWP %d\n", pid);
2442 if (linux_nat_prepare_to_resume != NULL)
2443 linux_nat_prepare_to_resume (lp);
2444 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2445 0, TARGET_SIGNAL_0);
2446
2447 return 1;
2448 }
2449
2450 return 0;
2451 }
2452
2453 if (event == PTRACE_EVENT_EXEC)
2454 {
2455 if (debug_linux_nat)
2456 fprintf_unfiltered (gdb_stdlog,
2457 "LHEW: Got exec event from LWP %ld\n",
2458 GET_LWP (lp->ptid));
2459
2460 ourstatus->kind = TARGET_WAITKIND_EXECD;
2461 ourstatus->value.execd_pathname
2462 = xstrdup (linux_child_pid_to_exec_file (pid));
2463
2464 return 0;
2465 }
2466
2467 if (event == PTRACE_EVENT_VFORK_DONE)
2468 {
2469 if (current_inferior ()->waiting_for_vfork_done)
2470 {
2471 if (debug_linux_nat)
2472 fprintf_unfiltered (gdb_stdlog,
2473 "LHEW: Got expected PTRACE_EVENT_"
2474 "VFORK_DONE from LWP %ld: stopping\n",
2475 GET_LWP (lp->ptid));
2476
2477 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2478 return 0;
2479 }
2480
2481 if (debug_linux_nat)
2482 fprintf_unfiltered (gdb_stdlog,
2483 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2484 "from LWP %ld: resuming\n",
2485 GET_LWP (lp->ptid));
2486 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2487 return 1;
2488 }
2489
2490 internal_error (__FILE__, __LINE__,
2491 _("unknown ptrace event %d"), event);
2492 }
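
/* A minimal standalone sketch (compiled out; illustrative only) of
   the extended-event plumbing decoded above. After PTRACE_SETOPTIONS
   requests PTRACE_O_TRACEFORK / _VFORK / _CLONE / _EXEC, the tracee
   stops with SIGTRAP and the event code in bits 16..23 of the wait
   status; PTRACE_GETEVENTMSG then yields e.g. the new child's pid. */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
decode_extended_event (pid_t pid, int status)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) != 0)
    {
      int event = status >> 16;	/* E.g. PTRACE_EVENT_FORK.  */
      unsigned long msg;

      ptrace (PTRACE_GETEVENTMSG, pid, 0, &msg);
      /* For fork/vfork/clone events, MSG is the new LWP's id.  */
      (void) event;
    }
}
#endif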
2493
2494 /* Return non-zero if LWP is a zombie. */
2495
2496 static int
2497 linux_lwp_is_zombie (long lwp)
2498 {
2499 char buffer[MAXPATHLEN];
2500 FILE *procfile;
2501 int retval;
2502 int have_state;
2503
2504 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
2505 procfile = fopen (buffer, "r");
2506 if (procfile == NULL)
2507 {
2508 warning (_("unable to open /proc file '%s'"), buffer);
2509 return 0;
2510 }
2511
2512 have_state = 0;
2513 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2514 if (strncmp (buffer, "State:", 6) == 0)
2515 {
2516 have_state = 1;
2517 break;
2518 }
2519 retval = (have_state
2520 && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
2521 fclose (procfile);
2522 return retval;
2523 }
2524
2525 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2526 exited. */
2527
2528 static int
2529 wait_lwp (struct lwp_info *lp)
2530 {
2531 pid_t pid;
2532 int status = 0;
2533 int thread_dead = 0;
2534 sigset_t prev_mask;
2535
2536 gdb_assert (!lp->stopped);
2537 gdb_assert (lp->status == 0);
2538
2539 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2540 block_child_signals (&prev_mask);
2541
2542 for (;;)
2543 {
2544 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2545 was right and we should just call sigsuspend. */
2546
2547 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2548 if (pid == -1 && errno == ECHILD)
2549 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2550 if (pid == -1 && errno == ECHILD)
2551 {
2552 /* The thread has previously exited. We need to delete it
2553 now because, for some vendor 2.4 kernels with NPTL
2554 support backported, there won't be an exit event unless
2555 it is the main thread. 2.6 kernels will report an exit
2556 event for each thread that exits, as expected. */
2557 thread_dead = 1;
2558 if (debug_linux_nat)
2559 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2560 target_pid_to_str (lp->ptid));
2561 }
2562 if (pid != 0)
2563 break;
2564
2565 /* Bugs 10970, 12702.
2566 Thread group leader may have exited in which case we'll lock up in
2567 waitpid if there are other threads, even if they are all zombies too.
2568 Basically, we're not supposed to use waitpid this way.
2569 __WCLONE is not applicable for the leader so we can't use that.
2570 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2571 process; it gets ESRCH both for the zombie and for running processes.
2572
2573 As a workaround, check if we're waiting for the thread group leader and
2574 if it's a zombie, and avoid calling waitpid if it is.
2575
2576 This is racy: what if the tgl becomes a zombie right after we check?
2577 Therefore always use WNOHANG together with sigsuspend - it is
2578 equivalent to a blocking waitpid, but linux_lwp_is_zombie stays safe. */
2579
2580 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2581 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
2582 {
2583 thread_dead = 1;
2584 if (debug_linux_nat)
2585 fprintf_unfiltered (gdb_stdlog,
2586 "WL: Thread group leader %s vanished.\n",
2587 target_pid_to_str (lp->ptid));
2588 break;
2589 }
2590
2591 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2592 handlers get invoked even though our caller had intentionally
2593 blocked them with block_child_signals. Only the loop in
2594 linux_nat_wait_1 is sensitive to this, and there, if we get
2595 called, my_waitpid is called again before we reach sigsuspend,
2596 so we can safely let the handlers run here. */
2597
2598 sigsuspend (&suspend_mask);
2599 }
2600
2601 restore_child_signals_mask (&prev_mask);
2602
2603 if (!thread_dead)
2604 {
2605 gdb_assert (pid == GET_LWP (lp->ptid));
2606
2607 if (debug_linux_nat)
2608 {
2609 fprintf_unfiltered (gdb_stdlog,
2610 "WL: waitpid %s received %s\n",
2611 target_pid_to_str (lp->ptid),
2612 status_to_str (status));
2613 }
2614
2615 /* Check if the thread has exited. */
2616 if (WIFEXITED (status) || WIFSIGNALED (status))
2617 {
2618 thread_dead = 1;
2619 if (debug_linux_nat)
2620 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2621 target_pid_to_str (lp->ptid));
2622 }
2623 }
2624
2625 if (thread_dead)
2626 {
2627 exit_lwp (lp);
2628 return 0;
2629 }
2630
2631 gdb_assert (WIFSTOPPED (status));
2632
2633 /* Handle GNU/Linux's syscall SIGTRAPs. */
2634 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2635 {
2636 /* No longer need the sysgood bit. The ptrace event ends up
2637 recorded in lp->waitstatus if we care for it. We can carry
2638 on handling the event like a regular SIGTRAP from here
2639 on. */
2640 status = W_STOPCODE (SIGTRAP);
2641 if (linux_handle_syscall_trap (lp, 1))
2642 return wait_lwp (lp);
2643 }
2644
2645 /* Handle GNU/Linux's extended waitstatus for trace events. */
2646 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2647 {
2648 if (debug_linux_nat)
2649 fprintf_unfiltered (gdb_stdlog,
2650 "WL: Handling extended status 0x%06x\n",
2651 status);
2652 if (linux_handle_extended_wait (lp, status, 1))
2653 return wait_lwp (lp);
2654 }
2655
2656 return status;
2657 }
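
/* A minimal standalone sketch (compiled out; illustrative only) of
   the race-free wait pattern wait_lwp relies on: block SIGCHLD, poll
   with WNOHANG, and sleep in sigsuspend, which atomically unblocks
   SIGCHLD while waiting. A SIGCHLD arriving between the waitpid and
   the sigsuspend stays pending and wakes sigsuspend immediately. */
#if 0
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static void dummy_sigchld (int sig) { (void) sig; }

static pid_t
wait_one_nonblocking (pid_t pid, int *status)
{
  sigset_t block, prev, suspend;
  pid_t ret;

  /* SIGCHLD needs a handler; its default disposition is to be
     ignored, which would not interrupt sigsuspend.  */
  signal (SIGCHLD, dummy_sigchld);

  sigemptyset (&block);
  sigaddset (&block, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block, &prev);

  suspend = prev;
  sigdelset (&suspend, SIGCHLD);

  while ((ret = waitpid (pid, status, WNOHANG)) == 0)
    sigsuspend (&suspend);	/* Wake on the next SIGCHLD.  */

  sigprocmask (SIG_SETMASK, &prev, NULL);
  return ret;
}
#endif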
2658
2659 /* Save the most recent siginfo for LP. This is currently only called
2660 for SIGTRAP; some ports use the si_addr field for
2661 target_stopped_data_address. In the future, it may also be used to
2662 restore the siginfo of requeued signals. */
2663
2664 static void
2665 save_siginfo (struct lwp_info *lp)
2666 {
2667 errno = 0;
2668 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2669 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2670
2671 if (errno != 0)
2672 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2673 }
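
/* A minimal standalone sketch (compiled out; illustrative only):
   PTRACE_GETSIGINFO copies out the siginfo_t for the signal that
   stopped the tracee; its si_addr field is how ports can recover
   e.g. the data address a hardware watchpoint trapped on. */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void *
trap_address (pid_t tid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, tid, 0, &si) != 0)
    return NULL;
  return si.si_addr;
}
#endif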
2674
2675 /* Send a SIGSTOP to LP. */
2676
2677 static int
2678 stop_callback (struct lwp_info *lp, void *data)
2679 {
2680 if (!lp->stopped && !lp->signalled)
2681 {
2682 int ret;
2683
2684 if (debug_linux_nat)
2685 {
2686 fprintf_unfiltered (gdb_stdlog,
2687 "SC: kill %s **<SIGSTOP>**\n",
2688 target_pid_to_str (lp->ptid));
2689 }
2690 errno = 0;
2691 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2692 if (debug_linux_nat)
2693 {
2694 fprintf_unfiltered (gdb_stdlog,
2695 "SC: lwp kill %d %s\n",
2696 ret,
2697 errno ? safe_strerror (errno) : "ERRNO-OK");
2698 }
2699
2700 lp->signalled = 1;
2701 gdb_assert (lp->status == 0);
2702 }
2703
2704 return 0;
2705 }
2706
2707 /* Request a stop on LWP. */
2708
2709 void
2710 linux_stop_lwp (struct lwp_info *lwp)
2711 {
2712 stop_callback (lwp, NULL);
2713 }
2714
2715 /* Return non-zero if LWP PID has a pending SIGINT. */
2716
2717 static int
2718 linux_nat_has_pending_sigint (int pid)
2719 {
2720 sigset_t pending, blocked, ignored;
2721
2722 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2723
2724 if (sigismember (&pending, SIGINT)
2725 && !sigismember (&ignored, SIGINT))
2726 return 1;
2727
2728 return 0;
2729 }
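
/* For reference, linux_proc_pending_signals works by parsing the
   hexadecimal sigset bitmaps in /proc/PID/status, which look like
   this (a minimal excerpt; other fields elided):

     SigPnd:	0000000000000002
     SigBlk:	0000000000010000
     SigIgn:	0000000000384004

   Bit N-1 set means signal N is in the set; SIGINT is signal 2, so
   the SigPnd line above (bit 1 set) is a pending SIGINT. */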
2730
2731 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2732
2733 static int
2734 set_ignore_sigint (struct lwp_info *lp, void *data)
2735 {
2736 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2737 flag to consume the next one. */
2738 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2739 && WSTOPSIG (lp->status) == SIGINT)
2740 lp->status = 0;
2741 else
2742 lp->ignore_sigint = 1;
2743
2744 return 0;
2745 }
2746
2747 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2748 This function is called after we know the LWP has stopped; if the LWP
2749 stopped before the expected SIGINT was delivered, then it will never have
2750 arrived. Also, if the signal was delivered to a shared queue and consumed
2751 by a different thread, it will never be delivered to this LWP. */
2752
2753 static void
2754 maybe_clear_ignore_sigint (struct lwp_info *lp)
2755 {
2756 if (!lp->ignore_sigint)
2757 return;
2758
2759 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2760 {
2761 if (debug_linux_nat)
2762 fprintf_unfiltered (gdb_stdlog,
2763 "MCIS: Clearing bogus flag for %s\n",
2764 target_pid_to_str (lp->ptid));
2765 lp->ignore_sigint = 0;
2766 }
2767 }
2768
2769 /* Fetch the possible triggered data watchpoint info and store it in
2770 LP.
2771
2772 On some archs, like x86, that use debug registers to set
2773 watchpoints, the way to know which watched address trapped
2774 is to check the register that is used to select which
2775 address to watch. The problem is, between setting the watchpoint
2776 and reading back which data address trapped, the user may change
2777 the set of watchpoints, and, as a consequence, GDB changes the
2778 debug registers in the inferior. To avoid reading back a stale
2779 stopped-data-address when that happens, we cache in LP the fact
2780 that a watchpoint trapped, and the corresponding data address, as
2781 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2782 registers meanwhile, we have the cached data we can rely on. */
2783
2784 static void
2785 save_sigtrap (struct lwp_info *lp)
2786 {
2787 struct cleanup *old_chain;
2788
2789 if (linux_ops->to_stopped_by_watchpoint == NULL)
2790 {
2791 lp->stopped_by_watchpoint = 0;
2792 return;
2793 }
2794
2795 old_chain = save_inferior_ptid ();
2796 inferior_ptid = lp->ptid;
2797
2798 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2799
2800 if (lp->stopped_by_watchpoint)
2801 {
2802 if (linux_ops->to_stopped_data_address != NULL)
2803 lp->stopped_data_address_p =
2804 linux_ops->to_stopped_data_address (&current_target,
2805 &lp->stopped_data_address);
2806 else
2807 lp->stopped_data_address_p = 0;
2808 }
2809
2810 do_cleanups (old_chain);
2811 }
2812
2813 /* See save_sigtrap. */
2814
2815 static int
2816 linux_nat_stopped_by_watchpoint (void)
2817 {
2818 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2819
2820 gdb_assert (lp != NULL);
2821
2822 return lp->stopped_by_watchpoint;
2823 }
2824
2825 static int
2826 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2827 {
2828 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2829
2830 gdb_assert (lp != NULL);
2831
2832 *addr_p = lp->stopped_data_address;
2833
2834 return lp->stopped_data_address_p;
2835 }
2836
2837 /* Commonly any breakpoint / watchpoint generates only SIGTRAP. */
2838
2839 static int
2840 sigtrap_is_event (int status)
2841 {
2842 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2843 }
2844
2845 /* SIGTRAP-like events recognizer. */
2846
2847 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2848
2849 /* Check for SIGTRAP-like events in LP. */
2850
2851 static int
2852 linux_nat_lp_status_is_event (struct lwp_info *lp)
2853 {
2854 /* We check for lp->waitstatus in addition to lp->status, because we can
2855 have pending process exits recorded in lp->status
2856 and W_EXITCODE(0,0) == 0. We should probably have an additional
2857 lp->status_p flag. */
2858
2859 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2860 && linux_nat_status_is_event (lp->status));
2861 }
2862
2863 /* Set an alternative recognizer for SIGTRAP-like events. If
2864 breakpoint_inserted_here_p finds a breakpoint there, then
2865 gdbarch_decr_pc_after_break will be applied. */
2866
2867 void
2868 linux_nat_set_status_is_event (struct target_ops *t,
2869 int (*status_is_event) (int status))
2870 {
2871 linux_nat_status_is_event = status_is_event;
2872 }
2873
2874 /* Wait until LP is stopped. */
2875
2876 static int
2877 stop_wait_callback (struct lwp_info *lp, void *data)
2878 {
2879 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2880
2881 /* If this is a vfork parent, bail out, it is not going to report
2882 any SIGSTOP until the vfork is done with. */
2883 if (inf->vfork_child != NULL)
2884 return 0;
2885
2886 if (!lp->stopped)
2887 {
2888 int status;
2889
2890 status = wait_lwp (lp);
2891 if (status == 0)
2892 return 0;
2893
2894 if (lp->ignore_sigint && WIFSTOPPED (status)
2895 && WSTOPSIG (status) == SIGINT)
2896 {
2897 lp->ignore_sigint = 0;
2898
2899 errno = 0;
2900 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2901 if (debug_linux_nat)
2902 fprintf_unfiltered (gdb_stdlog,
2903 "PTRACE_CONT %s, 0, 0 (%s) "
2904 "(discarding SIGINT)\n",
2905 target_pid_to_str (lp->ptid),
2906 errno ? safe_strerror (errno) : "OK");
2907
2908 return stop_wait_callback (lp, NULL);
2909 }
2910
2911 maybe_clear_ignore_sigint (lp);
2912
2913 if (WSTOPSIG (status) != SIGSTOP)
2914 {
2915 if (linux_nat_status_is_event (status))
2916 {
2917 /* If a LWP other than the LWP that we're reporting an
2918 event for has hit a GDB breakpoint (as opposed to
2919 some random trap signal), then just arrange for it to
2920 hit it again later. We don't keep the SIGTRAP status
2921 and don't forward the SIGTRAP signal to the LWP. We
2922 will handle the current event, eventually we will
2923 resume all LWPs, and this one will get its breakpoint
2924 trap again.
2925
2926 If we do not do this, then we run the risk that the
2927 user will delete or disable the breakpoint, but the
2928 thread will have already tripped on it. */
2929
2930 /* Save the trap's siginfo in case we need it later. */
2931 save_siginfo (lp);
2932
2933 save_sigtrap (lp);
2934
2935 /* Now resume this LWP and get the SIGSTOP event. */
2936 errno = 0;
2937 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2938 if (debug_linux_nat)
2939 {
2940 fprintf_unfiltered (gdb_stdlog,
2941 "PTRACE_CONT %s, 0, 0 (%s)\n",
2942 target_pid_to_str (lp->ptid),
2943 errno ? safe_strerror (errno) : "OK");
2944
2945 fprintf_unfiltered (gdb_stdlog,
2946 "SWC: Candidate SIGTRAP event in %s\n",
2947 target_pid_to_str (lp->ptid));
2948 }
2949 /* Hold this event/waitstatus while we check to see if
2950 there are any more (we still want to get that SIGSTOP). */
2951 stop_wait_callback (lp, NULL);
2952
2953 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2954 there's another event, throw it back into the
2955 queue. */
2956 if (lp->status)
2957 {
2958 if (debug_linux_nat)
2959 fprintf_unfiltered (gdb_stdlog,
2960 "SWC: kill %s, %s\n",
2961 target_pid_to_str (lp->ptid),
2962 status_to_str (lp->status));
2963 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
2964 }
2965
2966 /* Save the sigtrap event. */
2967 lp->status = status;
2968 return 0;
2969 }
2970 else
2971 {
2972 /* The thread was stopped with a signal other than
2973 SIGSTOP, and didn't accidentally trip a breakpoint. */
2974
2975 if (debug_linux_nat)
2976 {
2977 fprintf_unfiltered (gdb_stdlog,
2978 "SWC: Pending event %s in %s\n",
2979 status_to_str ((int) status),
2980 target_pid_to_str (lp->ptid));
2981 }
2982 /* Now resume this LWP and get the SIGSTOP event. */
2983 errno = 0;
2984 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2985 if (debug_linux_nat)
2986 fprintf_unfiltered (gdb_stdlog,
2987 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2988 target_pid_to_str (lp->ptid),
2989 errno ? safe_strerror (errno) : "OK");
2990
2991 /* Hold this event/waitstatus while we check to see if
2992 there are any more (we still want to get that SIGSTOP). */
2993 stop_wait_callback (lp, NULL);
2994
2995 /* If the lp->status field is still empty, use it to
2996 hold this event. If not, then this event must be
2997 returned to the event queue of the LWP. */
2998 if (lp->status)
2999 {
3000 if (debug_linux_nat)
3001 {
3002 fprintf_unfiltered (gdb_stdlog,
3003 "SWC: kill %s, %s\n",
3004 target_pid_to_str (lp->ptid),
3005 status_to_str ((int) status));
3006 }
3007 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
3008 }
3009 else
3010 lp->status = status;
3011 return 0;
3012 }
3013 }
3014 else
3015 {
3016 /* We caught the SIGSTOP that we intended to catch, so
3017 there's no SIGSTOP pending. */
3018 lp->stopped = 1;
3019 lp->signalled = 0;
3020 }
3021 }
3022
3023 return 0;
3024 }
3025
3026 /* Return non-zero if LP has a wait status pending. */
3027
3028 static int
3029 status_callback (struct lwp_info *lp, void *data)
3030 {
3031 /* Only report a pending wait status if we pretend that this has
3032 indeed been resumed. */
3033 if (!lp->resumed)
3034 return 0;
3035
3036 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3037 {
3038 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
3039 or a pending process exit. Note that `W_EXITCODE(0,0) ==
3040 0', so a clean process exit cannot be stored pending in
3041 lp->status, it is indistinguishable from
3042 no-pending-status. */
3043 return 1;
3044 }
3045
3046 if (lp->status != 0)
3047 return 1;
3048
3049 return 0;
3050 }
3051
3052 /* Return non-zero if LP isn't stopped. */
3053
3054 static int
3055 running_callback (struct lwp_info *lp, void *data)
3056 {
3057 return (!lp->stopped
3058 || ((lp->status != 0
3059 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3060 && lp->resumed));
3061 }
3062
3063 /* Count the LWP's that have had events. */
3064
3065 static int
3066 count_events_callback (struct lwp_info *lp, void *data)
3067 {
3068 int *count = data;
3069
3070 gdb_assert (count != NULL);
3071
3072 /* Count only resumed LWPs that have a SIGTRAP event pending. */
3073 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3074 (*count)++;
3075
3076 return 0;
3077 }
3078
3079 /* Select the LWP (if any) that is currently being single-stepped. */
3080
3081 static int
3082 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3083 {
3084 if (lp->last_resume_kind == resume_step
3085 && lp->status != 0)
3086 return 1;
3087 else
3088 return 0;
3089 }
3090
3091 /* Select the Nth LWP that has had a SIGTRAP event. */
3092
3093 static int
3094 select_event_lwp_callback (struct lwp_info *lp, void *data)
3095 {
3096 int *selector = data;
3097
3098 gdb_assert (selector != NULL);
3099
3100 /* Select only resumed LWPs that have a SIGTRAP event pending. */
3101 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3102 if ((*selector)-- == 0)
3103 return 1;
3104
3105 return 0;
3106 }
3107
3108 static int
3109 cancel_breakpoint (struct lwp_info *lp)
3110 {
3111 /* Arrange for a breakpoint to be hit again later. We don't keep
3112 the SIGTRAP status and don't forward the SIGTRAP signal to the
3113 LWP. We will handle the current event, eventually we will resume
3114 this LWP, and this breakpoint will trap again.
3115
3116 If we do not do this, then we run the risk that the user will
3117 delete or disable the breakpoint, but the LWP will have already
3118 tripped on it. */
3119
3120 struct regcache *regcache = get_thread_regcache (lp->ptid);
3121 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3122 CORE_ADDR pc;
3123
3124 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3125 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3126 {
3127 if (debug_linux_nat)
3128 fprintf_unfiltered (gdb_stdlog,
3129 "CB: Push back breakpoint for %s\n",
3130 target_pid_to_str (lp->ptid));
3131
3132 /* Back up the PC if necessary. */
3133 if (gdbarch_decr_pc_after_break (gdbarch))
3134 regcache_write_pc (regcache, pc);
3135
3136 return 1;
3137 }
3138 return 0;
3139 }
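
/* A concrete example of the PC adjustment above, assuming x86: a
   breakpoint planted at 0x400123 replaces the first byte there with
   the one-byte trap instruction 0xCC. When the inferior executes
   it, the reported PC is 0x400124. gdbarch_decr_pc_after_break is 1
   on x86, so 0x400124 - 1 == 0x400123 matches the planted
   breakpoint, and the PC is rewound to 0x400123 so the original
   instruction runs when the LWP is next resumed. */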
3140
3141 static int
3142 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3143 {
3144 struct lwp_info *event_lp = data;
3145
3146 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3147 if (lp == event_lp)
3148 return 0;
3149
3150 /* If a LWP other than the LWP that we're reporting an event for has
3151 hit a GDB breakpoint (as opposed to some random trap signal),
3152 then just arrange for it to hit it again later. We don't keep
3153 the SIGTRAP status and don't forward the SIGTRAP signal to the
3154 LWP. We will handle the current event, eventually we will resume
3155 all LWPs, and this one will get its breakpoint trap again.
3156
3157 If we do not do this, then we run the risk that the user will
3158 delete or disable the breakpoint, but the LWP will have already
3159 tripped on it. */
3160
3161 if (linux_nat_lp_status_is_event (lp)
3162 && cancel_breakpoint (lp))
3163 /* Throw away the SIGTRAP. */
3164 lp->status = 0;
3165
3166 return 0;
3167 }
3168
3169 /* Select one LWP out of those that have events pending. */
3170
3171 static void
3172 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3173 {
3174 int num_events = 0;
3175 int random_selector;
3176 struct lwp_info *event_lp;
3177
3178 /* Record the wait status for the original LWP. */
3179 (*orig_lp)->status = *status;
3180
3181 /* Give preference to any LWP that is being single-stepped. */
3182 event_lp = iterate_over_lwps (filter,
3183 select_singlestep_lwp_callback, NULL);
3184 if (event_lp != NULL)
3185 {
3186 if (debug_linux_nat)
3187 fprintf_unfiltered (gdb_stdlog,
3188 "SEL: Select single-step %s\n",
3189 target_pid_to_str (event_lp->ptid));
3190 }
3191 else
3192 {
3193 /* No single-stepping LWP. Select one at random, out of those
3194 which have had SIGTRAP events. */
3195
3196 /* First see how many SIGTRAP events we have. */
3197 iterate_over_lwps (filter, count_events_callback, &num_events);
3198
3199 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3200 random_selector = (int)
3201 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3202
3203 if (debug_linux_nat && num_events > 1)
3204 fprintf_unfiltered (gdb_stdlog,
3205 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3206 num_events, random_selector);
3207
3208 event_lp = iterate_over_lwps (filter,
3209 select_event_lwp_callback,
3210 &random_selector);
3211 }
3212
3213 if (event_lp != NULL)
3214 {
3215 /* Switch the event LWP. */
3216 *orig_lp = event_lp;
3217 *status = event_lp->status;
3218 }
3219
3220 /* Flush the wait status for the event LWP. */
3221 (*orig_lp)->status = 0;
3222 }
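
/* A quick worked example of the random selector above: with
   num_events == 3, (3 * (double) rand ()) / (RAND_MAX + 1.0) is
   uniform over [0.0, 3.0), so the cast to int yields 0, 1 or 2 with
   equal probability. Each LWP with a pending SIGTRAP is therefore
   equally likely to be reported first, preventing one busy thread
   from starving the others. */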
3223
3224 /* Return non-zero if LP has been resumed. */
3225
3226 static int
3227 resumed_callback (struct lwp_info *lp, void *data)
3228 {
3229 return lp->resumed;
3230 }
3231
3232 /* Stop an active thread, verify it still exists, then resume it. If
3233 the thread ends up with a pending status, then it is not resumed,
3234 and *DATA (really a pointer to int) is set. */
3235
3236 static int
3237 stop_and_resume_callback (struct lwp_info *lp, void *data)
3238 {
3239 int *new_pending_p = data;
3240
3241 if (!lp->stopped)
3242 {
3243 ptid_t ptid = lp->ptid;
3244
3245 stop_callback (lp, NULL);
3246 stop_wait_callback (lp, NULL);
3247
3248 /* Resume if the lwp still exists, and the core wanted it
3249 running. */
3250 lp = find_lwp_pid (ptid);
3251 if (lp != NULL)
3252 {
3253 if (lp->last_resume_kind == resume_stop
3254 && lp->status == 0)
3255 {
3256 /* The core wanted the LWP to stop. Even if it stopped
3257 cleanly (with SIGSTOP), leave the event pending. */
3258 if (debug_linux_nat)
3259 fprintf_unfiltered (gdb_stdlog,
3260 "SARC: core wanted LWP %ld stopped "
3261 "(leaving SIGSTOP pending)\n",
3262 GET_LWP (lp->ptid));
3263 lp->status = W_STOPCODE (SIGSTOP);
3264 }
3265
3266 if (lp->status == 0)
3267 {
3268 if (debug_linux_nat)
3269 fprintf_unfiltered (gdb_stdlog,
3270 "SARC: re-resuming LWP %ld\n",
3271 GET_LWP (lp->ptid));
3272 resume_lwp (lp, lp->step);
3273 }
3274 else
3275 {
3276 if (debug_linux_nat)
3277 fprintf_unfiltered (gdb_stdlog,
3278 "SARC: not re-resuming LWP %ld "
3279 "(has pending)\n",
3280 GET_LWP (lp->ptid));
3281 if (new_pending_p)
3282 *new_pending_p = 1;
3283 }
3284 }
3285 }
3286 return 0;
3287 }
3288
3289 /* Check if we should go on and pass this event to common code.
3290 Return the affected lwp if we are, or NULL otherwise. If we stop
3291 all lwps temporarily, we may end up with new pending events in some
3292 other lwp. In that case set *NEW_PENDING_P to true. */
3293
3294 static struct lwp_info *
3295 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3296 {
3297 struct lwp_info *lp;
3298
3299 *new_pending_p = 0;
3300
3301 lp = find_lwp_pid (pid_to_ptid (lwpid));
3302
3303 /* Check for stop events reported by a process we didn't already
3304 know about - anything not already in our LWP list.
3305
3306 If we're expecting to receive stopped processes after
3307 fork, vfork, and clone events, then we'll just add the
3308 new one to our list and go back to waiting for the event
3309 to be reported - the stopped process might be returned
3310 from waitpid before or after the event is.
3311
3312 But note the case of a non-leader thread exec'ing after the
3313 leader has exited and gone from our lists. The non-leader
3314 thread changes its tid to the tgid. */
3315
3316 if (WIFSTOPPED (status) && lp == NULL
3317 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3318 {
3319 /* A multi-thread exec after we had seen the leader exiting. */
3320 if (debug_linux_nat)
3321 fprintf_unfiltered (gdb_stdlog,
3322 "LLW: Re-adding thread group leader LWP %d.\n",
3323 lwpid);
3324
3325 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3326 lp->stopped = 1;
3327 lp->resumed = 1;
3328 add_thread (lp->ptid);
3329 }
3330
3331 if (WIFSTOPPED (status) && !lp)
3332 {
3333 add_to_pid_list (&stopped_pids, lwpid, status);
3334 return NULL;
3335 }
3336
3337 /* Make sure we don't report an event for the exit of an LWP not in
3338 our list, i.e. not part of the current process. This can happen
3339 if we detach from a program we originally forked and then it
3340 exits. */
3341 if (!WIFSTOPPED (status) && !lp)
3342 return NULL;
3343
3344 /* Handle GNU/Linux's syscall SIGTRAPs. */
3345 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3346 {
3347 /* No longer need the sysgood bit. The ptrace event ends up
3348 recorded in lp->waitstatus if we care for it. We can carry
3349 on handling the event like a regular SIGTRAP from here
3350 on. */
3351 status = W_STOPCODE (SIGTRAP);
3352 if (linux_handle_syscall_trap (lp, 0))
3353 return NULL;
3354 }
3355
3356 /* Handle GNU/Linux's extended waitstatus for trace events. */
3357 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3358 {
3359 if (debug_linux_nat)
3360 fprintf_unfiltered (gdb_stdlog,
3361 "LLW: Handling extended status 0x%06x\n",
3362 status);
3363 if (linux_handle_extended_wait (lp, status, 0))
3364 return NULL;
3365 }
3366
3367 if (linux_nat_status_is_event (status))
3368 {
3369 /* Save the trap's siginfo in case we need it later. */
3370 save_siginfo (lp);
3371
3372 save_sigtrap (lp);
3373 }
3374
3375 /* Check if the thread has exited. */
3376 if ((WIFEXITED (status) || WIFSIGNALED (status))
3377 && num_lwps (GET_PID (lp->ptid)) > 1)
3378 {
3379 /* If this is the main thread, we must stop all threads and verify
3380 if they are still alive. This is because in the nptl thread model
3381 on Linux 2.4, there is no signal issued for exiting LWPs
3382 other than the main thread. We only get the main thread exit
3383 signal once all child threads have already exited. If we
3384 stop all the threads and use the stop_wait_callback to check
3385 if they have exited we can determine whether this signal
3386 should be ignored or whether it means the end of the debugged
3387 application, regardless of which threading model is being
3388 used. */
3389 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3390 {
3391 lp->stopped = 1;
3392 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3393 stop_and_resume_callback, new_pending_p);
3394 }
3395
3396 if (debug_linux_nat)
3397 fprintf_unfiltered (gdb_stdlog,
3398 "LLW: %s exited.\n",
3399 target_pid_to_str (lp->ptid));
3400
3401 if (num_lwps (GET_PID (lp->ptid)) > 1)
3402 {
3403 /* If there is at least one more LWP, then the exit signal
3404 was not the end of the debugged application and should be
3405 ignored. */
3406 exit_lwp (lp);
3407 return NULL;
3408 }
3409 }
3410
3411 /* Check if the current LWP has previously exited. In the nptl
3412 thread model, LWPs other than the main thread do not issue
3413 signals when they exit so we must check whenever the thread has
3414 stopped. A similar check is made in stop_wait_callback(). */
3415 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3416 {
3417 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3418
3419 if (debug_linux_nat)
3420 fprintf_unfiltered (gdb_stdlog,
3421 "LLW: %s exited.\n",
3422 target_pid_to_str (lp->ptid));
3423
3424 exit_lwp (lp);
3425
3426 /* Make sure there is at least one thread running. */
3427 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3428
3429 /* Discard the event. */
3430 return NULL;
3431 }
3432
3433 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3434 an attempt to stop an LWP. */
3435 if (lp->signalled
3436 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3437 {
3438 if (debug_linux_nat)
3439 fprintf_unfiltered (gdb_stdlog,
3440 "LLW: Delayed SIGSTOP caught for %s.\n",
3441 target_pid_to_str (lp->ptid));
3442
3443 lp->signalled = 0;
3444
3445 if (lp->last_resume_kind != resume_stop)
3446 {
3447 /* This is a delayed SIGSTOP. */
3448
3449 registers_changed ();
3450
3451 if (linux_nat_prepare_to_resume != NULL)
3452 linux_nat_prepare_to_resume (lp);
3453 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3454 lp->step, TARGET_SIGNAL_0);
3455 if (debug_linux_nat)
3456 fprintf_unfiltered (gdb_stdlog,
3457 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3458 lp->step ?
3459 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3460 target_pid_to_str (lp->ptid));
3461
3462 lp->stopped = 0;
3463 gdb_assert (lp->resumed);
3464
3465 /* Discard the event. */
3466 return NULL;
3467 }
3468 }
3469
3470 /* Make sure we don't report a SIGINT that we have already displayed
3471 for another thread. */
3472 if (lp->ignore_sigint
3473 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3474 {
3475 if (debug_linux_nat)
3476 fprintf_unfiltered (gdb_stdlog,
3477 "LLW: Delayed SIGINT caught for %s.\n",
3478 target_pid_to_str (lp->ptid));
3479
3480 /* This is a delayed SIGINT. */
3481 lp->ignore_sigint = 0;
3482
3483 registers_changed ();
3484 if (linux_nat_prepare_to_resume != NULL)
3485 linux_nat_prepare_to_resume (lp);
3486 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3487 lp->step, TARGET_SIGNAL_0);
3488 if (debug_linux_nat)
3489 fprintf_unfiltered (gdb_stdlog,
3490 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3491 lp->step ?
3492 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3493 target_pid_to_str (lp->ptid));
3494
3495 lp->stopped = 0;
3496 gdb_assert (lp->resumed);
3497
3498 /* Discard the event. */
3499 return NULL;
3500 }
3501
3502 /* An interesting event. */
3503 gdb_assert (lp);
3504 lp->status = status;
3505 return lp;
3506 }
3507
3508 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3509 their exits until all other threads in the group have exited. */
3510
3511 static void
3512 check_zombie_leaders (void)
3513 {
3514 struct inferior *inf;
3515
3516 ALL_INFERIORS (inf)
3517 {
3518 struct lwp_info *leader_lp;
3519
3520 if (inf->pid == 0)
3521 continue;
3522
3523 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3524 if (leader_lp != NULL
3525 /* Check if there are other threads in the group, as we may
3526 have raced with the inferior simply exiting. */
3527 && num_lwps (inf->pid) > 1
3528 && linux_lwp_is_zombie (inf->pid))
3529 {
3530 if (debug_linux_nat)
3531 fprintf_unfiltered (gdb_stdlog,
3532 "CZL: Thread group leader %d zombie "
3533 "(it exited, or another thread execd).\n",
3534 inf->pid);
3535
3536 /* A leader zombie can mean one of two things:
3537
3538 - It exited, and there's an exit status pending, or
3539 only the leader exited (not the whole
3540 program). In the latter case, we can't waitpid the
3541 leader's exit status until all other threads are gone.
3542
3543 - There are 3 or more threads in the group, and a thread
3544 other than the leader exec'd. On an exec, the Linux
3545 kernel destroys all other threads (except the execing
3546 one) in the thread group, and resets the execing thread's
3547 tid to the tgid. No exit notification is sent for the
3548 execing thread -- from the ptracer's perspective, it
3549 appears as though the execing thread just vanishes.
3550 Until we reap all other threads except the leader and the
3551 execing thread, the leader will be zombie, and the
3552 execing thread will be in `D (disc sleep)'. As soon as
3553 all other threads are reaped, the execing thread changes
3554 its tid to the tgid, and the previous (zombie) leader
3555 vanishes, giving way to the "new" leader. We could try
3556 distinguishing the exit and exec cases, by waiting once
3557 more, and seeing if something comes out, but it doesn't
3558 sound useful. The previous leader _does_ go away, and
3559 we'll re-add the new one once we see the exec event
3560 (which is just the same as what would happen if the
3561 previous leader did exit voluntarily before some other
3562 thread execs). */
3563
3564 if (debug_linux_nat)
3565 fprintf_unfiltered (gdb_stdlog,
3566 "CZL: Thread group leader %d vanished.\n",
3567 inf->pid);
3568 exit_lwp (leader_lp);
3569 }
3570 }
3571 }
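
/* Concretely: in a two-thread program where the leader (tid == tgid
   == 100) calls pthread_exit while thread 101 keeps running,
   /proc/100/status reports "State: Z (zombie)", yet
   waitpid (100, ...) would block until 101 also exits. The check
   above lets GDB treat LWP 100 as gone without ever issuing that
   blocking waitpid. */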
3572
3573 static ptid_t
3574 linux_nat_wait_1 (struct target_ops *ops,
3575 ptid_t ptid, struct target_waitstatus *ourstatus,
3576 int target_options)
3577 {
3578 static sigset_t prev_mask;
3579 enum resume_kind last_resume_kind;
3580 struct lwp_info *lp;
3581 int status;
3582
3583 if (debug_linux_nat)
3584 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3585
3586 /* The first time we get here after starting a new inferior, we may
3587 not have added it to the LWP list yet - this is the earliest
3588 moment at which we know its PID. */
3589 if (ptid_is_pid (inferior_ptid))
3590 {
3591 /* Upgrade the main thread's ptid. */
3592 thread_change_ptid (inferior_ptid,
3593 BUILD_LWP (GET_PID (inferior_ptid),
3594 GET_PID (inferior_ptid)));
3595
3596 lp = add_lwp (inferior_ptid);
3597 lp->resumed = 1;
3598 }
3599
3600 /* Make sure SIGCHLD is blocked. */
3601 block_child_signals (&prev_mask);
3602
3603 retry:
3604 lp = NULL;
3605 status = 0;
3606
3607 /* First check if there is a LWP with a wait status pending. */
3608 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3609 {
3610 /* Any LWP in the PTID group that's been resumed will do. */
3611 lp = iterate_over_lwps (ptid, status_callback, NULL);
3612 if (lp)
3613 {
3614 if (debug_linux_nat && lp->status)
3615 fprintf_unfiltered (gdb_stdlog,
3616 "LLW: Using pending wait status %s for %s.\n",
3617 status_to_str (lp->status),
3618 target_pid_to_str (lp->ptid));
3619 }
3620 }
3621 else if (is_lwp (ptid))
3622 {
3623 if (debug_linux_nat)
3624 fprintf_unfiltered (gdb_stdlog,
3625 "LLW: Waiting for specific LWP %s.\n",
3626 target_pid_to_str (ptid));
3627
3628 /* We have a specific LWP to check. */
3629 lp = find_lwp_pid (ptid);
3630 gdb_assert (lp);
3631
3632 if (debug_linux_nat && lp->status)
3633 fprintf_unfiltered (gdb_stdlog,
3634 "LLW: Using pending wait status %s for %s.\n",
3635 status_to_str (lp->status),
3636 target_pid_to_str (lp->ptid));
3637
3638 /* We check for lp->waitstatus in addition to lp->status,
3639 because we can have pending process exits recorded in
3640 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3641 an additional lp->status_p flag. */
3642 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3643 lp = NULL;
3644 }
3645
3646 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
3647 {
3648 /* A pending SIGSTOP may interfere with the normal stream of
3649 events. In a typical case where interference is a problem,
3650 we have a SIGSTOP signal pending for LWP A while
3651 single-stepping it, encounter an event in LWP B, and take the
3652 pending SIGSTOP while trying to stop LWP A. After processing
3653 the event in LWP B, LWP A is continued, and we'll never see
3654 the SIGTRAP associated with the last time we were
3655 single-stepping LWP A. */
3656
3657 /* Resume the thread. It should halt immediately, returning the
3658 pending SIGSTOP. */
3659 registers_changed ();
3660 if (linux_nat_prepare_to_resume != NULL)
3661 linux_nat_prepare_to_resume (lp);
3662 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3663 lp->step, TARGET_SIGNAL_0);
3664 if (debug_linux_nat)
3665 fprintf_unfiltered (gdb_stdlog,
3666 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3667 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3668 target_pid_to_str (lp->ptid));
3669 lp->stopped = 0;
3670 gdb_assert (lp->resumed);
3671
3672 /* Catch the pending SIGSTOP. */
3673 status = lp->status;
3674 lp->status = 0;
3675
3676 stop_wait_callback (lp, NULL);
3677
3678 /* If the lp->status field isn't empty, we caught another signal
3679 while flushing the SIGSTOP. Queue it back to the LWP, as
3680 we already have an event to handle. */
3681 if (lp->status)
3682 {
3683 if (debug_linux_nat)
3684 fprintf_unfiltered (gdb_stdlog,
3685 "LLW: kill %s, %s\n",
3686 target_pid_to_str (lp->ptid),
3687 status_to_str (lp->status));
3688 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3689 }
3690
3691 lp->status = status;
3692 }
3693
3694 if (!target_can_async_p ())
3695 {
3696 /* Causes SIGINT to be passed on to the attached process. */
3697 set_sigint_trap ();
3698 }
3699
3700 /* But if we don't find a pending event, we'll have to wait. */
3701
3702 while (lp == NULL)
3703 {
3704 pid_t lwpid;
3705
3706 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3707 quirks:
3708
3709 - If the thread group leader exits while other threads in the
3710 thread group still exist, waitpid(TGID, ...) hangs. That
3711 waitpid won't return an exit status until the other threads
3712 in the group are reaped.
3713
3714 - When a non-leader thread execs, that thread just vanishes
3715 without reporting an exit (so we'd hang if we waited for it
3716 explicitly in that case). The exec event is reported to
3717 the TGID pid. */
3718
3719 errno = 0;
3720 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3721 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3722 lwpid = my_waitpid (-1, &status, WNOHANG);
3723
3724 if (debug_linux_nat)
3725 fprintf_unfiltered (gdb_stdlog,
3726 "LLW: waitpid(-1, ...) returned %d, %s\n",
3727 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3728
3729 if (lwpid > 0)
3730 {
3731 /* If this is true, then we paused LWPs momentarily, and may
3732 now have pending events to handle. */
3733 int new_pending;
3734
3735 if (debug_linux_nat)
3736 {
3737 fprintf_unfiltered (gdb_stdlog,
3738 "LLW: waitpid %ld received %s\n",
3739 (long) lwpid, status_to_str (status));
3740 }
3741
3742 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3743
3744 /* STATUS is now no longer valid, use LP->STATUS instead. */
3745 status = 0;
3746
3747 if (lp && !ptid_match (lp->ptid, ptid))
3748 {
3749 gdb_assert (lp->resumed);
3750
3751 if (debug_linux_nat)
3752 fprintf (stderr,
3753 "LWP %ld got an event %06x, leaving pending.\n",
3754 ptid_get_lwp (lp->ptid), lp->status);
3755
3756 if (WIFSTOPPED (lp->status))
3757 {
3758 if (WSTOPSIG (lp->status) != SIGSTOP)
3759 {
3760 /* Cancel breakpoint hits. The breakpoint may
3761 be removed before we fetch events from this
3762 process to report to the core. It is best
3763 not to assume the moribund breakpoints
3764 heuristic always handles these cases --- it
3765 could be too many events go through to the
3766 core before this one is handled. All-stop
3767 always cancels breakpoint hits in all
3768 threads. */
3769 if (non_stop
3770 && linux_nat_lp_status_is_event (lp)
3771 && cancel_breakpoint (lp))
3772 {
3773 /* Throw away the SIGTRAP. */
3774 lp->status = 0;
3775
3776 if (debug_linux_nat)
3777 fprintf (stderr,
3778 "LLW: LWP %ld hit a breakpoint while"
3779 " waiting for another process;"
3780 " cancelled it\n",
3781 ptid_get_lwp (lp->ptid));
3782 }
3783 lp->stopped = 1;
3784 }
3785 else
3786 {
3787 lp->stopped = 1;
3788 lp->signalled = 0;
3789 }
3790 }
3791 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3792 {
3793 if (debug_linux_nat)
3794 fprintf (stderr,
3795 "Process %ld exited while stopping LWPs\n",
3796 ptid_get_lwp (lp->ptid));
3797
3798 /* This was the last lwp in the process. Since
3799 events are serialized to GDB core, we can't
3800 report this one right now; but GDB core and the
3801 other target layers will want to be notified
3802 about the exit code/signal, so leave the status
3803 pending for the next time we're able to report
3804 it. */
3805
3806 /* Prevent trying to stop this thread again. We'll
3807 never try to resume it because it has a pending
3808 status. */
3809 lp->stopped = 1;
3810
3811 /* Dead LWPs aren't expected to report a pending
3812 SIGSTOP. */
3813 lp->signalled = 0;
3814
3815 /* Store the pending event in the waitstatus as
3816 well, because W_EXITCODE(0,0) == 0. */
3817 store_waitstatus (&lp->waitstatus, lp->status);
3818 }
3819
3820 /* Keep looking. */
3821 lp = NULL;
3822 }
3823
3824 if (new_pending)
3825 {
3826 /* Some LWP now has a pending event. Go all the way
3827 back to check it. */
3828 goto retry;
3829 }
3830
3831 if (lp)
3832 {
3833 /* We got an event to report to the core. */
3834 break;
3835 }
3836
3837 /* Retry until nothing comes out of waitpid. A single
3838 SIGCHLD can indicate more than one child stopped. */
3839 continue;
3840 }
3841
3842 /* Check for zombie thread group leaders. Those can't be reaped
3843 until all other threads in the thread group are. */
3844 check_zombie_leaders ();
3845
3846 /* If there are no resumed children left, bail. We'd be stuck
3847 forever in the sigsuspend call below otherwise. */
3848 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3849 {
3850 if (debug_linux_nat)
3851 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3852
3853 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3854
3855 if (!target_can_async_p ())
3856 clear_sigint_trap ();
3857
3858 restore_child_signals_mask (&prev_mask);
3859 return minus_one_ptid;
3860 }
3861
3862 /* No interesting event to report to the core. */
3863
3864 if (target_options & TARGET_WNOHANG)
3865 {
3866 if (debug_linux_nat)
3867 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3868
3869 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3870 restore_child_signals_mask (&prev_mask);
3871 return minus_one_ptid;
3872 }
3873
3874 /* We shouldn't end up here unless we want to try again. */
3875 gdb_assert (lp == NULL);
3876
3877 /* Block until we get an event reported with SIGCHLD. */
3878 sigsuspend (&suspend_mask);
3879 }
3880
3881 if (!target_can_async_p ())
3882 clear_sigint_trap ();
3883
3884 gdb_assert (lp);
3885
3886 status = lp->status;
3887 lp->status = 0;
3888
3889 /* Don't report signals that GDB isn't interested in, such as
3890 signals that are neither printed nor stopped upon. Stopping all
3891 threads can be a bit time-consuming, so if we want decent
3892 performance with heavily multi-threaded programs, especially when
3893 they're using a high frequency timer, we'd better avoid it if we
3894 can. */
3895
3896 if (WIFSTOPPED (status))
3897 {
3898 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
3899
3900 /* When using hardware single-step, we need to report every signal.
3901 Otherwise, signals in pass_mask may be short-circuited. */
3902 if (!lp->step
3903 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3904 {
3905 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3906 here? It is not clear we should. GDB may not expect
3907 other threads to run. On the other hand, not resuming
3908 newly attached threads may cause an unwanted delay in
3909 getting them running. */
3910 registers_changed ();
3911 if (linux_nat_prepare_to_resume != NULL)
3912 linux_nat_prepare_to_resume (lp);
3913 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3914 lp->step, signo);
3915 if (debug_linux_nat)
3916 fprintf_unfiltered (gdb_stdlog,
3917 "LLW: %s %s, %s (preempt 'handle')\n",
3918 lp->step ?
3919 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3920 target_pid_to_str (lp->ptid),
3921 (signo != TARGET_SIGNAL_0
3922 ? strsignal (target_signal_to_host (signo))
3923 : "0"));
3924 lp->stopped = 0;
3925 goto retry;
3926 }
3927
3928 if (!non_stop)
3929 {
3930 /* Only do the below in all-stop, as we currently use SIGINT
3931 to implement target_stop (see linux_nat_stop) in
3932 non-stop. */
3933 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3934 {
3935 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3936 forwarded to the entire process group, that is, all LWPs
3937 will receive it - unless they're using CLONE_THREAD to
3938 share signals. Since we only want to report it once, we
3939 mark it as ignored for all LWPs except this one. */
3940 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3941 set_ignore_sigint, NULL);
3942 lp->ignore_sigint = 0;
3943 }
3944 else
3945 maybe_clear_ignore_sigint (lp);
3946 }
3947 }
3948
3949 /* This LWP is stopped now. */
3950 lp->stopped = 1;
3951
3952 if (debug_linux_nat)
3953 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3954 status_to_str (status), target_pid_to_str (lp->ptid));
3955
3956 if (!non_stop)
3957 {
3958 /* Now stop all other LWP's ... */
3959 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3960
3961 /* ... and wait until all of them have reported back that
3962 they're no longer running. */
3963 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3964
3965 /* If we're not waiting for a specific LWP, choose an event LWP
3966 from among those that have had events. Giving equal priority
3967 to all LWPs that have had events helps prevent
3968 starvation. */
3969 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3970 select_event_lwp (ptid, &lp, &status);
3971
3972 /* Now that we've selected our final event LWP, cancel any
3973 breakpoints in other LWPs that have hit a GDB breakpoint.
3974 See the comment in cancel_breakpoints_callback to find out
3975 why. */
3976 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3977
3978 /* We'll need this to determine whether to report a SIGSTOP as
3979 TARGET_SIGNAL_0. Need to take a copy because
3980 resume_clear_callback clears it. */
3981 last_resume_kind = lp->last_resume_kind;
3982
3983 /* In all-stop, from the core's perspective, all LWPs are now
3984 stopped until a new resume action is sent over. */
3985 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3986 }
3987 else
3988 {
3989 /* See above. */
3990 last_resume_kind = lp->last_resume_kind;
3991 resume_clear_callback (lp, NULL);
3992 }
3993
3994 if (linux_nat_status_is_event (status))
3995 {
3996 if (debug_linux_nat)
3997 fprintf_unfiltered (gdb_stdlog,
3998 "LLW: trap ptid is %s.\n",
3999 target_pid_to_str (lp->ptid));
4000 }
4001
4002 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
4003 {
4004 *ourstatus = lp->waitstatus;
4005 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
4006 }
4007 else
4008 store_waitstatus (ourstatus, status);
4009
4010 if (debug_linux_nat)
4011 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
4012
4013 restore_child_signals_mask (&prev_mask);
4014
4015 if (last_resume_kind == resume_stop
4016 && ourstatus->kind == TARGET_WAITKIND_STOPPED
4017 && WSTOPSIG (status) == SIGSTOP)
4018 {
4019 /* GDB requested this thread to stop with target_stop, and
4020 it stopped cleanly, so report it as stopped with SIG0. The
4021 use of SIGSTOP is an implementation detail. */
4022 ourstatus->value.sig = TARGET_SIGNAL_0;
4023 }
4024
4025 if (ourstatus->kind == TARGET_WAITKIND_EXITED
4026 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
4027 lp->core = -1;
4028 else
4029 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
4030
4031 return lp->ptid;
4032 }
4033
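/* For illustration only: a standalone sketch (not part of GDB) of
   the core wait loop structure above -- poll with non-blocking
   waitpid both with and without __WCLONE, and block in sigsuspend
   until a SIGCHLD arrives when nothing is pending.  All error
   handling and GDB-specific bookkeeping are omitted.  */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef __WCLONE
#define __WCLONE 0x80000000
#endif

static void
wait_for_any_child (const sigset_t *suspend_mask)
{
  for (;;)
    {
      int status;
      pid_t lwpid;

      errno = 0;
      lwpid = waitpid (-1, &status, __WCLONE | WNOHANG);
      if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
        lwpid = waitpid (-1, &status, WNOHANG);

      if (lwpid > 0)
        {
          /* An event for LWPID is in STATUS; handle it here.  */
          break;
        }

      /* Nothing pending: sleep until SIGCHLD.  SUSPEND_MASK must
         have SIGCHLD unblocked, or we would sleep forever.  */
      sigsuspend (suspend_mask);
    }
}
#endif
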
4034 /* Resume LWPs that are currently stopped without any pending status
4035 to report, but are resumed from the core's perspective. */
4036
4037 static int
4038 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
4039 {
4040 ptid_t *wait_ptid_p = data;
4041
4042 if (lp->stopped
4043 && lp->resumed
4044 && lp->status == 0
4045 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
4046 {
4047 struct regcache *regcache = get_thread_regcache (lp->ptid);
4048 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4049 CORE_ADDR pc = regcache_read_pc (regcache);
4050
4051 gdb_assert (is_executing (lp->ptid));
4052
4053 /* Don't bother if there's a breakpoint at PC that we'd hit
4054 immediately, and we're not waiting for this LWP. */
4055 if (!ptid_match (lp->ptid, *wait_ptid_p))
4056 {
4057 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
4058 return 0;
4059 }
4060
4061 if (debug_linux_nat)
4062 fprintf_unfiltered (gdb_stdlog,
4063 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
4064 target_pid_to_str (lp->ptid),
4065 paddress (gdbarch, pc),
4066 lp->step);
4067
4068 registers_changed ();
4069 if (linux_nat_prepare_to_resume != NULL)
4070 linux_nat_prepare_to_resume (lp);
4071 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
4072 lp->step, TARGET_SIGNAL_0);
4073 lp->stopped = 0;
4074 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
4075 lp->stopped_by_watchpoint = 0;
4076 }
4077
4078 return 0;
4079 }
4080
4081 static ptid_t
4082 linux_nat_wait (struct target_ops *ops,
4083 ptid_t ptid, struct target_waitstatus *ourstatus,
4084 int target_options)
4085 {
4086 ptid_t event_ptid;
4087
4088 if (debug_linux_nat)
4089 fprintf_unfiltered (gdb_stdlog,
4090 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
4091
4092 /* Flush the async file first. */
4093 if (target_can_async_p ())
4094 async_file_flush ();
4095
4096 /* Resume LWPs that are currently stopped without any pending status
4097 to report, but are resumed from the core's perspective. LWPs get
4098 in this state if we find them stopping at a time we're not
4099 interested in reporting the event (target_wait on a
4100 specific process, for example; see linux_nat_wait_1), and
4101 meanwhile the event became uninteresting. Don't bother resuming
4102 LWPs we're not going to wait for if they'd stop immediately. */
4103 if (non_stop)
4104 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4105
4106 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
4107
4108 /* If we requested any event, and something came out, assume there
4109 may be more. If we requested a specific lwp or process, also
4110 assume there may be more. */
4111 if (target_can_async_p ()
4112 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4113 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
4114 || !ptid_equal (ptid, minus_one_ptid)))
4115 async_file_mark ();
4116
4117 /* Get ready for the next event. */
4118 if (target_can_async_p ())
4119 target_async (inferior_event_handler, 0);
4120
4121 return event_ptid;
4122 }
4123
4124 static int
4125 kill_callback (struct lwp_info *lp, void *data)
4126 {
4127 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4128
4129 errno = 0;
4130 kill (GET_LWP (lp->ptid), SIGKILL);
4131 if (debug_linux_nat)
4132 fprintf_unfiltered (gdb_stdlog,
4133 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4134 target_pid_to_str (lp->ptid),
4135 errno ? safe_strerror (errno) : "OK");
4136
4137 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4138
4139 errno = 0;
4140 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4141 if (debug_linux_nat)
4142 fprintf_unfiltered (gdb_stdlog,
4143 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4144 target_pid_to_str (lp->ptid),
4145 errno ? safe_strerror (errno) : "OK");
4146
4147 return 0;
4148 }
4149
4150 static int
4151 kill_wait_callback (struct lwp_info *lp, void *data)
4152 {
4153 pid_t pid;
4154
4155 /* We must make sure that there are no pending events (delayed
4156 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
4157 program doesn't interfere with any following debugging session. */
4158
4159 /* For cloned processes we must check both with __WCLONE and
4160 without, since the exit status of a cloned process isn't reported
4161 with __WCLONE. */
4162 if (lp->cloned)
4163 {
4164 do
4165 {
4166 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
4167 if (pid != (pid_t) -1)
4168 {
4169 if (debug_linux_nat)
4170 fprintf_unfiltered (gdb_stdlog,
4171 "KWC: wait %s received unknown.\n",
4172 target_pid_to_str (lp->ptid));
4173 /* The Linux kernel sometimes fails to kill a thread
4174 completely after PTRACE_KILL; the thread goes from the
4175 stop point in do_fork out to the one in
4176 get_signal_to_deliver and waits again. So kill it
4177 again. */
4178 kill_callback (lp, NULL);
4179 }
4180 }
4181 while (pid == GET_LWP (lp->ptid));
4182
4183 gdb_assert (pid == -1 && errno == ECHILD);
4184 }
4185
4186 do
4187 {
4188 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
4189 if (pid != (pid_t) -1)
4190 {
4191 if (debug_linux_nat)
4192 fprintf_unfiltered (gdb_stdlog,
4193 "KWC: wait %s received unknown.\n",
4194 target_pid_to_str (lp->ptid));
4195 /* See the call to kill_callback above. */
4196 kill_callback (lp, NULL);
4197 }
4198 }
4199 while (pid == GET_LWP (lp->ptid));
4200
4201 gdb_assert (pid == -1 && errno == ECHILD);
4202 return 0;
4203 }
4204
4205 static void
4206 linux_nat_kill (struct target_ops *ops)
4207 {
4208 struct target_waitstatus last;
4209 ptid_t last_ptid;
4210 int status;
4211
4212 /* If we're stopped while forking and we haven't followed yet,
4213 kill the other task. We need to do this first because the
4214 parent will be sleeping if this is a vfork. */
4215
4216 get_last_target_status (&last_ptid, &last);
4217
4218 if (last.kind == TARGET_WAITKIND_FORKED
4219 || last.kind == TARGET_WAITKIND_VFORKED)
4220 {
4221 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4222 wait (&status);
4223 }
4224
4225 if (forks_exist_p ())
4226 linux_fork_killall ();
4227 else
4228 {
4229 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4230
4231 /* Stop all threads before killing them, since ptrace requires
4232 that the thread be stopped to successfully PTRACE_KILL. */
4233 iterate_over_lwps (ptid, stop_callback, NULL);
4234 /* ... and wait until all of them have reported back that
4235 they're no longer running. */
4236 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4237
4238 /* Kill all LWP's ... */
4239 iterate_over_lwps (ptid, kill_callback, NULL);
4240
4241 /* ... and wait until we've flushed all events. */
4242 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4243 }
4244
4245 target_mourn_inferior ();
4246 }
4247
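/* For illustration only: a standalone sketch (not part of GDB) of
   the kill sequence implemented by kill_callback and
   kill_wait_callback above -- SIGKILL first (PTRACE_KILL alone may
   just resume the tracee), then PTRACE_KILL, then keep reaping and
   re-killing until waitpid reports the thread is really gone.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static void
kill_and_reap (pid_t lwp)
{
  kill (lwp, SIGKILL);
  ptrace (PTRACE_KILL, lwp, 0, 0);

  /* As long as waitpid keeps returning the LWP (a stop rather than
     the final exit), some kernels need another nudge.  */
  while (waitpid (lwp, NULL, 0) == lwp)
    {
      kill (lwp, SIGKILL);
      ptrace (PTRACE_KILL, lwp, 0, 0);
    }
}
#endif
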
4248 static void
4249 linux_nat_mourn_inferior (struct target_ops *ops)
4250 {
4251 purge_lwp_list (ptid_get_pid (inferior_ptid));
4252
4253 if (! forks_exist_p ())
4254 /* Normal case, no other forks available. */
4255 linux_ops->to_mourn_inferior (ops);
4256 else
4257 /* Multi-fork case. The current inferior_ptid has exited, but
4258 there are other viable forks to debug. Delete the exiting
4259 one and context-switch to the first available. */
4260 linux_fork_mourn_inferior ();
4261 }
4262
4263 /* Convert a native/host siginfo object into/from the siginfo in the
4264 layout of the inferior's architecture. */
4265
4266 static void
4267 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4268 {
4269 int done = 0;
4270
4271 if (linux_nat_siginfo_fixup != NULL)
4272 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4273
4274 /* If there was no callback, or the callback didn't do anything,
4275 then just do a straight memcpy. */
4276 if (!done)
4277 {
4278 if (direction == 1)
4279 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4280 else
4281 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4282 }
4283 }
4284
4285 static LONGEST
4286 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4287 const char *annex, gdb_byte *readbuf,
4288 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4289 {
4290 int pid;
4291 struct siginfo siginfo;
4292 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4293
4294 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4295 gdb_assert (readbuf || writebuf);
4296
4297 pid = GET_LWP (inferior_ptid);
4298 if (pid == 0)
4299 pid = GET_PID (inferior_ptid);
4300
4301 if (offset > sizeof (siginfo))
4302 return -1;
4303
4304 errno = 0;
4305 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4306 if (errno != 0)
4307 return -1;
4308
4309 /* When GDB is built as a 64-bit application, ptrace writes into
4310 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4311 inferior with a 64-bit GDB should look the same as debugging it
4312 with a 32-bit GDB, we need to convert it. GDB core always sees
4313 the converted layout, so any read/write will have to be done
4314 post-conversion. */
4315 siginfo_fixup (&siginfo, inf_siginfo, 0);
4316
4317 if (offset + len > sizeof (siginfo))
4318 len = sizeof (siginfo) - offset;
4319
4320 if (readbuf != NULL)
4321 memcpy (readbuf, inf_siginfo + offset, len);
4322 else
4323 {
4324 memcpy (inf_siginfo + offset, writebuf, len);
4325
4326 /* Convert back to ptrace layout before flushing it out. */
4327 siginfo_fixup (&siginfo, inf_siginfo, 1);
4328
4329 errno = 0;
4330 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4331 if (errno != 0)
4332 return -1;
4333 }
4334
4335 return len;
4336 }
4337
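/* For illustration only: a standalone sketch (not part of GDB)
   showing the raw ptrace calls that back linux_xfer_siginfo above.
   The LWP must be in a ptrace-stop for either call to succeed.  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void
dump_siginfo (pid_t lwp)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, lwp, 0, &si) == 0)
    printf ("signo %d, code %d, errno %d\n",
            si.si_signo, si.si_code, si.si_errno);

  /* A write would go the other way:
     ptrace (PTRACE_SETSIGINFO, lwp, 0, &si);  */
}
#endif
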
4338 static LONGEST
4339 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4340 const char *annex, gdb_byte *readbuf,
4341 const gdb_byte *writebuf,
4342 ULONGEST offset, LONGEST len)
4343 {
4344 struct cleanup *old_chain;
4345 LONGEST xfer;
4346
4347 if (object == TARGET_OBJECT_SIGNAL_INFO)
4348 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4349 offset, len);
4350
4351 /* The target is connected but no live inferior is selected. Pass
4352 this request down to a lower stratum (e.g., the executable
4353 file). */
4354 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4355 return 0;
4356
4357 old_chain = save_inferior_ptid ();
4358
4359 if (is_lwp (inferior_ptid))
4360 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4361
4362 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4363 offset, len);
4364
4365 do_cleanups (old_chain);
4366 return xfer;
4367 }
4368
4369 static int
4370 linux_thread_alive (ptid_t ptid)
4371 {
4372 int err, tmp_errno;
4373
4374 gdb_assert (is_lwp (ptid));
4375
4376 /* Send signal 0 instead of using ptrace, because ptracing a
4377 running thread errors out claiming that the thread doesn't
4378 exist. */
4379 err = kill_lwp (GET_LWP (ptid), 0);
4380 tmp_errno = errno;
4381 if (debug_linux_nat)
4382 fprintf_unfiltered (gdb_stdlog,
4383 "LLTA: KILL(SIG0) %s (%s)\n",
4384 target_pid_to_str (ptid),
4385 err ? safe_strerror (tmp_errno) : "OK");
4386
4387 if (err != 0)
4388 return 0;
4389
4390 return 1;
4391 }
4392
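/* For illustration only: the classic "signal 0" liveness probe that
   linux_thread_alive relies on, as a standalone sketch.  Signal 0
   performs the existence and permission checks without delivering
   anything; ESRCH means the thread is gone.  (GDB's kill_lwp
   prefers the tkill syscall when available, so the probe targets
   the exact thread.)  */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

static int
lwp_alive (pid_t lwp)
{
  return kill (lwp, 0) == 0 || errno != ESRCH;
}
#endif
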
4393 static int
4394 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4395 {
4396 return linux_thread_alive (ptid);
4397 }
4398
4399 static char *
4400 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4401 {
4402 static char buf[64];
4403
4404 if (is_lwp (ptid)
4405 && (GET_PID (ptid) != GET_LWP (ptid)
4406 || num_lwps (GET_PID (ptid)) > 1))
4407 {
4408 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4409 return buf;
4410 }
4411
4412 return normal_pid_to_str (ptid);
4413 }
4414
4415 static char *
4416 linux_nat_thread_name (struct thread_info *thr)
4417 {
4418 int pid = ptid_get_pid (thr->ptid);
4419 long lwp = ptid_get_lwp (thr->ptid);
4420 #define FORMAT "/proc/%d/task/%ld/comm"
4421 char buf[sizeof (FORMAT) + 30];
4422 FILE *comm_file;
4423 char *result = NULL;
4424
4425 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4426 comm_file = fopen (buf, "r");
4427 if (comm_file)
4428 {
4429 /* Not exported by the kernel, so we define it here. */
4430 #define COMM_LEN 16
4431 static char line[COMM_LEN + 1];
4432
4433 if (fgets (line, sizeof (line), comm_file))
4434 {
4435 char *nl = strchr (line, '\n');
4436
4437 if (nl)
4438 *nl = '\0';
4439 if (*line != '\0')
4440 result = line;
4441 }
4442
4443 fclose (comm_file);
4444 }
4445
4446 #undef COMM_LEN
4447 #undef FORMAT
4448
4449 return result;
4450 }
4451
4452 /* Accepts an integer PID; returns a string representing a file that
4453 can be opened to get the symbols for the child process. */
4454
4455 static char *
4456 linux_child_pid_to_exec_file (int pid)
4457 {
4458 char *name1, *name2;
4459
4460 name1 = xmalloc (MAXPATHLEN);
4461 name2 = xmalloc (MAXPATHLEN);
4462 make_cleanup (xfree, name1);
4463 make_cleanup (xfree, name2);
4464 memset (name2, 0, MAXPATHLEN);
4465
4466 sprintf (name1, "/proc/%d/exe", pid);
4467 if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
4468 return name2;
4469 else
4470 return name1;
4471 }
4472
4473 /* Records the thread's register state for the corefile note
4474 section. */
4475
4476 static char *
4477 linux_nat_collect_thread_registers (const struct regcache *regcache,
4478 ptid_t ptid, bfd *obfd,
4479 char *note_data, int *note_size,
4480 enum target_signal stop_signal)
4481 {
4482 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4483 const struct regset *regset;
4484 int core_regset_p;
4485 gdb_gregset_t gregs;
4486 gdb_fpregset_t fpregs;
4487
4488 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4489
4490 if (core_regset_p
4491 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4492 sizeof (gregs)))
4493 != NULL && regset->collect_regset != NULL)
4494 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4495 else
4496 fill_gregset (regcache, &gregs, -1);
4497
4498 note_data = (char *) elfcore_write_prstatus
4499 (obfd, note_data, note_size, ptid_get_lwp (ptid),
4500 target_signal_to_host (stop_signal), &gregs);
4501
4502 if (core_regset_p
4503 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4504 sizeof (fpregs)))
4505 != NULL && regset->collect_regset != NULL)
4506 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4507 else
4508 fill_fpregset (regcache, &fpregs, -1);
4509
4510 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4511 &fpregs, sizeof (fpregs));
4512
4513 return note_data;
4514 }
4515
4516 /* Fills the "to_make_corefile_notes" target vector. Builds the note
4517 section for a corefile, and returns it in a malloc buffer. */
4518
4519 static char *
4520 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4521 {
4522 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4523 converted to gdbarch_core_regset_sections, this function can go away. */
4524 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4525 linux_nat_collect_thread_registers);
4526 }
4527
4528 /* Implement the to_xfer_partial interface for memory reads using the /proc
4529 filesystem. Because we can use a single read() call for /proc, this
4530 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4531 but it doesn't support writes. */
4532
4533 static LONGEST
4534 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4535 const char *annex, gdb_byte *readbuf,
4536 const gdb_byte *writebuf,
4537 ULONGEST offset, LONGEST len)
4538 {
4539 LONGEST ret;
4540 int fd;
4541 char filename[64];
4542
4543 if (object != TARGET_OBJECT_MEMORY || !readbuf)
4544 return 0;
4545
4546 /* Don't bother for one word. */
4547 if (len < 3 * sizeof (long))
4548 return 0;
4549
4550 /* We could keep this file open and cache it - possibly one per
4551 thread. That requires some juggling, but is even faster. */
4552 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4553 fd = open (filename, O_RDONLY | O_LARGEFILE);
4554 if (fd == -1)
4555 return 0;
4556
4557 /* If pread64 is available, use it. It's faster if the kernel
4558 supports it (only one syscall), and it's 64-bit safe even on
4559 32-bit platforms (for instance, SPARC debugging a SPARC64
4560 application). */
4561 #ifdef HAVE_PREAD64
4562 if (pread64 (fd, readbuf, len, offset) != len)
4563 #else
4564 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4565 #endif
4566 ret = 0;
4567 else
4568 ret = len;
4569
4570 close (fd);
4571 return ret;
4572 }
4573
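/* For illustration only: a standalone sketch (not part of GDB) of
   reading a stopped, ptrace-attached tracee's memory through
   /proc/PID/mem, the mechanism linux_proc_xfer_partial uses above.
   The kernel refuses this access for processes we don't trace.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
read_inferior_memory (int pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  /* pread keeps it to one syscall; build with _FILE_OFFSET_BITS=64
     (or use pread64) for 64-bit-safe offsets on 32-bit hosts.  */
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif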
4574
4575 /* Enumerate spufs IDs for process PID. */
4576 static LONGEST
4577 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4578 {
4579 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4580 LONGEST pos = 0;
4581 LONGEST written = 0;
4582 char path[128];
4583 DIR *dir;
4584 struct dirent *entry;
4585
4586 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4587 dir = opendir (path);
4588 if (!dir)
4589 return -1;
4590
4591 rewinddir (dir);
4592 while ((entry = readdir (dir)) != NULL)
4593 {
4594 struct stat st;
4595 struct statfs stfs;
4596 int fd;
4597
4598 fd = atoi (entry->d_name);
4599 if (!fd)
4600 continue;
4601
4602 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4603 if (stat (path, &st) != 0)
4604 continue;
4605 if (!S_ISDIR (st.st_mode))
4606 continue;
4607
4608 if (statfs (path, &stfs) != 0)
4609 continue;
4610 if (stfs.f_type != SPUFS_MAGIC)
4611 continue;
4612
4613 if (pos >= offset && pos + 4 <= offset + len)
4614 {
4615 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4616 written += 4;
4617 }
4618 pos += 4;
4619 }
4620
4621 closedir (dir);
4622 return written;
4623 }
4624
4625 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4626 object type, using the /proc file system. */
4627 static LONGEST
4628 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4629 const char *annex, gdb_byte *readbuf,
4630 const gdb_byte *writebuf,
4631 ULONGEST offset, LONGEST len)
4632 {
4633 char buf[128];
4634 int fd = 0;
4635 int ret = -1;
4636 int pid = PIDGET (inferior_ptid);
4637
4638 if (!annex)
4639 {
4640 if (!readbuf)
4641 return -1;
4642 else
4643 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4644 }
4645
4646 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4647 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4648 if (fd <= 0)
4649 return -1;
4650
4651 if (offset != 0
4652 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4653 {
4654 close (fd);
4655 return 0;
4656 }
4657
4658 if (writebuf)
4659 ret = write (fd, writebuf, (size_t) len);
4660 else if (readbuf)
4661 ret = read (fd, readbuf, (size_t) len);
4662
4663 close (fd);
4664 return ret;
4665 }
4666
4667
4668 /* Parse LINE as a signal set and add its set bits to SIGS. */
4669
4670 static void
4671 add_line_to_sigset (const char *line, sigset_t *sigs)
4672 {
4673 int len = strlen (line) - 1;
4674 const char *p;
4675 int signum;
4676
4677 if (line[len] != '\n')
4678 error (_("Could not parse signal set: %s"), line);
4679
4680 p = line;
4681 signum = len * 4;
4682 while (len-- > 0)
4683 {
4684 int digit;
4685
4686 if (*p >= '0' && *p <= '9')
4687 digit = *p - '0';
4688 else if (*p >= 'a' && *p <= 'f')
4689 digit = *p - 'a' + 10;
4690 else
4691 error (_("Could not parse signal set: %s"), line);
4692
4693 signum -= 4;
4694
4695 if (digit & 1)
4696 sigaddset (sigs, signum + 1);
4697 if (digit & 2)
4698 sigaddset (sigs, signum + 2);
4699 if (digit & 4)
4700 sigaddset (sigs, signum + 3);
4701 if (digit & 8)
4702 sigaddset (sigs, signum + 4);
4703
4704 p++;
4705 }
4706 }
4707
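/* Worked example (illustrative): given the status-file line
   "0000000000000202\n", LEN is 16 and SIGNUM starts at 64.  Digits
   are consumed left to right, so the rightmost digit covers signals
   1-4: its value 2 has bit 1 set, adding signal 2 (SIGINT).  The
   '2' two digits further left covers signals 9-12, adding signal 10
   (SIGUSR1 on most Linux architectures).  */
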
4708 /* Find process PID's pending signals from /proc/pid/status and set
4709 PENDING, BLOCKED and IGNORED to match. */
4710
4711 void
4712 linux_proc_pending_signals (int pid, sigset_t *pending,
4713 sigset_t *blocked, sigset_t *ignored)
4714 {
4715 FILE *procfile;
4716 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4717 struct cleanup *cleanup;
4718
4719 sigemptyset (pending);
4720 sigemptyset (blocked);
4721 sigemptyset (ignored);
4722 sprintf (fname, "/proc/%d/status", pid);
4723 procfile = fopen (fname, "r");
4724 if (procfile == NULL)
4725 error (_("Could not open %s"), fname);
4726 cleanup = make_cleanup_fclose (procfile);
4727
4728 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4729 {
4730 /* Normal queued signals are on the SigPnd line in the status
4731 file. However, 2.6 kernels also have a "shared" pending
4732 queue for delivering signals to a thread group, so check for
4733 a ShdPnd line also.
4734
4735 Unfortunately some Red Hat kernels include the shared pending
4736 queue but not the ShdPnd status field. */
4737
4738 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4739 add_line_to_sigset (buffer + 8, pending);
4740 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4741 add_line_to_sigset (buffer + 8, pending);
4742 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4743 add_line_to_sigset (buffer + 8, blocked);
4744 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4745 add_line_to_sigset (buffer + 8, ignored);
4746 }
4747
4748 do_cleanups (cleanup);
4749 }
4750
4751 static LONGEST
4752 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4753 const char *annex, gdb_byte *readbuf,
4754 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4755 {
4756 gdb_assert (object == TARGET_OBJECT_OSDATA);
4757
4758 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4759 }
4760
4761 static LONGEST
4762 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4763 const char *annex, gdb_byte *readbuf,
4764 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4765 {
4766 LONGEST xfer;
4767
4768 if (object == TARGET_OBJECT_AUXV)
4769 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4770 offset, len);
4771
4772 if (object == TARGET_OBJECT_OSDATA)
4773 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4774 offset, len);
4775
4776 if (object == TARGET_OBJECT_SPU)
4777 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4778 offset, len);
4779
4780 /* GDB may calculate addresses in a width larger than the target's
4781 address width. Addresses need to be masked before their final use
4782 - either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4783
4784 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4785
4786 if (object == TARGET_OBJECT_MEMORY)
4787 {
4788 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4789
4790 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4791 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4792 }
4793
4794 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4795 offset, len);
4796 if (xfer != 0)
4797 return xfer;
4798
4799 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4800 offset, len);
4801 }
4802
4803 /* Create a prototype generic GNU/Linux target. The client can override
4804 it with local methods. */
4805
4806 static void
4807 linux_target_install_ops (struct target_ops *t)
4808 {
4809 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4810 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4811 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4812 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4813 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4814 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4815 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4816 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4817 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4818 t->to_post_attach = linux_child_post_attach;
4819 t->to_follow_fork = linux_child_follow_fork;
4820 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4821
4822 super_xfer_partial = t->to_xfer_partial;
4823 t->to_xfer_partial = linux_xfer_partial;
4824 }
4825
4826 struct target_ops *
4827 linux_target (void)
4828 {
4829 struct target_ops *t;
4830
4831 t = inf_ptrace_target ();
4832 linux_target_install_ops (t);
4833
4834 return t;
4835 }
4836
4837 struct target_ops *
4838 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4839 {
4840 struct target_ops *t;
4841
4842 t = inf_ptrace_trad_target (register_u_offset);
4843 linux_target_install_ops (t);
4844
4845 return t;
4846 }
4847
4848 /* target_is_async_p implementation. */
4849
4850 static int
4851 linux_nat_is_async_p (void)
4852 {
4853 /* NOTE: palves 2008-03-21: We're only async when the user requests
4854 it explicitly with the "set target-async" command.
4855 Someday, linux will always be async. */
4856 return target_async_permitted;
4857 }
4858
4859 /* target_can_async_p implementation. */
4860
4861 static int
4862 linux_nat_can_async_p (void)
4863 {
4864 /* NOTE: palves 2008-03-21: We're only async when the user requests
4865 it explicitly with the "set target-async" command.
4866 Someday, linux will always be async. */
4867 return target_async_permitted;
4868 }
4869
4870 static int
4871 linux_nat_supports_non_stop (void)
4872 {
4873 return 1;
4874 }
4875
4876 /* True if we want to support multi-process. To be removed when GDB
4877 supports multi-exec. */
4878
4879 int linux_multi_process = 1;
4880
4881 static int
4882 linux_nat_supports_multi_process (void)
4883 {
4884 return linux_multi_process;
4885 }
4886
4887 static int
4888 linux_nat_supports_disable_randomization (void)
4889 {
4890 #ifdef HAVE_PERSONALITY
4891 return 1;
4892 #else
4893 return 0;
4894 #endif
4895 }
4896
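/* For illustration only: a standalone sketch (not part of GDB) of
   how disabling address space randomization works when
   HAVE_PERSONALITY is defined -- set ADDR_NO_RANDOMIZE in the
   process personality before exec'ing the inferior; the setting
   survives the exec.  */
#if 0
#include <sys/personality.h>
#include <unistd.h>

static void
exec_without_aslr (char *const argv[])
{
  int persona = personality (0xffffffff);  /* Query current value.  */

  if (persona != -1)
    personality (persona | ADDR_NO_RANDOMIZE);

  execv (argv[0], argv);
}
#endif
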
4897 static int async_terminal_is_ours = 1;
4898
4899 /* target_terminal_inferior implementation. */
4900
4901 static void
4902 linux_nat_terminal_inferior (void)
4903 {
4904 if (!target_is_async_p ())
4905 {
4906 /* Async mode is disabled. */
4907 terminal_inferior ();
4908 return;
4909 }
4910
4911 terminal_inferior ();
4912
4913 /* Calls to target_terminal_*() are meant to be idempotent. */
4914 if (!async_terminal_is_ours)
4915 return;
4916
4917 delete_file_handler (input_fd);
4918 async_terminal_is_ours = 0;
4919 set_sigint_trap ();
4920 }
4921
4922 /* target_terminal_ours implementation. */
4923
4924 static void
4925 linux_nat_terminal_ours (void)
4926 {
4927 if (!target_is_async_p ())
4928 {
4929 /* Async mode is disabled. */
4930 terminal_ours ();
4931 return;
4932 }
4933
4934 /* GDB should never give the terminal to the inferior if the
4935 inferior is running in the background (run&, continue&, etc.),
4936 but claiming it sure should. */
4937 terminal_ours ();
4938
4939 if (async_terminal_is_ours)
4940 return;
4941
4942 clear_sigint_trap ();
4943 add_file_handler (input_fd, stdin_event_handler, 0);
4944 async_terminal_is_ours = 1;
4945 }
4946
4947 static void (*async_client_callback) (enum inferior_event_type event_type,
4948 void *context);
4949 static void *async_client_context;
4950
4951 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4952 it notices when any child changes state and notifies the
4953 event loop; in sync mode, it allows us to use sigsuspend in
4954 linux_nat_wait_1 above to wait for the arrival of a SIGCHLD. */
4955
4956 static void
4957 sigchld_handler (int signo)
4958 {
4959 int old_errno = errno;
4960
4961 if (debug_linux_nat)
4962 ui_file_write_async_safe (gdb_stdlog,
4963 "sigchld\n", sizeof ("sigchld\n") - 1);
4964
4965 if (signo == SIGCHLD
4966 && linux_nat_event_pipe[0] != -1)
4967 async_file_mark (); /* Let the event loop know that there are
4968 events to handle. */
4969
4970 errno = old_errno;
4971 }
4972
4973 /* Callback registered with the target events file descriptor. */
4974
4975 static void
4976 handle_target_event (int error, gdb_client_data client_data)
4977 {
4978 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4979 }
4980
4981 /* Create/destroy the target events pipe. Returns previous state. */
4982
4983 static int
4984 linux_async_pipe (int enable)
4985 {
4986 int previous = (linux_nat_event_pipe[0] != -1);
4987
4988 if (previous != enable)
4989 {
4990 sigset_t prev_mask;
4991
4992 block_child_signals (&prev_mask);
4993
4994 if (enable)
4995 {
4996 if (pipe (linux_nat_event_pipe) == -1)
4997 internal_error (__FILE__, __LINE__,
4998 "creating event pipe failed.");
4999
5000 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5001 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5002 }
5003 else
5004 {
5005 close (linux_nat_event_pipe[0]);
5006 close (linux_nat_event_pipe[1]);
5007 linux_nat_event_pipe[0] = -1;
5008 linux_nat_event_pipe[1] = -1;
5009 }
5010
5011 restore_child_signals_mask (&prev_mask);
5012 }
5013
5014 return previous;
5015 }
5016
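/* For illustration only: the "self-pipe trick" that the event pipe
   above implements, as a standalone sketch (not part of GDB).  The
   async-signal-safe handler writes a byte into the pipe; the event
   loop watches the read end, turning a signal into an ordinary
   file-descriptor event.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];

static void
chld_handler (int signo)
{
  int saved_errno = errno;

  /* write is async-signal-safe; with O_NONBLOCK a full pipe is
     simply ignored -- one pending byte is enough to wake us.  */
  (void) write (event_pipe[1], "+", 1);
  errno = saved_errno;
}

static int
setup_event_pipe (void)
{
  if (pipe (event_pipe) == -1)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, chld_handler);
  return event_pipe[0];  /* Register this fd with the event loop.  */
}
#endif
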
5017 /* target_async implementation. */
5018
5019 static void
5020 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5021 void *context), void *context)
5022 {
5023 if (callback != NULL)
5024 {
5025 async_client_callback = callback;
5026 async_client_context = context;
5027 if (!linux_async_pipe (1))
5028 {
5029 add_file_handler (linux_nat_event_pipe[0],
5030 handle_target_event, NULL);
5031 /* There may be pending events to handle. Tell the event loop
5032 to poll them. */
5033 async_file_mark ();
5034 }
5035 }
5036 else
5037 {
5038 async_client_callback = callback;
5039 async_client_context = context;
5040 delete_file_handler (linux_nat_event_pipe[0]);
5041 linux_async_pipe (0);
5042 }
5043 return;
5044 }
5045
5046 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5047 event came out. */
5048
5049 static int
5050 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
5051 {
5052 if (!lwp->stopped)
5053 {
5054 ptid_t ptid = lwp->ptid;
5055
5056 if (debug_linux_nat)
5057 fprintf_unfiltered (gdb_stdlog,
5058 "LNSL: running -> suspending %s\n",
5059 target_pid_to_str (lwp->ptid));
5060
5061
5062 if (lwp->last_resume_kind == resume_stop)
5063 {
5064 if (debug_linux_nat)
5065 fprintf_unfiltered (gdb_stdlog,
5066 "linux-nat: already stopping LWP %ld at "
5067 "GDB's request\n",
5068 ptid_get_lwp (lwp->ptid));
5069 return 0;
5070 }
5071
5072 stop_callback (lwp, NULL);
5073 lwp->last_resume_kind = resume_stop;
5074 }
5075 else
5076 {
5077 /* Already known to be stopped; do nothing. */
5078
5079 if (debug_linux_nat)
5080 {
5081 if (find_thread_ptid (lwp->ptid)->stop_requested)
5082 fprintf_unfiltered (gdb_stdlog,
5083 "LNSL: already stopped/stop_requested %s\n",
5084 target_pid_to_str (lwp->ptid));
5085 else
5086 fprintf_unfiltered (gdb_stdlog,
5087 "LNSL: already stopped/no "
5088 "stop_requested yet %s\n",
5089 target_pid_to_str (lwp->ptid));
5090 }
5091 }
5092 return 0;
5093 }
5094
5095 static void
5096 linux_nat_stop (ptid_t ptid)
5097 {
5098 if (non_stop)
5099 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5100 else
5101 linux_ops->to_stop (ptid);
5102 }
5103
5104 static void
5105 linux_nat_close (int quitting)
5106 {
5107 /* Unregister from the event loop. */
5108 if (linux_nat_is_async_p ())
5109 linux_nat_async (NULL, 0);
5110
5111 if (linux_ops->to_close)
5112 linux_ops->to_close (quitting);
5113 }
5114
5115 /* When requests are passed down from the linux-nat layer to the
5116 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5117 used. The address space pointer is stored in the inferior object,
5118 but the common code that is passed such ptid can't tell whether
5119 lwpid is a "main" process id or not (it assumes so). We reverse
5120 look up the "main" process id from the lwp here. */
5121
5122 struct address_space *
5123 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5124 {
5125 struct lwp_info *lwp;
5126 struct inferior *inf;
5127 int pid;
5128
5129 pid = GET_LWP (ptid);
5130 if (GET_LWP (ptid) == 0)
5131 {
5132 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5133 tgid. */
5134 lwp = find_lwp_pid (ptid);
5135 pid = GET_PID (lwp->ptid);
5136 }
5137 else
5138 {
5139 /* A (pid,lwpid,0) ptid. */
5140 pid = GET_PID (ptid);
5141 }
5142
5143 inf = find_inferior_pid (pid);
5144 gdb_assert (inf != NULL);
5145 return inf->aspace;
5146 }
5147
5148 int
5149 linux_nat_core_of_thread_1 (ptid_t ptid)
5150 {
5151 struct cleanup *back_to;
5152 char *filename;
5153 FILE *f;
5154 char *content = NULL;
5155 char *p;
5156 char *ts = 0;
5157 int content_read = 0;
5158 int i;
5159 int core;
5160
5161 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5162 GET_PID (ptid), GET_LWP (ptid));
5163 back_to = make_cleanup (xfree, filename);
5164
5165 f = fopen (filename, "r");
5166 if (!f)
5167 {
5168 do_cleanups (back_to);
5169 return -1;
5170 }
5171
5172 make_cleanup_fclose (f);
5173
5174 for (;;)
5175 {
5176 int n;
5177
5178 content = xrealloc (content, content_read + 1024);
5179 n = fread (content + content_read, 1, 1024, f);
5180 content_read += n;
5181 if (n < 1024)
5182 {
5183 content[content_read] = '\0';
5184 break;
5185 }
5186 }
5187
5188 make_cleanup (xfree, content);
5189
5190 p = strchr (content, '(');
5191
5192 /* Skip ")". */
5193 if (p != NULL)
5194 p = strchr (p, ')');
5195 if (p != NULL)
5196 p++;
5197
5198 /* If the first field after the program name has index 0, then the core
5199 number is the field with index 36. There's no constant for that anywhere. */
5200 if (p != NULL)
5201 p = strtok_r (p, " ", &ts);
5202 for (i = 0; p != NULL && i != 36; ++i)
5203 p = strtok_r (NULL, " ", &ts);
5204
5205 if (p == NULL || sscanf (p, "%d", &core) == 0)
5206 core = -1;
5207
5208 do_cleanups (back_to);
5209
5210 return core;
5211 }
5212
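/* Worked example (illustrative): a /proc/PID/task/TID/stat line
   looks like "1234 (a program) R 1 ...".  Because the command name
   in parentheses can itself contain spaces, the code above first
   skips past the closing ')' and only then splits on spaces, so the
   state letter gets index 0 and the processor core is the field
   with index 36 (field 39 in the proc(5) numbering).  */
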
5213 /* Return the cached value of the processor core for thread PTID. */
5214
5215 int
5216 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5217 {
5218 struct lwp_info *info = find_lwp_pid (ptid);
5219
5220 if (info)
5221 return info->core;
5222 return -1;
5223 }
5224
5225 void
5226 linux_nat_add_target (struct target_ops *t)
5227 {
5228 /* Save the provided single-threaded target. We save this in a separate
5229 variable because another target we've inherited from (e.g. inf-ptrace)
5230 may have saved a pointer to T; we want to use it for the final
5231 process stratum target. */
5232 linux_ops_saved = *t;
5233 linux_ops = &linux_ops_saved;
5234
5235 /* Override some methods for multithreading. */
5236 t->to_create_inferior = linux_nat_create_inferior;
5237 t->to_attach = linux_nat_attach;
5238 t->to_detach = linux_nat_detach;
5239 t->to_resume = linux_nat_resume;
5240 t->to_wait = linux_nat_wait;
5241 t->to_pass_signals = linux_nat_pass_signals;
5242 t->to_xfer_partial = linux_nat_xfer_partial;
5243 t->to_kill = linux_nat_kill;
5244 t->to_mourn_inferior = linux_nat_mourn_inferior;
5245 t->to_thread_alive = linux_nat_thread_alive;
5246 t->to_pid_to_str = linux_nat_pid_to_str;
5247 t->to_thread_name = linux_nat_thread_name;
5248 t->to_has_thread_control = tc_schedlock;
5249 t->to_thread_address_space = linux_nat_thread_address_space;
5250 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5251 t->to_stopped_data_address = linux_nat_stopped_data_address;
5252
5253 t->to_can_async_p = linux_nat_can_async_p;
5254 t->to_is_async_p = linux_nat_is_async_p;
5255 t->to_supports_non_stop = linux_nat_supports_non_stop;
5256 t->to_async = linux_nat_async;
5257 t->to_terminal_inferior = linux_nat_terminal_inferior;
5258 t->to_terminal_ours = linux_nat_terminal_ours;
5259 t->to_close = linux_nat_close;
5260
5261 /* Methods for non-stop support. */
5262 t->to_stop = linux_nat_stop;
5263
5264 t->to_supports_multi_process = linux_nat_supports_multi_process;
5265
5266 t->to_supports_disable_randomization
5267 = linux_nat_supports_disable_randomization;
5268
5269 t->to_core_of_thread = linux_nat_core_of_thread;
5270
5271 /* We don't change the stratum; this target will sit at
5272 process_stratum and thread_db will sit at thread_stratum. This
5273 is a little strange, since this is a multi-threaded-capable
5274 target, but we want to be on the stack below thread_db, and we
5275 also want to be used for single-threaded processes. */
5276
5277 add_target (t);
5278 }
5279
5280 /* Register a method to call whenever a new thread is attached. */
5281 void
5282 linux_nat_set_new_thread (struct target_ops *t,
5283 void (*new_thread) (struct lwp_info *))
5284 {
5285 /* Save the pointer. We only support a single registered instance
5286 of the GNU/Linux native target, so we do not need to map this to
5287 T. */
5288 linux_nat_new_thread = new_thread;
5289 }
5290
5291 /* Register a method that converts a siginfo object between the layout
5292 that ptrace returns, and the layout in the architecture of the
5293 inferior. */
5294 void
5295 linux_nat_set_siginfo_fixup (struct target_ops *t,
5296 int (*siginfo_fixup) (struct siginfo *,
5297 gdb_byte *,
5298 int))
5299 {
5300 /* Save the pointer. */
5301 linux_nat_siginfo_fixup = siginfo_fixup;
5302 }
5303
5304 /* Register a method to call prior to resuming a thread. */
5305
5306 void
5307 linux_nat_set_prepare_to_resume (struct target_ops *t,
5308 void (*prepare_to_resume) (struct lwp_info *))
5309 {
5310 /* Save the pointer. */
5311 linux_nat_prepare_to_resume = prepare_to_resume;
5312 }
5313
5314 /* Return the saved siginfo associated with PTID. */
5315 struct siginfo *
5316 linux_nat_get_siginfo (ptid_t ptid)
5317 {
5318 struct lwp_info *lp = find_lwp_pid (ptid);
5319
5320 gdb_assert (lp != NULL);
5321
5322 return &lp->siginfo;
5323 }
5324
5325 /* Provide a prototype to silence -Wmissing-prototypes. */
5326 extern initialize_file_ftype _initialize_linux_nat;
5327
5328 void
5329 _initialize_linux_nat (void)
5330 {
5331 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5332 &debug_linux_nat, _("\
5333 Set debugging of GNU/Linux lwp module."), _("\
5334 Show debugging of GNU/Linux lwp module."), _("\
5335 Enables printf debugging output."),
5336 NULL,
5337 show_debug_linux_nat,
5338 &setdebuglist, &showdebuglist);
5339
5340 /* Save this mask as the default. */
5341 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5342
5343 /* Install a SIGCHLD handler. */
5344 sigchld_action.sa_handler = sigchld_handler;
5345 sigemptyset (&sigchld_action.sa_mask);
5346 sigchld_action.sa_flags = SA_RESTART;
5347
5348 /* Make it the default. */
5349 sigaction (SIGCHLD, &sigchld_action, NULL);
5350
5351 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5352 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5353 sigdelset (&suspend_mask, SIGCHLD);
5354
5355 sigemptyset (&blocked_mask);
5356 }
5357 \f
5358
5359 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5360 the GNU/Linux Threads library and therefore doesn't really belong
5361 here. */
5362
5363 /* Read variable NAME in the target and return its value if found.
5364 Otherwise return zero. It is assumed that the type of the variable
5365 is `int'. */
5366
5367 static int
5368 get_signo (const char *name)
5369 {
5370 struct minimal_symbol *ms;
5371 int signo;
5372
5373 ms = lookup_minimal_symbol (name, NULL, NULL);
5374 if (ms == NULL)
5375 return 0;
5376
5377 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5378 sizeof (signo)) != 0)
5379 return 0;
5380
5381 return signo;
5382 }
5383
5384 /* Return the set of signals used by the threads library in *SET. */
5385
5386 void
5387 lin_thread_get_thread_signals (sigset_t *set)
5388 {
5389 struct sigaction action;
5390 int restart, cancel;
5391
5392 sigemptyset (&blocked_mask);
5393 sigemptyset (set);
5394
5395 restart = get_signo ("__pthread_sig_restart");
5396 cancel = get_signo ("__pthread_sig_cancel");
5397
5398 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5399 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5400 not provide any way for the debugger to query the signal numbers -
5401 fortunately they don't change! */
5402
5403 if (restart == 0)
5404 restart = __SIGRTMIN;
5405
5406 if (cancel == 0)
5407 cancel = __SIGRTMIN + 1;
5408
5409 sigaddset (set, restart);
5410 sigaddset (set, cancel);
5411
5412 /* The GNU/Linux Threads library makes terminating threads send a
5413 special "cancel" signal instead of SIGCHLD. Make sure we catch
5414 those (to prevent them from terminating GDB itself, which is
5415 likely to be their default action) and treat them the same way as
5416 SIGCHLD. */
5417
5418 action.sa_handler = sigchld_handler;
5419 sigemptyset (&action.sa_mask);
5420 action.sa_flags = SA_RESTART;
5421 sigaction (cancel, &action, NULL);
5422
5423 /* We block the "cancel" signal throughout this code ... */
5424 sigaddset (&blocked_mask, cancel);
5425 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5426
5427 /* ... except during a sigsuspend. */
5428 sigdelset (&suspend_mask, cancel);
5429 }