/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc.  */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"
#include "linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents high-level logic of this file.

   Waiting for events in sync mode
   ===============================

   When waiting for an event in a specific thread, we just use waitpid,
   passing the specific pid, and not passing WNOHANG.

   When waiting for an event in all threads, waitpid is not quite good:
   prior to version 2.4, Linux can wait for an event either in the main
   thread or in secondary threads, but not in both at the same time
   (2.4 added the __WALL flag).  So, if we use blocking waitpid, we
   might miss an event.  The solution is to use non-blocking waitpid,
   together with sigsuspend.  First, we use non-blocking waitpid to get
   an event in the main process, if any.  Second, we use non-blocking
   waitpid with the __WCLONE flag to check for events in cloned
   processes.  If nothing is found, we use sigsuspend to wait for
   SIGCHLD.  When SIGCHLD arrives, it means something happened to a
   child process -- and SIGCHLD will be delivered both for events in
   the main debugged process and in cloned processes.  As soon as we
   know there's an event, we get back to calling non-blocking waitpid
   with and without __WCLONE.

   Note that SIGCHLD should be blocked between waitpid and sigsuspend
   calls, so that we don't miss a signal.  If SIGCHLD arrives in
   between, when it's blocked, the signal becomes pending and sigsuspend
   immediately notices it and returns.
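
   For illustration, a minimal sketch of that wait loop (hypothetical
   and simplified; the real logic, with per-LWP bookkeeping, lives
   further down in this file):

     sigset_t chld_mask, prev_mask;
     int pid, status;

     sigemptyset (&chld_mask);
     sigaddset (&chld_mask, SIGCHLD);
     sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask);
     for (;;)
       {
         pid = waitpid (-1, &status, WNOHANG);
         if (pid <= 0)
           pid = waitpid (-1, &status, WNOHANG | __WCLONE);
         if (pid > 0)
           break;
         sigsuspend (&prev_mask);
       }
     sigprocmask (SIG_SETMASK, &prev_mask, NULL);

   Because SIGCHLD stays blocked outside of sigsuspend, a SIGCHLD that
   arrives between the two waitpid calls is left pending and makes the
   next sigsuspend return immediately, so no event is lost.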
102
103 Waiting for events in async mode
104 ================================
105
106 In async mode, GDB should always be ready to handle both user input
107 and target events, so neither blocking waitpid nor sigsuspend are
108 viable options. Instead, we should asynchronously notify the GDB main
109 event loop whenever there's an unprocessed event from the target. We
110 detect asynchronous target events by handling SIGCHLD signals. To
111 notify the event loop about target events, the self-pipe trick is used
112 --- a pipe is registered as waitable event source in the event loop,
113 the event loop select/poll's on the read end of this pipe (as well on
114 other event sources, e.g., stdin), and the SIGCHLD handler writes a
115 byte to this pipe. This is more portable than relying on
116 pselect/ppoll, since on kernels that lack those syscalls, libc
117 emulates them with select/poll+sigprocmask, and that is racy
118 (a.k.a. plain broken).
119
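   A minimal sketch of the self-pipe trick, for illustration only.  The
   real pipe is linux_nat_event_pipe, declared below; both of its ends
   are set non-blocking, and only async-signal-safe calls may appear in
   the handler.  One byte is enough, since the event loop only needs
   the read end to become readable:

     static void
     sigchld_handler_sketch (int signo)
     {
       int saved_errno = errno;

       write (linux_nat_event_pipe[1], "+", 1);
       errno = saved_errno;
     }

   The event loop then select/poll's on linux_nat_event_pipe[0]
   alongside its other event sources, and drains the pipe before
   processing target events.
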
   Obviously, if we fail to notify the event loop when there's a target
   event, things are bad.  OTOH, if we notify the event loop when
   there's no event from the target, linux_nat_wait will detect that
   there's no real event to report, and return an event of type
   TARGET_WAITKIND_IGNORE.  This is mostly harmless, but it will waste
   time and is better avoided.

   The main design point is that every time GDB is outside linux-nat.c,
   we have a SIGCHLD handler installed that is called when something
   happens to the target and notifies the GDB event loop.  Whenever GDB
   core decides to handle the event, and calls into linux-nat.c, we
   process things as in sync mode, except that we never block in
   sigsuspend.

   While processing an event, we may end up momentarily blocked in
   waitpid calls.  Those waitpid calls, while blocking, are guaranteed
   to return quickly.  E.g., in all-stop mode, before reporting to the
   core that an LWP hit a breakpoint, all LWPs are stopped by sending
   them SIGSTOP, and synchronously waiting for the SIGSTOP to be
   reported.  Note that this is different from blocking indefinitely
   waiting for the next event --- here, we're already handling an
   event.

   Use of signals
   ==============

   We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
   another signal is not entirely significant; we just need a signal to
   be delivered, so that we can intercept it.  SIGSTOP's advantage is
   that it can not be blocked.  A disadvantage is that it is not a
   real-time signal, so it can only be queued once; we do not keep
   track of other sources of SIGSTOP.

   Two other signals that can't be blocked are SIGCONT and SIGKILL.
   But we can't use them, because they have special behavior when the
   signal is generated - not when it is delivered.  SIGCONT resumes the
   entire thread group and SIGKILL kills the entire thread group.

   A delivered SIGSTOP would stop the entire thread group, not just the
   thread we tkill'd.  But we never let the SIGSTOP be delivered; we
   always intercept and cancel it (by PTRACE_CONT without passing
   SIGSTOP).

   We could use a real-time signal instead.  This would solve those
   problems; we could use PTRACE_GETSIGINFO to locate the specific stop
   signals sent by GDB.  But we would still have to have some support
   for SIGSTOP, since PTRACE_ATTACH generates it, and there are races
   with trying to find a signal that is not blocked.  */
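
/* For reference, signalling one specific thread, rather than the whole
   thread group as kill would, is done with the tkill syscall where
   available.  A simplified sketch, assuming the HAVE_TKILL_SYSCALL
   includes above; the real helper is kill_lwp, declared below, which
   additionally falls back to kill when tkill is unavailable at
   run time:

     static int
     kill_lwp_sketch (int lwpid, int signo)
     {
     #ifdef HAVE_TKILL_SYSCALL
       return syscall (__NR_tkill, lwpid, signo);
     #else
       return kill (lwpid, signo);
     #endif
     }
*/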

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

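/* For example, with PTRACE_O_TRACESYSGOOD in effect, a stop reported
   by waitpid can be classified along these lines (a sketch; the real
   decoding happens in the wait machinery and in status_to_str below):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
       ... stopped at a syscall entry or exit ...
     else if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
       ... ordinary trap, e.g. a breakpoint hit ...
*/
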
/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static void linux_nat_async (void (*callback)
			     (enum inferior_event_type event_type,
			      void *context),
			     void *context);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

\f
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.
   We create a child process, attach to it, use PTRACE_SETOPTIONS to
   enable fork tracing, and let it fork.  If the process exits, we assume
   that we can't use TRACEFORK; if we get the fork notification, and we
   can extract the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
	   status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  restore_child_signals_mask (&prev_mask);
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed "
		   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected "
		   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: "
		       "failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
 out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
     support read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  struct cleanup *old_chain;

	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_pid (GET_PID (inferior_ptid));
	    }

	  if (info_verbose || debug_linux_nat)
	    {
	      target_terminal_ours ();
	      fprintf_filtered (gdb_stdlog,
				"Detaching after fork from "
				"child process %d.\n",
				child_pid);
	    }

	  old_chain = save_inferior_ptid ();
	  inferior_ptid = ptid_build (child_pid, child_pid, 0);

	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->last_resume_kind = resume_stop;
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  /* CHILD_LP has new PID, therefore linux_nat_new_thread is
	     not called for it.  See i386_inferior_data_get for the
	     Linux kernel specifics.  Ensure
	     linux_nat_prepare_to_resume will reset the hardware debug
	     registers.  It is done by the linux_nat_new_thread call,
	     which is being skipped in add_lwp above for the first lwp
	     of a pid.  */
	  gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
	  if (linux_nat_new_thread != NULL)
	    linux_nat_new_thread (child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);
	  ptrace (PTRACE_DETACH, child_pid, 0, 0);

	  do_cleanups (old_chain);
	}
      else
	{
	  struct inferior *parent_inf, *child_inf;
	  struct cleanup *old_chain;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_pid);

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);

	  old_chain = save_inferior_ptid ();
	  save_current_program_space ();

	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
	  add_thread (inferior_ptid);
	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->last_resume_kind = resume_stop;
	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = add_program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (solib-svr4) learn about
		 this new process, relocate the cloned exec, pull in
		 shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }

	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();

	  do_cleanups (old_chain);
	}

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;

	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork_flag >= 0);

	  if (linux_supports_tracevforkdone (0))
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypasses actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_can_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *child_lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
	{
	  target_terminal_ours ();
	  if (has_vforked)
	    fprintf_filtered (gdb_stdlog,
			      _("Attaching after process %d "
				"vfork to child process %d.\n"),
			      parent_pid, child_pid);
	  else
	    fprintf_filtered (gdb_stdlog,
			      _("Attaching after process %d "
				"fork to child process %d.\n"),
			      parent_pid, child_pid);
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse its program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = add_program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (solib-svr4) learn about
	     this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block so that sigsuspend works reliably.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = target_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);

\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
	snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
		  strsignal (SIGTRAP));
      else
	snprintf (buf, sizeof (buf), "%s (stopped)",
		  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  lwp_free (lp);
	}
      else
	lpprev = lp;
    }
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  Don't do this for the first
     thread though.  If we're spawning a child ("run"), the thread
     executes the shell wrapper first, and we shouldn't touch it until
     it execs the program we want to debug.  For "attach", it'd be
     okay to call the callback, but it's not necessary, because
     watchpoints can't yet have been inserted into the inferior.  */
  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   int (*callback) (struct lwp_info *, void *),
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data))
	    return lp;
	}
    }

  return NULL;
}

/* Iterate like iterate_over_lwps does, except that when we are forking
   off a child, call CALLBACK with CALLBACK_DATA specifically and only
   for that new child PID.  */

void
linux_nat_iterate_watchpoint_lwps
  (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
{
  int inferior_pid = ptid_get_pid (inferior_ptid);
  struct inferior *inf = current_inferior ();

  if (inf->pid == inferior_pid)
    {
      /* Iterate all the threads of the current inferior.  Without
	 specifying INFERIOR_PID it would iterate all threads of all
	 inferiors, which is inappropriate for watchpoints.  */

      iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
    }
  else
    {
      /* Detaching a new child PID temporarily present in INFERIOR_PID.  */

      struct lwp_info *child_lp;
      struct cleanup *old_chain;
      pid_t child_pid = GET_PID (inferior_ptid);
      ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);

      gdb_assert (!is_lwp (inferior_ptid));
      gdb_assert (find_lwp_pid (child_ptid) == NULL);
      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
      old_chain = make_cleanup (delete_lwp_cleanup, child_lp);

      callback (child_lp, callback_data);

      do_cleanups (old_chain);
    }
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  sigset_t prev_mask;
  int lwpid;

  gdb_assert (is_lwp (ptid));

  block_child_signals (&prev_mask);

  lp = find_lwp_pid (ptid);
  lwpid = GET_LWP (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork_flag)
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  restore_child_signals_mask (&prev_mask);
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);

		      restore_child_signals_mask (&prev_mask);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  restore_child_signals_mask (&prev_mask);
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	{
	  restore_child_signals_mask (&prev_mask);
	  return 1;
	}

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  restore_child_signals_mask (&prev_mask);
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_attach (ops, args, from_tty);

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum target_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = target_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 target_signal_to_name (signo),
		 target_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum target_signal signo = TARGET_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = TARGET_SIGNAL_0;	/* A pending ptrace event, not a real
				   signal.  */
  else if (lp->status)
    signo = target_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == TARGET_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    target_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (target_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    target_signal_to_string (signo));
    }

  return 0;
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

1814 static void
1815 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1816 {
1817 int pid;
1818 int status;
1819 struct lwp_info *main_lwp;
1820
1821 pid = GET_PID (inferior_ptid);
1822
1823 if (target_can_async_p ())
1824 linux_nat_async (NULL, 0);
1825
1826 /* Stop all threads before detaching. ptrace requires that the
1827 thread is stopped to successfully detach. */
1828 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1829 /* ... and wait until all of them have reported back that
1830 they're no longer running. */
1831 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1832
1833 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1834
1835 /* Only the initial process should be left right now. */
1836 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1837
1838 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1839
1840 /* Pass on any pending signal for the last LWP. */
1841 if ((args == NULL || *args == '\0')
1842 && get_pending_status (main_lwp, &status) != -1
1843 && WIFSTOPPED (status))
1844 {
1845 /* Put the signal number in ARGS so that inf_ptrace_detach will
1846 pass it along with PTRACE_DETACH. */
1847 args = alloca (8);
1848 sprintf (args, "%d", (int) WSTOPSIG (status));
1849 if (debug_linux_nat)
1850 fprintf_unfiltered (gdb_stdlog,
1851 "LND: Sending signal %s to %s\n",
1852 args,
1853 target_pid_to_str (main_lwp->ptid));
1854 }
1855
1856 if (linux_nat_prepare_to_resume != NULL)
1857 linux_nat_prepare_to_resume (main_lwp);
1858 delete_lwp (main_lwp->ptid);
1859
1860 if (forks_exist_p ())
1861 {
1862 /* Multi-fork case. The current inferior_ptid is being detached
1863 from, but there are other viable forks to debug. Detach from
1864 the current fork, and context-switch to the first
1865 available. */
1866 linux_fork_detach (args, from_tty);
1867
1868 if (non_stop && target_can_async_p ())
1869 target_async (inferior_event_handler, 0);
1870 }
1871 else
1872 linux_ops->to_detach (ops, args, from_tty);
1873 }
1874
1875 /* Resume LP. */
1876
1877 static void
1878 resume_lwp (struct lwp_info *lp, int step)
1879 {
1880 if (lp->stopped)
1881 {
1882 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1883
1884 if (inf->vfork_child != NULL)
1885 {
1886 if (debug_linux_nat)
1887 fprintf_unfiltered (gdb_stdlog,
1888 "RC: Not resuming %s (vfork parent)\n",
1889 target_pid_to_str (lp->ptid));
1890 }
1891 else if (lp->status == 0
1892 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1893 {
1894 if (debug_linux_nat)
1895 fprintf_unfiltered (gdb_stdlog,
1896 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1897 target_pid_to_str (lp->ptid));
1898
1899 if (linux_nat_prepare_to_resume != NULL)
1900 linux_nat_prepare_to_resume (lp);
1901 linux_ops->to_resume (linux_ops,
1902 pid_to_ptid (GET_LWP (lp->ptid)),
1903 step, TARGET_SIGNAL_0);
1904 lp->stopped = 0;
1905 lp->step = step;
1906 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1907 lp->stopped_by_watchpoint = 0;
1908 }
1909 else
1910 {
1911 if (debug_linux_nat)
1912 fprintf_unfiltered (gdb_stdlog,
1913 "RC: Not resuming sibling %s (has pending)\n",
1914 target_pid_to_str (lp->ptid));
1915 }
1916 }
1917 else
1918 {
1919 if (debug_linux_nat)
1920 fprintf_unfiltered (gdb_stdlog,
1921 "RC: Not resuming sibling %s (not stopped)\n",
1922 target_pid_to_str (lp->ptid));
1923 }
1924 }
1925
1926 static int
1927 resume_callback (struct lwp_info *lp, void *data)
1928 {
1929 resume_lwp (lp, 0);
1930 return 0;
1931 }
1932
1933 static int
1934 resume_clear_callback (struct lwp_info *lp, void *data)
1935 {
1936 lp->resumed = 0;
1937 lp->last_resume_kind = resume_stop;
1938 return 0;
1939 }
1940
1941 static int
1942 resume_set_callback (struct lwp_info *lp, void *data)
1943 {
1944 lp->resumed = 1;
1945 lp->last_resume_kind = resume_continue;
1946 return 0;
1947 }
1948
1949 static void
1950 linux_nat_resume (struct target_ops *ops,
1951 ptid_t ptid, int step, enum target_signal signo)
1952 {
1953 sigset_t prev_mask;
1954 struct lwp_info *lp;
1955 int resume_many;
1956
1957 if (debug_linux_nat)
1958 fprintf_unfiltered (gdb_stdlog,
1959 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1960 step ? "step" : "resume",
1961 target_pid_to_str (ptid),
1962 (signo != TARGET_SIGNAL_0
1963 ? strsignal (target_signal_to_host (signo)) : "0"),
1964 target_pid_to_str (inferior_ptid));
1965
1966 block_child_signals (&prev_mask);
1967
1968 /* A specific PTID means `step only this process id'. */
1969 resume_many = (ptid_equal (minus_one_ptid, ptid)
1970 || ptid_is_pid (ptid));
1971
1972 /* Mark the lwps we're resuming as resumed. */
1973 iterate_over_lwps (ptid, resume_set_callback, NULL);
1974
1975 /* See if it's the current inferior that should be handled
1976 specially. */
1977 if (resume_many)
1978 lp = find_lwp_pid (inferior_ptid);
1979 else
1980 lp = find_lwp_pid (ptid);
1981 gdb_assert (lp != NULL);
1982
1983 /* Remember if we're stepping. */
1984 lp->step = step;
1985 lp->last_resume_kind = step ? resume_step : resume_continue;
1986
1987 /* If we have a pending wait status for this thread, there is no
1988 point in resuming the process. But first make sure that
1989 linux_nat_wait won't preemptively handle the event - we
1990 should never take this short-circuit if we are going to
1991 leave LP running, since we have skipped resuming all the
1992 other threads. This bit of code needs to be synchronized
1993 with linux_nat_wait. */
1994
1995 if (lp->status && WIFSTOPPED (lp->status))
1996 {
1997 if (!lp->step
1998 && WSTOPSIG (lp->status)
1999 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
2000 {
2001 if (debug_linux_nat)
2002 fprintf_unfiltered (gdb_stdlog,
2003 "LLR: Not short circuiting for ignored "
2004 "status 0x%x\n", lp->status);
2005
2006 /* FIXME: What should we do if we are supposed to continue
2007 this thread with a signal? */
2008 gdb_assert (signo == TARGET_SIGNAL_0);
2009 signo = target_signal_from_host (WSTOPSIG (lp->status));
2010 lp->status = 0;
2011 }
2012 }
2013
2014 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2015 {
2016 /* FIXME: What should we do if we are supposed to continue
2017 this thread with a signal? */
2018 gdb_assert (signo == TARGET_SIGNAL_0);
2019
2020 if (debug_linux_nat)
2021 fprintf_unfiltered (gdb_stdlog,
2022 "LLR: Short circuiting for status 0x%x\n",
2023 lp->status);
2024
2025 restore_child_signals_mask (&prev_mask);
2026 if (target_can_async_p ())
2027 {
2028 target_async (inferior_event_handler, 0);
2029 /* Tell the event loop we have something to process. */
2030 async_file_mark ();
2031 }
2032 return;
2033 }
2034
2035 /* Mark LWP as not stopped to prevent it from being continued by
2036 resume_callback. */
2037 lp->stopped = 0;
2038
2039 if (resume_many)
2040 iterate_over_lwps (ptid, resume_callback, NULL);
2041
2042 /* Convert to something the lower layer understands. */
2043 ptid = pid_to_ptid (GET_LWP (lp->ptid));
2044
2045 if (linux_nat_prepare_to_resume != NULL)
2046 linux_nat_prepare_to_resume (lp);
2047 linux_ops->to_resume (linux_ops, ptid, step, signo);
2048 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2049 lp->stopped_by_watchpoint = 0;
2050
2051 if (debug_linux_nat)
2052 fprintf_unfiltered (gdb_stdlog,
2053 "LLR: %s %s, %s (resume event thread)\n",
2054 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2055 target_pid_to_str (ptid),
2056 (signo != TARGET_SIGNAL_0
2057 ? strsignal (target_signal_to_host (signo)) : "0"));
2058
2059 restore_child_signals_mask (&prev_mask);
2060 if (target_can_async_p ())
2061 target_async (inferior_event_handler, 0);
2062 }
2063
2064 /* Send a signal to an LWP. */
2065
2066 static int
2067 kill_lwp (int lwpid, int signo)
2068 {
2069 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2070 fails, then we are not using nptl threads and we should be using kill. */
2071
2072 #ifdef HAVE_TKILL_SYSCALL
2073 {
2074 static int tkill_failed;
2075
2076 if (!tkill_failed)
2077 {
2078 int ret;
2079
2080 errno = 0;
2081 ret = syscall (__NR_tkill, lwpid, signo);
2082 if (errno != ENOSYS)
2083 return ret;
2084 tkill_failed = 1;
2085 }
2086 }
2087 #endif
2088
2089 return kill (lwpid, signo);
2090 }
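/* Aside: on kernels that provide it (2.5.75 and later), tgkill is a
   safer variant of tkill, since it also checks the thread group and
   so cannot signal an unrelated task if the tid has been recycled.
   A sketch of the equivalent call, assuming __NR_tgkill is defined:

       ret = syscall (__NR_tgkill, tgid, lwpid, signo);

   where tgid would be the inferior's process id (GET_PID of the
   LWP's ptid).  The tkill/kill fallback above is kept for
   portability to older kernels.  */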
2091
2092 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2093 event, check if the core is interested in it: if not, ignore the
2094 event, and keep waiting; otherwise, we need to toggle the LWP's
2095 syscall entry/exit status, since the ptrace event itself doesn't
2096 indicate it, and report the trap to higher layers. */
2097
2098 static int
2099 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2100 {
2101 struct target_waitstatus *ourstatus = &lp->waitstatus;
2102 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2103 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2104
2105 if (stopping)
2106 {
2107 /* If we're stopping threads, there's a SIGSTOP pending, which
2108 makes it so that the LWP reports an immediate syscall return,
2109 followed by the SIGSTOP. Skip seeing that "return" using
2110 PTRACE_CONT directly, and let stop_wait_callback collect the
2111 SIGSTOP. Later, when the thread is resumed, a new syscall
2112 entry event follows. If we didn't do this (and returned 0), we'd
2113 leave a syscall entry pending, and our caller, by using
2114 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2115 itself. Later, when the user re-resumes this LWP, we'd see
2116 another syscall entry event and we'd mistake it for a return.
2117
2118 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2119 (leaving immediately with LWP->signalled set, without issuing
2120 a PTRACE_CONT), it would still be problematic to leave this
2121 syscall enter pending, as later when the thread is resumed,
2122 it would then see the same syscall exit mentioned above,
2123 followed by the delayed SIGSTOP, while the syscall didn't
2124 actually get to execute. It seems it would be even more
2125 confusing to the user. */
2126
2127 if (debug_linux_nat)
2128 fprintf_unfiltered (gdb_stdlog,
2129 "LHST: ignoring syscall %d "
2130 "for LWP %ld (stopping threads), "
2131 "resuming with PTRACE_CONT for SIGSTOP\n",
2132 syscall_number,
2133 GET_LWP (lp->ptid));
2134
2135 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2136 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2137 return 1;
2138 }
2139
2140 if (catch_syscall_enabled ())
2141 {
2142 /* Always update the entry/return state, even if this particular
2143 syscall isn't interesting to the core now. In async mode,
2144 the user could install a new catchpoint for this syscall
2145 between syscall enter/return, and we'll need to know to
2146 report a syscall return if that happens. */
2147 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2148 ? TARGET_WAITKIND_SYSCALL_RETURN
2149 : TARGET_WAITKIND_SYSCALL_ENTRY);
2150
2151 if (catching_syscall_number (syscall_number))
2152 {
2153 /* Alright, an event to report. */
2154 ourstatus->kind = lp->syscall_state;
2155 ourstatus->value.syscall_number = syscall_number;
2156
2157 if (debug_linux_nat)
2158 fprintf_unfiltered (gdb_stdlog,
2159 "LHST: stopping for %s of syscall %d"
2160 " for LWP %ld\n",
2161 lp->syscall_state
2162 == TARGET_WAITKIND_SYSCALL_ENTRY
2163 ? "entry" : "return",
2164 syscall_number,
2165 GET_LWP (lp->ptid));
2166 return 0;
2167 }
2168
2169 if (debug_linux_nat)
2170 fprintf_unfiltered (gdb_stdlog,
2171 "LHST: ignoring %s of syscall %d "
2172 "for LWP %ld\n",
2173 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2174 ? "entry" : "return",
2175 syscall_number,
2176 GET_LWP (lp->ptid));
2177 }
2178 else
2179 {
2180 /* If we had been syscall tracing, and hence used PT_SYSCALL
2181 before on this LWP, it could happen that the user removes all
2182 syscall catchpoints before we get to process this event.
2183 There are two noteworthy issues here:
2184
2185 - When stopped at a syscall entry event, resuming with
2186 PT_STEP still resumes executing the syscall and reports a
2187 syscall return.
2188
2189 - Only PT_SYSCALL catches syscall enters. If we last
2190 single-stepped this thread, then this event can't be a
2191 syscall enter; it has to be a syscall exit.
2192
2193 The points above mean that the next resume, be it PT_STEP or
2194 PT_CONTINUE, cannot trigger a syscall trace event.
2196 if (debug_linux_nat)
2197 fprintf_unfiltered (gdb_stdlog,
2198 "LHST: caught syscall event "
2199 "with no syscall catchpoints."
2200 " %d for LWP %ld, ignoring\n",
2201 syscall_number,
2202 GET_LWP (lp->ptid));
2203 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2204 }
2205
2206 /* The core isn't interested in this event. For efficiency, avoid
2207 stopping all threads only to have the core resume them all again.
2208 Since we're not stopping threads, if we're still syscall tracing
2209 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2210 subsequent syscall. Simply resume using the inf-ptrace layer,
2211 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2212
2213 /* Note that gdbarch_get_syscall_number may access registers, hence
2214 fill a regcache. */
2215 registers_changed ();
2216 if (linux_nat_prepare_to_resume != NULL)
2217 linux_nat_prepare_to_resume (lp);
2218 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2219 lp->step, TARGET_SIGNAL_0);
2220 return 1;
2221 }
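/* Background note: the SYSCALL_SIGTRAP value tested by callers of
   the function above comes from the PTRACE_O_TRACESYSGOOD ptrace
   option, which makes the kernel report syscall stops with bit 0x80
   or'ed into the stop signal, i.e. roughly:

       WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80)

   That extra bit is what lets us tell a PTRACE_SYSCALL stop apart
   from a plain SIGTRAP (breakpoint hit or single-step).  */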
2222
2223 /* Handle a GNU/Linux extended wait response. If we see a clone
2224 event, we need to add the new LWP to our list (and not report the
2225 trap to higher layers). This function returns non-zero if the
2226 event should be ignored and we should wait again. If STOPPING is
2227 true, the new LWP remains stopped, otherwise it is continued. */
2228
2229 static int
2230 linux_handle_extended_wait (struct lwp_info *lp, int status,
2231 int stopping)
2232 {
2233 int pid = GET_LWP (lp->ptid);
2234 struct target_waitstatus *ourstatus = &lp->waitstatus;
2235 int event = status >> 16;
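/* With the PTRACE_O_TRACE* options enabled, the kernel reports an
   extended event as a SIGTRAP stop with the event code stored in
   bits 16 and up of the wait status, roughly
   W_STOPCODE (SIGTRAP) | (event << 16), so shifting by 16 recovers
   PTRACE_EVENT_FORK, PTRACE_EVENT_CLONE, etc.  */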
2236
2237 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2238 || event == PTRACE_EVENT_CLONE)
2239 {
2240 unsigned long new_pid;
2241 int ret;
2242
2243 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2244
2245 /* If we haven't already seen the new PID stop, wait for it now. */
2246 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2247 {
2248 /* The new child has a pending SIGSTOP. We can't affect it until it
2249 hits the SIGSTOP, but we're already attached. */
2250 ret = my_waitpid (new_pid, &status,
2251 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2252 if (ret == -1)
2253 perror_with_name (_("waiting for new child"));
2254 else if (ret != new_pid)
2255 internal_error (__FILE__, __LINE__,
2256 _("wait returned unexpected PID %d"), ret);
2257 else if (!WIFSTOPPED (status))
2258 internal_error (__FILE__, __LINE__,
2259 _("wait returned unexpected status 0x%x"), status);
2260 }
2261
2262 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2263
2264 if (event == PTRACE_EVENT_FORK
2265 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2266 {
2267 /* Handle checkpointing by linux-fork.c here as a special
2268 case. We don't want the follow-fork-mode or 'catch fork'
2269 to interfere with this. */
2270
2271 /* This won't actually modify the breakpoint list, but will
2272 physically remove the breakpoints from the child. */
2273 detach_breakpoints (new_pid);
2274
2275 /* Retain child fork in ptrace (stopped) state. */
2276 if (!find_fork_pid (new_pid))
2277 add_fork (new_pid);
2278
2279 /* Report as spurious, so that infrun doesn't want to follow
2280 this fork. We're actually doing an infcall in
2281 linux-fork.c. */
2282 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2283 linux_enable_event_reporting (pid_to_ptid (new_pid));
2284
2285 /* Report the stop to the core. */
2286 return 0;
2287 }
2288
2289 if (event == PTRACE_EVENT_FORK)
2290 ourstatus->kind = TARGET_WAITKIND_FORKED;
2291 else if (event == PTRACE_EVENT_VFORK)
2292 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2293 else
2294 {
2295 struct lwp_info *new_lp;
2296
2297 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2298
2299 if (debug_linux_nat)
2300 fprintf_unfiltered (gdb_stdlog,
2301 "LHEW: Got clone event "
2302 "from LWP %d, new child is LWP %ld\n",
2303 pid, new_pid);
2304
2305 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2306 new_lp->cloned = 1;
2307 new_lp->stopped = 1;
2308
2309 if (WSTOPSIG (status) != SIGSTOP)
2310 {
2311 /* This can happen if someone starts sending signals with
2312 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
2313 thread before it gets a chance to run.
2314 This is an unlikely case, and harder to handle for
2315 fork / vfork than for clone, so we do not try - but
2316 we handle it for clone events here. We'll send
2317 the other signal on to the thread below. */
2318
2319 new_lp->signalled = 1;
2320 }
2321 else
2322 {
2323 struct thread_info *tp;
2324
2325 /* When we stop for an event in some other thread, and
2326 pull the thread list just as this thread has cloned,
2327 we'll have seen the new thread in the thread_db list
2328 before handling the CLONE event (glibc's
2329 pthread_create adds the new thread to the thread list
2330 before clone'ing, and has the kernel fill in the
2331 thread's tid on the clone call with
2332 CLONE_PARENT_SETTID). If that happened, and the core
2333 had requested the new thread to stop, we'll have
2334 killed it with SIGSTOP. But since SIGSTOP is not an
2335 RT signal, it can only be queued once. We need to be
2336 careful to not resume the LWP if we wanted it to
2337 stop. In that case, we'll leave the SIGSTOP pending.
2338 It will later be reported as TARGET_SIGNAL_0. */
2339 tp = find_thread_ptid (new_lp->ptid);
2340 if (tp != NULL && tp->stop_requested)
2341 new_lp->last_resume_kind = resume_stop;
2342 else
2343 status = 0;
2344 }
2345
2346 if (non_stop)
2347 {
2348 /* Add the new thread to GDB's lists as soon as possible
2349 so that:
2350
2351 1) the frontend doesn't have to wait for a stop to
2352 display them, and,
2353
2354 2) we tag it with the correct running state. */
2355
2356 /* If the thread_db layer is active, let it know about
2357 this new thread, and add it to GDB's list. */
2358 if (!thread_db_attach_lwp (new_lp->ptid))
2359 {
2360 /* We're not using thread_db. Add it to GDB's
2361 list. */
2362 target_post_attach (GET_LWP (new_lp->ptid));
2363 add_thread (new_lp->ptid);
2364 }
2365
2366 if (!stopping)
2367 {
2368 set_running (new_lp->ptid, 1);
2369 set_executing (new_lp->ptid, 1);
2370 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2371 resume_stop. */
2372 new_lp->last_resume_kind = resume_continue;
2373 }
2374 }
2375
2376 if (status != 0)
2377 {
2378 /* We created NEW_LP so it cannot yet contain STATUS. */
2379 gdb_assert (new_lp->status == 0);
2380
2381 /* Save the wait status to report later. */
2382 if (debug_linux_nat)
2383 fprintf_unfiltered (gdb_stdlog,
2384 "LHEW: waitpid of new LWP %ld, "
2385 "saving status %s\n",
2386 (long) GET_LWP (new_lp->ptid),
2387 status_to_str (status));
2388 new_lp->status = status;
2389 }
2390
2391 /* Note the need to use the low target ops to resume, to
2392 handle resuming with PT_SYSCALL if we have syscall
2393 catchpoints. */
2394 if (!stopping)
2395 {
2396 new_lp->resumed = 1;
2397
2398 if (status == 0)
2399 {
2400 gdb_assert (new_lp->last_resume_kind == resume_continue);
2401 if (debug_linux_nat)
2402 fprintf_unfiltered (gdb_stdlog,
2403 "LHEW: resuming new LWP %ld\n",
2404 GET_LWP (new_lp->ptid));
2405 if (linux_nat_prepare_to_resume != NULL)
2406 linux_nat_prepare_to_resume (new_lp);
2407 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2408 0, TARGET_SIGNAL_0);
2409 new_lp->stopped = 0;
2410 }
2411 }
2412
2413 if (debug_linux_nat)
2414 fprintf_unfiltered (gdb_stdlog,
2415 "LHEW: resuming parent LWP %d\n", pid);
2416 if (linux_nat_prepare_to_resume != NULL)
2417 linux_nat_prepare_to_resume (lp);
2418 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2419 0, TARGET_SIGNAL_0);
2420
2421 return 1;
2422 }
2423
2424 return 0;
2425 }
2426
2427 if (event == PTRACE_EVENT_EXEC)
2428 {
2429 if (debug_linux_nat)
2430 fprintf_unfiltered (gdb_stdlog,
2431 "LHEW: Got exec event from LWP %ld\n",
2432 GET_LWP (lp->ptid));
2433
2434 ourstatus->kind = TARGET_WAITKIND_EXECD;
2435 ourstatus->value.execd_pathname
2436 = xstrdup (linux_child_pid_to_exec_file (pid));
2437
2438 return 0;
2439 }
2440
2441 if (event == PTRACE_EVENT_VFORK_DONE)
2442 {
2443 if (current_inferior ()->waiting_for_vfork_done)
2444 {
2445 if (debug_linux_nat)
2446 fprintf_unfiltered (gdb_stdlog,
2447 "LHEW: Got expected PTRACE_EVENT_"
2448 "VFORK_DONE from LWP %ld: stopping\n",
2449 GET_LWP (lp->ptid));
2450
2451 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2452 return 0;
2453 }
2454
2455 if (debug_linux_nat)
2456 fprintf_unfiltered (gdb_stdlog,
2457 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2458 "from LWP %ld: resuming\n",
2459 GET_LWP (lp->ptid));
2460 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2461 return 1;
2462 }
2463
2464 internal_error (__FILE__, __LINE__,
2465 _("unknown ptrace event %d"), event);
2466 }
2467
2468 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2469 exited. */
2470
2471 static int
2472 wait_lwp (struct lwp_info *lp)
2473 {
2474 pid_t pid;
2475 int status = 0;
2476 int thread_dead = 0;
2477 sigset_t prev_mask;
2478
2479 gdb_assert (!lp->stopped);
2480 gdb_assert (lp->status == 0);
2481
2482 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2483 block_child_signals (&prev_mask);
2484
2485 for (;;)
2486 {
2487 /* If my_waitpid returns 0, the __WCLONE vs. non-__WCLONE kind was
2488 right but no event is pending yet; we should just call sigsuspend. */
2489
2490 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2491 if (pid == -1 && errno == ECHILD)
2492 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2493 if (pid == -1 && errno == ECHILD)
2494 {
2495 /* The thread has previously exited. We need to delete it
2496 now because, for some vendor 2.4 kernels with NPTL
2497 support backported, there won't be an exit event unless
2498 it is the main thread. 2.6 kernels will report an exit
2499 event for each thread that exits, as expected. */
2500 thread_dead = 1;
2501 if (debug_linux_nat)
2502 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2503 target_pid_to_str (lp->ptid));
2504 }
2505 if (pid != 0)
2506 break;
2507
2508 /* Bugs 10970, 12702.
2509 The thread group leader may have exited, in which case we'll lock up in
2510 waitpid if there are other threads, even if they are all zombies too.
2511 Basically, we're not supposed to use waitpid this way.
2512 __WCLONE is not applicable for the leader so we can't use that.
2513 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2514 process; it gets ESRCH both for the zombie and for running processes.
2515
2516 As a workaround, check if we're waiting for the thread group leader and
2517 if it's a zombie, and avoid calling waitpid if it is.
2518
2519 This is racy: what if the tgl becomes a zombie right after we check?
2520 Therefore always use WNOHANG with sigsuspend - it is equivalent to a
2521 blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
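/* (linux_proc_pid_is_zombie presumably checks the "State:" line of
   /proc/PID/status for 'Z'; it only reads /proc, so unlike waitpid
   it cannot block on a half-dead thread group leader.)  */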
2522
2523 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2524 && linux_proc_pid_is_zombie (GET_LWP (lp->ptid)))
2525 {
2526 thread_dead = 1;
2527 if (debug_linux_nat)
2528 fprintf_unfiltered (gdb_stdlog,
2529 "WL: Thread group leader %s vanished.\n",
2530 target_pid_to_str (lp->ptid));
2531 break;
2532 }
2533
2534 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2535 handlers get invoked even though our caller intentionally blocked
2536 them with block_child_signals. Only the loop in linux_nat_wait_1
2537 is sensitive to this, and if we are called from there, my_waitpid
2538 gets called again before it gets to sigsuspend, so we can safely
2539 let the handlers execute here. */
2540
2541 sigsuspend (&suspend_mask);
2542 }
2543
2544 restore_child_signals_mask (&prev_mask);
2545
2546 if (!thread_dead)
2547 {
2548 gdb_assert (pid == GET_LWP (lp->ptid));
2549
2550 if (debug_linux_nat)
2551 {
2552 fprintf_unfiltered (gdb_stdlog,
2553 "WL: waitpid %s received %s\n",
2554 target_pid_to_str (lp->ptid),
2555 status_to_str (status));
2556 }
2557
2558 /* Check if the thread has exited. */
2559 if (WIFEXITED (status) || WIFSIGNALED (status))
2560 {
2561 thread_dead = 1;
2562 if (debug_linux_nat)
2563 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2564 target_pid_to_str (lp->ptid));
2565 }
2566 }
2567
2568 if (thread_dead)
2569 {
2570 exit_lwp (lp);
2571 return 0;
2572 }
2573
2574 gdb_assert (WIFSTOPPED (status));
2575
2576 /* Handle GNU/Linux's syscall SIGTRAPs. */
2577 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2578 {
2579 /* No longer need the sysgood bit. The ptrace event ends up
2580 recorded in lp->waitstatus if we care for it. We can carry
2581 on handling the event like a regular SIGTRAP from here
2582 on. */
2583 status = W_STOPCODE (SIGTRAP);
2584 if (linux_handle_syscall_trap (lp, 1))
2585 return wait_lwp (lp);
2586 }
2587
2588 /* Handle GNU/Linux's extended waitstatus for trace events. */
2589 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2590 {
2591 if (debug_linux_nat)
2592 fprintf_unfiltered (gdb_stdlog,
2593 "WL: Handling extended status 0x%06x\n",
2594 status);
2595 if (linux_handle_extended_wait (lp, status, 1))
2596 return wait_lwp (lp);
2597 }
2598
2599 return status;
2600 }
2601
2602 /* Save the most recent siginfo for LP. This is currently only called
2603 for SIGTRAP; some ports use the si_addr field for
2604 target_stopped_data_address. In the future, it may also be used to
2605 restore the siginfo of requeued signals. */
2606
2607 static void
2608 save_siginfo (struct lwp_info *lp)
2609 {
2610 errno = 0;
2611 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2612 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2613
2614 if (errno != 0)
2615 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2616 }
2617
2618 /* Send a SIGSTOP to LP. */
2619
2620 static int
2621 stop_callback (struct lwp_info *lp, void *data)
2622 {
2623 if (!lp->stopped && !lp->signalled)
2624 {
2625 int ret;
2626
2627 if (debug_linux_nat)
2628 {
2629 fprintf_unfiltered (gdb_stdlog,
2630 "SC: kill %s **<SIGSTOP>**\n",
2631 target_pid_to_str (lp->ptid));
2632 }
2633 errno = 0;
2634 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2635 if (debug_linux_nat)
2636 {
2637 fprintf_unfiltered (gdb_stdlog,
2638 "SC: lwp kill %d %s\n",
2639 ret,
2640 errno ? safe_strerror (errno) : "ERRNO-OK");
2641 }
2642
2643 lp->signalled = 1;
2644 gdb_assert (lp->status == 0);
2645 }
2646
2647 return 0;
2648 }
2649
2650 /* Request a stop on LWP. */
2651
2652 void
2653 linux_stop_lwp (struct lwp_info *lwp)
2654 {
2655 stop_callback (lwp, NULL);
2656 }
2657
2658 /* Return non-zero if LWP PID has a pending SIGINT. */
2659
2660 static int
2661 linux_nat_has_pending_sigint (int pid)
2662 {
2663 sigset_t pending, blocked, ignored;
2664
2665 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2666
2667 if (sigismember (&pending, SIGINT)
2668 && !sigismember (&ignored, SIGINT))
2669 return 1;
2670
2671 return 0;
2672 }
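/* (linux_proc_pending_signals is expected to derive these sets from
   the SigPnd/ShdPnd, SigBlk and SigIgn hex masks in /proc/PID/status,
   so the check above is a pure /proc read with no ptrace traffic.)  */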
2673
2674 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2675
2676 static int
2677 set_ignore_sigint (struct lwp_info *lp, void *data)
2678 {
2679 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2680 flag to consume the next one. */
2681 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2682 && WSTOPSIG (lp->status) == SIGINT)
2683 lp->status = 0;
2684 else
2685 lp->ignore_sigint = 1;
2686
2687 return 0;
2688 }
2689
2690 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2691 This function is called after we know the LWP has stopped; if the LWP
2692 stopped before the expected SIGINT was delivered, then it will never have
2693 arrived. Also, if the signal was delivered to a shared queue and consumed
2694 by a different thread, it will never be delivered to this LWP. */
2695
2696 static void
2697 maybe_clear_ignore_sigint (struct lwp_info *lp)
2698 {
2699 if (!lp->ignore_sigint)
2700 return;
2701
2702 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2703 {
2704 if (debug_linux_nat)
2705 fprintf_unfiltered (gdb_stdlog,
2706 "MCIS: Clearing bogus flag for %s\n",
2707 target_pid_to_str (lp->ptid));
2708 lp->ignore_sigint = 0;
2709 }
2710 }
2711
2712 /* Fetch info about a possibly triggered data watchpoint and store
2713 it in LP.
2714
2715 On some archs, like x86, that use debug registers to set
2716 watchpoints, the way to know which watched address trapped is to
2717 check the register that is used to select which address to
2718 watch. The problem is, between setting the watchpoint
2719 and reading back which data address trapped, the user may change
2720 the set of watchpoints, and, as a consequence, GDB changes the
2721 debug registers in the inferior. To avoid reading back a stale
2722 stopped-data-address when that happens, we cache in LP the fact
2723 that a watchpoint trapped, and the corresponding data address, as
2724 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2725 registers meanwhile, we have the cached data we can rely on. */
2726
2727 static void
2728 save_sigtrap (struct lwp_info *lp)
2729 {
2730 struct cleanup *old_chain;
2731
2732 if (linux_ops->to_stopped_by_watchpoint == NULL)
2733 {
2734 lp->stopped_by_watchpoint = 0;
2735 return;
2736 }
2737
2738 old_chain = save_inferior_ptid ();
2739 inferior_ptid = lp->ptid;
2740
2741 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2742
2743 if (lp->stopped_by_watchpoint)
2744 {
2745 if (linux_ops->to_stopped_data_address != NULL)
2746 lp->stopped_data_address_p =
2747 linux_ops->to_stopped_data_address (&current_target,
2748 &lp->stopped_data_address);
2749 else
2750 lp->stopped_data_address_p = 0;
2751 }
2752
2753 do_cleanups (old_chain);
2754 }
2755
2756 /* See save_sigtrap. */
2757
2758 static int
2759 linux_nat_stopped_by_watchpoint (void)
2760 {
2761 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2762
2763 gdb_assert (lp != NULL);
2764
2765 return lp->stopped_by_watchpoint;
2766 }
2767
2768 static int
2769 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2770 {
2771 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2772
2773 gdb_assert (lp != NULL);
2774
2775 *addr_p = lp->stopped_data_address;
2776
2777 return lp->stopped_data_address_p;
2778 }
2779
2780 /* Commonly, any breakpoint or watchpoint generates only a SIGTRAP. */
2781
2782 static int
2783 sigtrap_is_event (int status)
2784 {
2785 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2786 }
2787
2788 /* SIGTRAP-like events recognizer. */
2789
2790 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2791
2792 /* Check for SIGTRAP-like events in LP. */
2793
2794 static int
2795 linux_nat_lp_status_is_event (struct lwp_info *lp)
2796 {
2797 /* We check for lp->waitstatus in addition to lp->status, because we can
2798 have pending process exits recorded in lp->status
2799 and W_EXITCODE(0,0) == 0. We should probably have an additional
2800 lp->status_p flag. */
2801
2802 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2803 && linux_nat_status_is_event (lp->status));
2804 }
2805
2806 /* Set an alternative recognizer for SIGTRAP-like events. If
2807 breakpoint_inserted_here_p finds a breakpoint there, then
2808 gdbarch_decr_pc_after_break will be applied. */
2809
2810 void
2811 linux_nat_set_status_is_event (struct target_ops *t,
2812 int (*status_is_event) (int status))
2813 {
2814 linux_nat_status_is_event = status_is_event;
2815 }
2816
2817 /* Wait until LP is stopped. */
2818
2819 static int
2820 stop_wait_callback (struct lwp_info *lp, void *data)
2821 {
2822 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2823
2824 /* If this is a vfork parent, bail out; it is not going to report
2825 any SIGSTOP until the vfork is done. */
2826 if (inf->vfork_child != NULL)
2827 return 0;
2828
2829 if (!lp->stopped)
2830 {
2831 int status;
2832
2833 status = wait_lwp (lp);
2834 if (status == 0)
2835 return 0;
2836
2837 if (lp->ignore_sigint && WIFSTOPPED (status)
2838 && WSTOPSIG (status) == SIGINT)
2839 {
2840 lp->ignore_sigint = 0;
2841
2842 errno = 0;
2843 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2844 if (debug_linux_nat)
2845 fprintf_unfiltered (gdb_stdlog,
2846 "PTRACE_CONT %s, 0, 0 (%s) "
2847 "(discarding SIGINT)\n",
2848 target_pid_to_str (lp->ptid),
2849 errno ? safe_strerror (errno) : "OK");
2850
2851 return stop_wait_callback (lp, NULL);
2852 }
2853
2854 maybe_clear_ignore_sigint (lp);
2855
2856 if (WSTOPSIG (status) != SIGSTOP)
2857 {
2858 if (linux_nat_status_is_event (status))
2859 {
2860 /* If a LWP other than the LWP that we're reporting an
2861 event for has hit a GDB breakpoint (as opposed to
2862 some random trap signal), then just arrange for it to
2863 hit it again later. We don't keep the SIGTRAP status
2864 and don't forward the SIGTRAP signal to the LWP. We
2865 will handle the current event, eventually we will
2866 resume all LWPs, and this one will get its breakpoint
2867 trap again.
2868
2869 If we do not do this, then we run the risk that the
2870 user will delete or disable the breakpoint, but the
2871 thread will have already tripped on it. */
2872
2873 /* Save the trap's siginfo in case we need it later. */
2874 save_siginfo (lp);
2875
2876 save_sigtrap (lp);
2877
2878 /* Now resume this LWP and get the SIGSTOP event. */
2879 errno = 0;
2880 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2881 if (debug_linux_nat)
2882 {
2883 fprintf_unfiltered (gdb_stdlog,
2884 "PTRACE_CONT %s, 0, 0 (%s)\n",
2885 target_pid_to_str (lp->ptid),
2886 errno ? safe_strerror (errno) : "OK");
2887
2888 fprintf_unfiltered (gdb_stdlog,
2889 "SWC: Candidate SIGTRAP event in %s\n",
2890 target_pid_to_str (lp->ptid));
2891 }
2892 /* Hold this event/waitstatus while we check to see if
2893 there are any more (we still want to get that SIGSTOP). */
2894 stop_wait_callback (lp, NULL);
2895
2896 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2897 there's another event, throw it back into the
2898 queue. */
2899 if (lp->status)
2900 {
2901 if (debug_linux_nat)
2902 fprintf_unfiltered (gdb_stdlog,
2903 "SWC: kill %s, %s\n",
2904 target_pid_to_str (lp->ptid),
2905 status_to_str ((int) lp->status));
2906 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
2907 }
2908
2909 /* Save the sigtrap event. */
2910 lp->status = status;
2911 return 0;
2912 }
2913 else
2914 {
2915 /* The thread was stopped with a signal other than
2916 SIGSTOP, and didn't accidentally trip a breakpoint. */
2917
2918 if (debug_linux_nat)
2919 {
2920 fprintf_unfiltered (gdb_stdlog,
2921 "SWC: Pending event %s in %s\n",
2922 status_to_str ((int) status),
2923 target_pid_to_str (lp->ptid));
2924 }
2925 /* Now resume this LWP and get the SIGSTOP event. */
2926 errno = 0;
2927 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2928 if (debug_linux_nat)
2929 fprintf_unfiltered (gdb_stdlog,
2930 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2931 target_pid_to_str (lp->ptid),
2932 errno ? safe_strerror (errno) : "OK");
2933
2934 /* Hold this event/waitstatus while we check to see if
2935 there are any more (we still want to get that SIGSTOP). */
2936 stop_wait_callback (lp, NULL);
2937
2938 /* If the lp->status field is still empty, use it to
2939 hold this event. If not, then this event must be
2940 returned to the event queue of the LWP. */
2941 if (lp->status)
2942 {
2943 if (debug_linux_nat)
2944 {
2945 fprintf_unfiltered (gdb_stdlog,
2946 "SWC: kill %s, %s\n",
2947 target_pid_to_str (lp->ptid),
2948 status_to_str ((int) status));
2949 }
2950 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2951 }
2952 else
2953 lp->status = status;
2954 return 0;
2955 }
2956 }
2957 else
2958 {
2959 /* We caught the SIGSTOP that we intended to catch, so
2960 there's no SIGSTOP pending. */
2961 lp->stopped = 1;
2962 lp->signalled = 0;
2963 }
2964 }
2965
2966 return 0;
2967 }
2968
2969 /* Return non-zero if LP has a wait status pending. */
2970
2971 static int
2972 status_callback (struct lwp_info *lp, void *data)
2973 {
2974 /* Only report a pending wait status if we pretend that this has
2975 indeed been resumed. */
2976 if (!lp->resumed)
2977 return 0;
2978
2979 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2980 {
2981 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2982 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2983 0', so a clean process exit can not be stored pending in
2984 lp->status, it is indistinguishable from
2985 no-pending-status. */
2986 return 1;
2987 }
2988
2989 if (lp->status != 0)
2990 return 1;
2991
2992 return 0;
2993 }
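/* For reference, why W_EXITCODE(0,0) is ambiguous: in glibc the
   macro expands roughly to

       #define W_EXITCODE(ret, sig)  (((ret) << 8) | (sig))

   so a clean exit (ret == 0, sig == 0) encodes to 0 -- the same
   value lp->status holds when nothing is pending.  That is why
   pending process exits live in lp->waitstatus instead.  */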
2994
2995 /* Return non-zero if LP isn't stopped. */
2996
2997 static int
2998 running_callback (struct lwp_info *lp, void *data)
2999 {
3000 return (!lp->stopped
3001 || ((lp->status != 0
3002 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3003 && lp->resumed));
3004 }
3005
3006 /* Count the LWPs that have had events. */
3007
3008 static int
3009 count_events_callback (struct lwp_info *lp, void *data)
3010 {
3011 int *count = data;
3012
3013 gdb_assert (count != NULL);
3014
3015 /* Count only resumed LWPs that have a SIGTRAP event pending. */
3016 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3017 (*count)++;
3018
3019 return 0;
3020 }
3021
3022 /* Select the LWP (if any) that is currently being single-stepped. */
3023
3024 static int
3025 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3026 {
3027 if (lp->last_resume_kind == resume_step
3028 && lp->status != 0)
3029 return 1;
3030 else
3031 return 0;
3032 }
3033
3034 /* Select the Nth LWP that has had a SIGTRAP event. */
3035
3036 static int
3037 select_event_lwp_callback (struct lwp_info *lp, void *data)
3038 {
3039 int *selector = data;
3040
3041 gdb_assert (selector != NULL);
3042
3043 /* Select only resumed LWPs that have a SIGTRAP event pending. */
3044 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3045 if ((*selector)-- == 0)
3046 return 1;
3047
3048 return 0;
3049 }
3050
3051 static int
3052 cancel_breakpoint (struct lwp_info *lp)
3053 {
3054 /* Arrange for a breakpoint to be hit again later. We don't keep
3055 the SIGTRAP status and don't forward the SIGTRAP signal to the
3056 LWP. We will handle the current event, eventually we will resume
3057 this LWP, and this breakpoint will trap again.
3058
3059 If we do not do this, then we run the risk that the user will
3060 delete or disable the breakpoint, but the LWP will have already
3061 tripped on it. */
3062
3063 struct regcache *regcache = get_thread_regcache (lp->ptid);
3064 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3065 CORE_ADDR pc;
3066
3067 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3068 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3069 {
3070 if (debug_linux_nat)
3071 fprintf_unfiltered (gdb_stdlog,
3072 "CB: Push back breakpoint for %s\n",
3073 target_pid_to_str (lp->ptid));
3074
3075 /* Back up the PC if necessary. */
3076 if (gdbarch_decr_pc_after_break (gdbarch))
3077 regcache_write_pc (regcache, pc);
3078
3079 return 1;
3080 }
3081 return 0;
3082 }
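/* A concrete example of the PC adjustment above: on x86 a software
   breakpoint is the one-byte int3 instruction, which traps with the
   PC already past it, so gdbarch_decr_pc_after_break is 1 there.
   Subtracting it maps the reported PC back to the breakpoint address
   for the breakpoint_inserted_here_p check, and writing the adjusted
   PC back lets the LWP re-execute (and re-trap on) the breakpoint
   later.  On architectures where the trap leaves the PC at the
   breakpoint itself, the decrement is 0 and the write-back is
   skipped.  */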
3083
3084 static int
3085 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3086 {
3087 struct lwp_info *event_lp = data;
3088
3089 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3090 if (lp == event_lp)
3091 return 0;
3092
3093 /* If a LWP other than the LWP that we're reporting an event for has
3094 hit a GDB breakpoint (as opposed to some random trap signal),
3095 then just arrange for it to hit it again later. We don't keep
3096 the SIGTRAP status and don't forward the SIGTRAP signal to the
3097 LWP. We will handle the current event, eventually we will resume
3098 all LWPs, and this one will get its breakpoint trap again.
3099
3100 If we do not do this, then we run the risk that the user will
3101 delete or disable the breakpoint, but the LWP will have already
3102 tripped on it. */
3103
3104 if (linux_nat_lp_status_is_event (lp)
3105 && cancel_breakpoint (lp))
3106 /* Throw away the SIGTRAP. */
3107 lp->status = 0;
3108
3109 return 0;
3110 }
3111
3112 /* Select one LWP out of those that have events pending. */
3113
3114 static void
3115 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3116 {
3117 int num_events = 0;
3118 int random_selector;
3119 struct lwp_info *event_lp;
3120
3121 /* Record the wait status for the original LWP. */
3122 (*orig_lp)->status = *status;
3123
3124 /* Give preference to any LWP that is being single-stepped. */
3125 event_lp = iterate_over_lwps (filter,
3126 select_singlestep_lwp_callback, NULL);
3127 if (event_lp != NULL)
3128 {
3129 if (debug_linux_nat)
3130 fprintf_unfiltered (gdb_stdlog,
3131 "SEL: Select single-step %s\n",
3132 target_pid_to_str (event_lp->ptid));
3133 }
3134 else
3135 {
3136 /* No single-stepping LWP. Select one at random, out of those
3137 which have had SIGTRAP events. */
3138
3139 /* First see how many SIGTRAP events we have. */
3140 iterate_over_lwps (filter, count_events_callback, &num_events);
3141
3142 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3143 random_selector = (int)
3144 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
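/* The expression above maps rand ()'s range [0, RAND_MAX] onto the
   integers [0, num_events - 1], approximately uniformly; e.g. with
   num_events == 3 it yields 0, 1 or 2, each about a third of the
   time.  */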
3145
3146 if (debug_linux_nat && num_events > 1)
3147 fprintf_unfiltered (gdb_stdlog,
3148 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3149 num_events, random_selector);
3150
3151 event_lp = iterate_over_lwps (filter,
3152 select_event_lwp_callback,
3153 &random_selector);
3154 }
3155
3156 if (event_lp != NULL)
3157 {
3158 /* Switch the event LWP. */
3159 *orig_lp = event_lp;
3160 *status = event_lp->status;
3161 }
3162
3163 /* Flush the wait status for the event LWP. */
3164 (*orig_lp)->status = 0;
3165 }
3166
3167 /* Return non-zero if LP has been resumed. */
3168
3169 static int
3170 resumed_callback (struct lwp_info *lp, void *data)
3171 {
3172 return lp->resumed;
3173 }
3174
3175 /* Stop an active thread, verify it still exists, then resume it. If
3176 the thread ends up with a pending status, then it is not resumed,
3177 and *DATA (really a pointer to int) is set. */
3178
3179 static int
3180 stop_and_resume_callback (struct lwp_info *lp, void *data)
3181 {
3182 int *new_pending_p = data;
3183
3184 if (!lp->stopped)
3185 {
3186 ptid_t ptid = lp->ptid;
3187
3188 stop_callback (lp, NULL);
3189 stop_wait_callback (lp, NULL);
3190
3191 /* Resume if the lwp still exists, and the core wanted it
3192 running. */
3193 lp = find_lwp_pid (ptid);
3194 if (lp != NULL)
3195 {
3196 if (lp->last_resume_kind == resume_stop
3197 && lp->status == 0)
3198 {
3199 /* The core wanted the LWP to stop. Even if it stopped
3200 cleanly (with SIGSTOP), leave the event pending. */
3201 if (debug_linux_nat)
3202 fprintf_unfiltered (gdb_stdlog,
3203 "SARC: core wanted LWP %ld stopped "
3204 "(leaving SIGSTOP pending)\n",
3205 GET_LWP (lp->ptid));
3206 lp->status = W_STOPCODE (SIGSTOP);
3207 }
3208
3209 if (lp->status == 0)
3210 {
3211 if (debug_linux_nat)
3212 fprintf_unfiltered (gdb_stdlog,
3213 "SARC: re-resuming LWP %ld\n",
3214 GET_LWP (lp->ptid));
3215 resume_lwp (lp, lp->step);
3216 }
3217 else
3218 {
3219 if (debug_linux_nat)
3220 fprintf_unfiltered (gdb_stdlog,
3221 "SARC: not re-resuming LWP %ld "
3222 "(has pending)\n",
3223 GET_LWP (lp->ptid));
3224 if (new_pending_p)
3225 *new_pending_p = 1;
3226 }
3227 }
3228 }
3229 return 0;
3230 }
3231
3232 /* Check if we should go on and pass this event to common code.
3233 Return the affected lwp if so, or NULL otherwise. If we stop
3234 all lwps temporarily, we may end up with new pending events in some
3235 other lwp. In that case set *NEW_PENDING_P to true. */
3236
3237 static struct lwp_info *
3238 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3239 {
3240 struct lwp_info *lp;
3241
3242 *new_pending_p = 0;
3243
3244 lp = find_lwp_pid (pid_to_ptid (lwpid));
3245
3246 /* Check for stop events reported by a process we didn't already
3247 know about - anything not already in our LWP list.
3248
3249 If we're expecting to receive stopped processes after
3250 fork, vfork, and clone events, then we'll just add the
3251 new one to our list and go back to waiting for the event
3252 to be reported - the stopped process might be returned
3253 from waitpid before or after the event is.
3254
3255 But note the case of a non-leader thread exec'ing after the
3256 leader having exited, and gone from our lists. The non-leader
3257 thread changes its tid to the tgid. */
3258
3259 if (WIFSTOPPED (status) && lp == NULL
3260 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3261 {
3262 /* A multi-thread exec after we had seen the leader exiting. */
3263 if (debug_linux_nat)
3264 fprintf_unfiltered (gdb_stdlog,
3265 "LLW: Re-adding thread group leader LWP %d.\n",
3266 lwpid);
3267
3268 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3269 lp->stopped = 1;
3270 lp->resumed = 1;
3271 add_thread (lp->ptid);
3272 }
3273
3274 if (WIFSTOPPED (status) && !lp)
3275 {
3276 add_to_pid_list (&stopped_pids, lwpid, status);
3277 return NULL;
3278 }
3279
3280 /* Make sure we don't report an event for the exit of an LWP not in
3281 our list, i.e. not part of the current process. This can happen
3282 if we detach from a program we originally forked and then it
3283 exits. */
3284 if (!WIFSTOPPED (status) && !lp)
3285 return NULL;
3286
3287 /* Handle GNU/Linux's syscall SIGTRAPs. */
3288 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3289 {
3290 /* No longer need the sysgood bit. The ptrace event ends up
3291 recorded in lp->waitstatus if we care for it. We can carry
3292 on handling the event like a regular SIGTRAP from here
3293 on. */
3294 status = W_STOPCODE (SIGTRAP);
3295 if (linux_handle_syscall_trap (lp, 0))
3296 return NULL;
3297 }
3298
3299 /* Handle GNU/Linux's extended waitstatus for trace events. */
3300 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3301 {
3302 if (debug_linux_nat)
3303 fprintf_unfiltered (gdb_stdlog,
3304 "LLW: Handling extended status 0x%06x\n",
3305 status);
3306 if (linux_handle_extended_wait (lp, status, 0))
3307 return NULL;
3308 }
3309
3310 if (linux_nat_status_is_event (status))
3311 {
3312 /* Save the trap's siginfo in case we need it later. */
3313 save_siginfo (lp);
3314
3315 save_sigtrap (lp);
3316 }
3317
3318 /* Check if the thread has exited. */
3319 if ((WIFEXITED (status) || WIFSIGNALED (status))
3320 && num_lwps (GET_PID (lp->ptid)) > 1)
3321 {
3322 /* If this is the main thread, we must stop all threads and verify
3323 if they are still alive. This is because in the nptl thread model
3324 on Linux 2.4, there is no signal issued for exiting LWPs
3325 other than the main thread. We only get the main thread exit
3326 signal once all child threads have already exited. If we
3327 stop all the threads and use the stop_wait_callback to check
3328 if they have exited we can determine whether this signal
3329 should be ignored or whether it means the end of the debugged
3330 application, regardless of which threading model is being
3331 used. */
3332 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3333 {
3334 lp->stopped = 1;
3335 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3336 stop_and_resume_callback, new_pending_p);
3337 }
3338
3339 if (debug_linux_nat)
3340 fprintf_unfiltered (gdb_stdlog,
3341 "LLW: %s exited.\n",
3342 target_pid_to_str (lp->ptid));
3343
3344 if (num_lwps (GET_PID (lp->ptid)) > 1)
3345 {
3346 /* If there is at least one more LWP, then the exit signal
3347 was not the end of the debugged application and should be
3348 ignored. */
3349 exit_lwp (lp);
3350 return NULL;
3351 }
3352 }
3353
3354 /* Check if the current LWP has previously exited. In the nptl
3355 thread model, LWPs other than the main thread do not issue
3356 signals when they exit so we must check whenever the thread has
3357 stopped. A similar check is made in stop_wait_callback(). */
3358 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3359 {
3360 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3361
3362 if (debug_linux_nat)
3363 fprintf_unfiltered (gdb_stdlog,
3364 "LLW: %s exited.\n",
3365 target_pid_to_str (lp->ptid));
3366
3367 exit_lwp (lp);
3368
3369 /* Make sure there is at least one thread running. */
3370 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3371
3372 /* Discard the event. */
3373 return NULL;
3374 }
3375
3376 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3377 an attempt to stop an LWP. */
3378 if (lp->signalled
3379 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3380 {
3381 if (debug_linux_nat)
3382 fprintf_unfiltered (gdb_stdlog,
3383 "LLW: Delayed SIGSTOP caught for %s.\n",
3384 target_pid_to_str (lp->ptid));
3385
3386 lp->signalled = 0;
3387
3388 if (lp->last_resume_kind != resume_stop)
3389 {
3390 /* This is a delayed SIGSTOP. */
3391
3392 registers_changed ();
3393
3394 if (linux_nat_prepare_to_resume != NULL)
3395 linux_nat_prepare_to_resume (lp);
3396 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3397 lp->step, TARGET_SIGNAL_0);
3398 if (debug_linux_nat)
3399 fprintf_unfiltered (gdb_stdlog,
3400 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3401 lp->step ?
3402 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3403 target_pid_to_str (lp->ptid));
3404
3405 lp->stopped = 0;
3406 gdb_assert (lp->resumed);
3407
3408 /* Discard the event. */
3409 return NULL;
3410 }
3411 }
3412
3413 /* Make sure we don't report a SIGINT that we have already displayed
3414 for another thread. */
3415 if (lp->ignore_sigint
3416 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3417 {
3418 if (debug_linux_nat)
3419 fprintf_unfiltered (gdb_stdlog,
3420 "LLW: Delayed SIGINT caught for %s.\n",
3421 target_pid_to_str (lp->ptid));
3422
3423 /* This is a delayed SIGINT. */
3424 lp->ignore_sigint = 0;
3425
3426 registers_changed ();
3427 if (linux_nat_prepare_to_resume != NULL)
3428 linux_nat_prepare_to_resume (lp);
3429 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3430 lp->step, TARGET_SIGNAL_0);
3431 if (debug_linux_nat)
3432 fprintf_unfiltered (gdb_stdlog,
3433 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3434 lp->step ?
3435 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3436 target_pid_to_str (lp->ptid));
3437
3438 lp->stopped = 0;
3439 gdb_assert (lp->resumed);
3440
3441 /* Discard the event. */
3442 return NULL;
3443 }
3444
3445 /* An interesting event. */
3446 gdb_assert (lp);
3447 lp->status = status;
3448 return lp;
3449 }
3450
3451 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3452 their exits until all other threads in the group have exited. */
3453
3454 static void
3455 check_zombie_leaders (void)
3456 {
3457 struct inferior *inf;
3458
3459 ALL_INFERIORS (inf)
3460 {
3461 struct lwp_info *leader_lp;
3462
3463 if (inf->pid == 0)
3464 continue;
3465
3466 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3467 if (leader_lp != NULL
3468 /* Check if there are other threads in the group, as we may
3469 have raced with the inferior simply exiting. */
3470 && num_lwps (inf->pid) > 1
3471 && linux_proc_pid_is_zombie (inf->pid))
3472 {
3473 if (debug_linux_nat)
3474 fprintf_unfiltered (gdb_stdlog,
3475 "CZL: Thread group leader %d zombie "
3476 "(it exited, or another thread execd).\n",
3477 inf->pid);
3478
3479 /* A leader zombie can mean one of two things:
3480
3481 - It exited, and there's an exit status pending and
3482 available, or only the leader exited (not the whole
3483 program). In the latter case, we can't waitpid the
3484 leader's exit status until all other threads are gone.
3485
3486 - There are 3 or more threads in the group, and a thread
3487 other than the leader exec'd. On an exec, the Linux
3488 kernel destroys all other threads (except the execing
3489 one) in the thread group, and resets the execing thread's
3490 tid to the tgid. No exit notification is sent for the
3491 execing thread -- from the ptracer's perspective, it
3492 appears as though the execing thread just vanishes.
3493 Until we reap all other threads except the leader and the
3494 execing thread, the leader will be zombie, and the
3495 execing thread will be in `D (disc sleep)'. As soon as
3496 all other threads are reaped, the execing thread changes
3497 its tid to the tgid, and the previous (zombie) leader
3498 vanishes, giving place to the "new" leader. We could try
3499 distinguishing the exit and exec cases, by waiting once
3500 more, and seeing if something comes out, but it doesn't
3501 sound useful. The previous leader _does_ go away, and
3502 we'll re-add the new one once we see the exec event
3503 (which is just the same as what would happen if the
3504 previous leader did exit voluntarily before some other
3505 thread execs). */
3506
3507 if (debug_linux_nat)
3508 fprintf_unfiltered (gdb_stdlog,
3509 "CZL: Thread group leader %d vanished.\n",
3510 inf->pid);
3511 exit_lwp (leader_lp);
3512 }
3513 }
3514 }
3515
3516 static ptid_t
3517 linux_nat_wait_1 (struct target_ops *ops,
3518 ptid_t ptid, struct target_waitstatus *ourstatus,
3519 int target_options)
3520 {
3521 static sigset_t prev_mask;
3522 enum resume_kind last_resume_kind;
3523 struct lwp_info *lp;
3524 int status;
3525
3526 if (debug_linux_nat)
3527 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3528
3529 /* The first time we get here after starting a new inferior, we may
3530 not have added it to the LWP list yet - this is the earliest
3531 moment at which we know its PID. */
3532 if (ptid_is_pid (inferior_ptid))
3533 {
3534 /* Upgrade the main thread's ptid. */
3535 thread_change_ptid (inferior_ptid,
3536 BUILD_LWP (GET_PID (inferior_ptid),
3537 GET_PID (inferior_ptid)));
3538
3539 lp = add_lwp (inferior_ptid);
3540 lp->resumed = 1;
3541 }
3542
3543 /* Make sure SIGCHLD is blocked. */
3544 block_child_signals (&prev_mask);
3545
3546 retry:
3547 lp = NULL;
3548 status = 0;
3549
3550 /* First check if there is a LWP with a wait status pending. */
3551 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3552 {
3553 /* Any LWP in the PTID group that's been resumed will do. */
3554 lp = iterate_over_lwps (ptid, status_callback, NULL);
3555 if (lp)
3556 {
3557 if (debug_linux_nat && lp->status)
3558 fprintf_unfiltered (gdb_stdlog,
3559 "LLW: Using pending wait status %s for %s.\n",
3560 status_to_str (lp->status),
3561 target_pid_to_str (lp->ptid));
3562 }
3563 }
3564 else if (is_lwp (ptid))
3565 {
3566 if (debug_linux_nat)
3567 fprintf_unfiltered (gdb_stdlog,
3568 "LLW: Waiting for specific LWP %s.\n",
3569 target_pid_to_str (ptid));
3570
3571 /* We have a specific LWP to check. */
3572 lp = find_lwp_pid (ptid);
3573 gdb_assert (lp);
3574
3575 if (debug_linux_nat && lp->status)
3576 fprintf_unfiltered (gdb_stdlog,
3577 "LLW: Using pending wait status %s for %s.\n",
3578 status_to_str (lp->status),
3579 target_pid_to_str (lp->ptid));
3580
3581 /* We check for lp->waitstatus in addition to lp->status,
3582 because we can have pending process exits recorded in
3583 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3584 an additional lp->status_p flag. */
3585 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3586 lp = NULL;
3587 }
3588
3589 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
3590 {
3591 /* A pending SIGSTOP may interfere with the normal stream of
3592 events. In a typical case where interference is a problem,
3593 we have a SIGSTOP signal pending for LWP A while
3594 single-stepping it, encounter an event in LWP B, and take the
3595 pending SIGSTOP while trying to stop LWP A. After processing
3596 the event in LWP B, LWP A is continued, and we'll never see
3597 the SIGTRAP associated with the last time we were
3598 single-stepping LWP A. */
3599
3600 /* Resume the thread. It should halt immediately, returning the
3601 pending SIGSTOP. */
3602 registers_changed ();
3603 if (linux_nat_prepare_to_resume != NULL)
3604 linux_nat_prepare_to_resume (lp);
3605 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3606 lp->step, TARGET_SIGNAL_0);
3607 if (debug_linux_nat)
3608 fprintf_unfiltered (gdb_stdlog,
3609 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3610 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3611 target_pid_to_str (lp->ptid));
3612 lp->stopped = 0;
3613 gdb_assert (lp->resumed);
3614
3615 /* Catch the pending SIGSTOP. */
3616 status = lp->status;
3617 lp->status = 0;
3618
3619 stop_wait_callback (lp, NULL);
3620
3621 /* If the lp->status field isn't empty, we caught another signal
3622 while flushing the SIGSTOP. Return it back to the event
3623 queue of the LWP, as we already have an event to handle. */
3624 if (lp->status)
3625 {
3626 if (debug_linux_nat)
3627 fprintf_unfiltered (gdb_stdlog,
3628 "LLW: kill %s, %s\n",
3629 target_pid_to_str (lp->ptid),
3630 status_to_str (lp->status));
3631 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3632 }
3633
3634 lp->status = status;
3635 }
3636
3637 if (!target_can_async_p ())
3638 {
3639 /* Causes SIGINT to be passed on to the attached process. */
3640 set_sigint_trap ();
3641 }
3642
3643 /* But if we don't find a pending event, we'll have to wait. */
3644
3645 while (lp == NULL)
3646 {
3647 pid_t lwpid;
3648
3649 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3650 quirks:
3651
3652 - If the thread group leader exits while other threads in the
3653 thread group still exist, waitpid(TGID, ...) hangs. That
3654 waitpid won't return an exit status until the other threads
3655 in the group are reaped.
3656
3657 - When a non-leader thread execs, that thread just vanishes
3658 without reporting an exit (so we'd hang if we waited for it
3659 explicitly in that case). The exec event is reported to
3660 the TGID pid. */
3661
3662 errno = 0;
3663 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3664 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3665 lwpid = my_waitpid (-1, &status, WNOHANG);
3666
3667 if (debug_linux_nat)
3668 fprintf_unfiltered (gdb_stdlog,
3669 "LNW: waitpid(-1, ...) returned %d, %s\n",
3670 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3671
3672 if (lwpid > 0)
3673 {
3674 /* If this is true, then we paused LWPs momentarily, and may
3675 now have pending events to handle. */
3676 int new_pending;
3677
3678 if (debug_linux_nat)
3679 {
3680 fprintf_unfiltered (gdb_stdlog,
3681 "LLW: waitpid %ld received %s\n",
3682 (long) lwpid, status_to_str (status));
3683 }
3684
3685 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3686
3687 /* STATUS is now no longer valid, use LP->STATUS instead. */
3688 status = 0;
3689
3690 if (lp && !ptid_match (lp->ptid, ptid))
3691 {
3692 gdb_assert (lp->resumed);
3693
3694 if (debug_linux_nat)
3695 fprintf (stderr,
3696 "LWP %ld got an event %06x, leaving pending.\n",
3697 ptid_get_lwp (lp->ptid), lp->status);
3698
3699 if (WIFSTOPPED (lp->status))
3700 {
3701 if (WSTOPSIG (lp->status) != SIGSTOP)
3702 {
3703 /* Cancel breakpoint hits. The breakpoint may
3704 be removed before we fetch events from this
3705 process to report to the core. It is best
3706 not to assume the moribund breakpoints
3707 heuristic always handles these cases --- too
3708 many events could go through to the core
3709 before this one is handled. All-stop
3710 always cancels breakpoint hits in all
3711 threads. */
3712 if (non_stop
3713 && linux_nat_lp_status_is_event (lp)
3714 && cancel_breakpoint (lp))
3715 {
3716 /* Throw away the SIGTRAP. */
3717 lp->status = 0;
3718
3719 if (debug_linux_nat)
3720 fprintf (stderr,
3721 "LLW: LWP %ld hit a breakpoint while"
3722 " waiting for another process;"
3723 " cancelled it\n",
3724 ptid_get_lwp (lp->ptid));
3725 }
3726 lp->stopped = 1;
3727 }
3728 else
3729 {
3730 lp->stopped = 1;
3731 lp->signalled = 0;
3732 }
3733 }
3734 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3735 {
3736 if (debug_linux_nat)
3737 fprintf (stderr,
3738 "Process %ld exited while stopping LWPs\n",
3739 ptid_get_lwp (lp->ptid));
3740
3741 /* This was the last lwp in the process. Since
3742 events are serialized to GDB core and we can't
3743 report this one right now, leave the status
3744 pending for the next time we're able to report
3745 it; GDB core and the other target layers will
3746 want to be notified about the exit code or
3747 signal. */
3748
3749 /* Prevent trying to stop this thread again. We'll
3750 never try to resume it because it has a pending
3751 status. */
3752 lp->stopped = 1;
3753
3754 /* Dead LWPs aren't expected to report a pending
3755 SIGSTOP. */
3756 lp->signalled = 0;
3757
3758 /* Store the pending event in the waitstatus as
3759 well, because W_EXITCODE(0,0) == 0. */
3760 store_waitstatus (&lp->waitstatus, lp->status);
3761 }
3762
3763 /* Keep looking. */
3764 lp = NULL;
3765 }
3766
3767 if (new_pending)
3768 {
3769 /* Some LWP now has a pending event. Go all the way
3770 back to check it. */
3771 goto retry;
3772 }
3773
3774 if (lp)
3775 {
3776 /* We got an event to report to the core. */
3777 break;
3778 }
3779
3780 /* Retry until nothing comes out of waitpid. A single
3781 SIGCHLD can indicate more than one child stopped. */
3782 continue;
3783 }
3784
3785 /* Check for zombie thread group leaders. Those can't be reaped
3786 until all other threads in the thread group are. */
3787 check_zombie_leaders ();
3788
3789 /* If there are no resumed children left, bail. We'd be stuck
3790 forever in the sigsuspend call below otherwise. */
3791 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3792 {
3793 if (debug_linux_nat)
3794 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3795
3796 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3797
3798 if (!target_can_async_p ())
3799 clear_sigint_trap ();
3800
3801 restore_child_signals_mask (&prev_mask);
3802 return minus_one_ptid;
3803 }
3804
3805 /* No interesting event to report to the core. */
3806
3807 if (target_options & TARGET_WNOHANG)
3808 {
3809 if (debug_linux_nat)
3810 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3811
3812 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3813 restore_child_signals_mask (&prev_mask);
3814 return minus_one_ptid;
3815 }
3816
3817 /* We shouldn't end up here unless we want to try again. */
3818 gdb_assert (lp == NULL);
3819
3820 /* Block until we get an event reported with SIGCHLD. */
3821 sigsuspend (&suspend_mask);
3822 }
3823
3824 if (!target_can_async_p ())
3825 clear_sigint_trap ();
3826
3827 gdb_assert (lp);
3828
3829 status = lp->status;
3830 lp->status = 0;
3831
3832 /* Don't report signals that GDB isn't interested in, such as
3833 signals that are neither printed nor stopped upon. Stopping all
3834 threads can be a bit time-consuming so if we want decent
3835 performance with heavily multi-threaded programs, especially when
3836 they're using a high frequency timer, we'd better avoid it if we
3837 can. */
3838
3839 if (WIFSTOPPED (status))
3840 {
3841 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
3842
3843 /* When using hardware single-step, we need to report every signal.
3844 Otherwise, signals in pass_mask may be short-circuited. */
3845 if (!lp->step
3846 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3847 {
3848 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3849 here? It is not clear we should. GDB may not expect
3850 other threads to run. On the other hand, not resuming
3851 newly attached threads may cause an unwanted delay in
3852 getting them running. */
3853 registers_changed ();
3854 if (linux_nat_prepare_to_resume != NULL)
3855 linux_nat_prepare_to_resume (lp);
3856 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3857 lp->step, signo);
3858 if (debug_linux_nat)
3859 fprintf_unfiltered (gdb_stdlog,
3860 "LLW: %s %s, %s (preempt 'handle')\n",
3861 lp->step ?
3862 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3863 target_pid_to_str (lp->ptid),
3864 (signo != TARGET_SIGNAL_0
3865 ? strsignal (target_signal_to_host (signo))
3866 : "0"));
3867 lp->stopped = 0;
3868 goto retry;
3869 }
3870
3871 if (!non_stop)
3872 {
3873 /* Only do the below in all-stop, as we currently use SIGINT
3874 to implement target_stop (see linux_nat_stop) in
3875 non-stop. */
3876 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3877 {
3878 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3879 forwarded to the entire process group, that is, all LWPs
3880 will receive it - unless they're using CLONE_THREAD to
3881 share signals. Since we only want to report it once, we
3882 mark it as ignored for all LWPs except this one. */
3883 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3884 set_ignore_sigint, NULL);
3885 lp->ignore_sigint = 0;
3886 }
3887 else
3888 maybe_clear_ignore_sigint (lp);
3889 }
3890 }
3891
3892 /* This LWP is stopped now. */
3893 lp->stopped = 1;
3894
3895 if (debug_linux_nat)
3896 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3897 status_to_str (status), target_pid_to_str (lp->ptid));
3898
3899 if (!non_stop)
3900 {
3901 /* Now stop all other LWPs ... */
3902 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3903
3904 /* ... and wait until all of them have reported back that
3905 they're no longer running. */
3906 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3907
3908 /* If we're not waiting for a specific LWP, choose an event LWP
3909 from among those that have had events. Giving equal priority
3910 to all LWPs that have had events helps prevent
3911 starvation. */
3912 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3913 select_event_lwp (ptid, &lp, &status);
3914
3915 /* Now that we've selected our final event LWP, cancel any
3916 breakpoints in other LWPs that have hit a GDB breakpoint.
3917 See the comment in cancel_breakpoints_callback to find out
3918 why. */
3919 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3920
3921 /* We'll need this to determine whether to report a SIGSTOP as
3922 TARGET_SIGNAL_0. Need to take a copy because
3923 resume_clear_callback clears it. */
3924 last_resume_kind = lp->last_resume_kind;
3925
3926 /* In all-stop, from the core's perspective, all LWPs are now
3927 stopped until a new resume action is sent over. */
3928 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3929 }
3930 else
3931 {
3932 /* See above. */
3933 last_resume_kind = lp->last_resume_kind;
3934 resume_clear_callback (lp, NULL);
3935 }
3936
3937 if (linux_nat_status_is_event (status))
3938 {
3939 if (debug_linux_nat)
3940 fprintf_unfiltered (gdb_stdlog,
3941 "LLW: trap ptid is %s.\n",
3942 target_pid_to_str (lp->ptid));
3943 }
3944
3945 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3946 {
3947 *ourstatus = lp->waitstatus;
3948 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3949 }
3950 else
3951 store_waitstatus (ourstatus, status);
3952
3953 if (debug_linux_nat)
3954 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3955
3956 restore_child_signals_mask (&prev_mask);
3957
3958 if (last_resume_kind == resume_stop
3959 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3960 && WSTOPSIG (status) == SIGSTOP)
3961 {
3962 /* This thread was requested to stop by GDB with target_stop,
3963 and it stopped cleanly, so report it as stopped with SIG0.
3964 The use of SIGSTOP is an implementation detail. */
3965 ourstatus->value.sig = TARGET_SIGNAL_0;
3966 }
3967
3968 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3969 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3970 lp->core = -1;
3971 else
3972 lp->core = linux_common_core_of_thread (lp->ptid);
3973
3974 return lp->ptid;
3975 }
3976
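/* Editor's sketch, not part of GDB: a minimal standalone program
   showing the "non-blocking waitpid plus sigsuspend" pattern that
   linux_nat_wait_1 above relies on.  All names are illustrative and
   error handling is elided; the block is #if 0'd out so it is never
   compiled as part of this file.  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void
chld_handler (int signo)
{
  /* Empty on purpose: SIGCHLD only needs to interrupt sigsuspend.  */
}

int
main (void)
{
  sigset_t block_mask, suspend_mask;
  pid_t child;

  signal (SIGCHLD, chld_handler);

  /* Block SIGCHLD so it can only arrive inside sigsuspend; otherwise
     it could be delivered between the waitpid poll and the sigsuspend
     call, and we would sleep forever.  */
  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  child = fork ();
  if (child == 0)
    {
      sleep (1);
      _exit (0);
    }

  for (;;)
    {
      int status;
      pid_t pid = waitpid (-1, &status, WNOHANG);

      if (pid > 0)
        {
          printf ("event from %d, status 0x%x\n", (int) pid, status);
          if (WIFEXITED (status))
            break;
        }
      else
        /* Nothing pending: atomically unblock SIGCHLD and sleep.  */
        sigsuspend (&suspend_mask);
    }
  return 0;
}
#endif
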
3977 /* Resume LWPs that are currently stopped without any pending status
3978 to report, but are resumed from the core's perspective. */
3979
3980 static int
3981 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3982 {
3983 ptid_t *wait_ptid_p = data;
3984
3985 if (lp->stopped
3986 && lp->resumed
3987 && lp->status == 0
3988 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3989 {
3990 struct regcache *regcache = get_thread_regcache (lp->ptid);
3991 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3992 CORE_ADDR pc = regcache_read_pc (regcache);
3993
3994 gdb_assert (is_executing (lp->ptid));
3995
3996 /* Don't bother if there's a breakpoint at PC that we'd hit
3997 immediately, and we're not waiting for this LWP. */
3998 if (!ptid_match (lp->ptid, *wait_ptid_p))
3999 {
4000 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
4001 return 0;
4002 }
4003
4004 if (debug_linux_nat)
4005 fprintf_unfiltered (gdb_stdlog,
4006 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
4007 target_pid_to_str (lp->ptid),
4008 paddress (gdbarch, pc),
4009 lp->step);
4010
4011 registers_changed ();
4012 if (linux_nat_prepare_to_resume != NULL)
4013 linux_nat_prepare_to_resume (lp);
4014 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
4015 lp->step, TARGET_SIGNAL_0);
4016 lp->stopped = 0;
4017 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
4018 lp->stopped_by_watchpoint = 0;
4019 }
4020
4021 return 0;
4022 }
4023
4024 static ptid_t
4025 linux_nat_wait (struct target_ops *ops,
4026 ptid_t ptid, struct target_waitstatus *ourstatus,
4027 int target_options)
4028 {
4029 ptid_t event_ptid;
4030
4031 if (debug_linux_nat)
4032 fprintf_unfiltered (gdb_stdlog,
4033 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
4034
4035 /* Flush the async file first. */
4036 if (target_can_async_p ())
4037 async_file_flush ();
4038
4039 /* Resume LWPs that are currently stopped without any pending status
4040 to report, but are resumed from the core's perspective. LWPs get
4041 in this state if we find them stopping at a time we're not
4042 interested in reporting the event (a target_wait on a
4043 specific process, for example; see linux_nat_wait_1), and
4044 meanwhile the event became uninteresting. Don't bother resuming
4045 LWPs we're not going to wait for if they'd stop immediately. */
4046 if (non_stop)
4047 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4048
4049 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
4050
4051 /* If we requested any event, and something came out, assume there
4052 may be more. If we requested a specific lwp or process, also
4053 assume there may be more. */
4054 if (target_can_async_p ()
4055 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4056 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
4057 || !ptid_equal (ptid, minus_one_ptid)))
4058 async_file_mark ();
4059
4060 /* Get ready for the next event. */
4061 if (target_can_async_p ())
4062 target_async (inferior_event_handler, 0);
4063
4064 return event_ptid;
4065 }
4066
4067 static int
4068 kill_callback (struct lwp_info *lp, void *data)
4069 {
4070 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4071
4072 errno = 0;
4073 kill (GET_LWP (lp->ptid), SIGKILL);
4074 if (debug_linux_nat)
4075 fprintf_unfiltered (gdb_stdlog,
4076 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4077 target_pid_to_str (lp->ptid),
4078 errno ? safe_strerror (errno) : "OK");
4079
4080 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4081
4082 errno = 0;
4083 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4084 if (debug_linux_nat)
4085 fprintf_unfiltered (gdb_stdlog,
4086 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4087 target_pid_to_str (lp->ptid),
4088 errno ? safe_strerror (errno) : "OK");
4089
4090 return 0;
4091 }
4092
4093 static int
4094 kill_wait_callback (struct lwp_info *lp, void *data)
4095 {
4096 pid_t pid;
4097
4098 /* We must make sure that there are no pending events (delayed
4099 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
4100 process doesn't interfere with any following debugging session. */
4101
4102 /* For cloned processes we must check both with __WCLONE and
4103 without, since the exit status of a cloned process isn't reported
4104 with __WCLONE. */
4105 if (lp->cloned)
4106 {
4107 do
4108 {
4109 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
4110 if (pid != (pid_t) -1)
4111 {
4112 if (debug_linux_nat)
4113 fprintf_unfiltered (gdb_stdlog,
4114 "KWC: wait %s received unknown.\n",
4115 target_pid_to_str (lp->ptid));
4116 /* The Linux kernel sometimes fails to kill a thread
4117 completely after PTRACE_KILL; the thread goes from the
4118 stop point in do_fork out to the one in
4119 get_signal_to_deliver and waits again. So kill it
4120 again. */
4121 kill_callback (lp, NULL);
4122 }
4123 }
4124 while (pid == GET_LWP (lp->ptid));
4125
4126 gdb_assert (pid == -1 && errno == ECHILD);
4127 }
4128
4129 do
4130 {
4131 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
4132 if (pid != (pid_t) -1)
4133 {
4134 if (debug_linux_nat)
4135 fprintf_unfiltered (gdb_stdlog,
4136 "KWC: wait %s received unk.\n",
4137 target_pid_to_str (lp->ptid));
4138 /* See the call to kill_callback above. */
4139 kill_callback (lp, NULL);
4140 }
4141 }
4142 while (pid == GET_LWP (lp->ptid));
4143
4144 gdb_assert (pid == -1 && errno == ECHILD);
4145 return 0;
4146 }
4147
4148 static void
4149 linux_nat_kill (struct target_ops *ops)
4150 {
4151 struct target_waitstatus last;
4152 ptid_t last_ptid;
4153 int status;
4154
4155 /* If we're stopped while forking and we haven't followed yet,
4156 kill the other task. We need to do this first because the
4157 parent will be sleeping if this is a vfork. */
4158
4159 get_last_target_status (&last_ptid, &last);
4160
4161 if (last.kind == TARGET_WAITKIND_FORKED
4162 || last.kind == TARGET_WAITKIND_VFORKED)
4163 {
4164 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4165 wait (&status);
4166 }
4167
4168 if (forks_exist_p ())
4169 linux_fork_killall ();
4170 else
4171 {
4172 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4173
4174 /* Stop all threads before killing them, since ptrace requires
4175 that a thread be stopped for PTRACE_KILL to succeed. */
4176 iterate_over_lwps (ptid, stop_callback, NULL);
4177 /* ... and wait until all of them have reported back that
4178 they're no longer running. */
4179 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4180
4181 /* Kill all LWPs ... */
4182 iterate_over_lwps (ptid, kill_callback, NULL);
4183
4184 /* ... and wait until we've flushed all events. */
4185 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4186 }
4187
4188 target_mourn_inferior ();
4189 }
4190
4191 static void
4192 linux_nat_mourn_inferior (struct target_ops *ops)
4193 {
4194 purge_lwp_list (ptid_get_pid (inferior_ptid));
4195
4196 if (! forks_exist_p ())
4197 /* Normal case, no other forks available. */
4198 linux_ops->to_mourn_inferior (ops);
4199 else
4200 /* Multi-fork case. The current inferior_ptid has exited, but
4201 there are other viable forks to debug. Delete the exiting
4202 one and context-switch to the first available. */
4203 linux_fork_mourn_inferior ();
4204 }
4205
4206 /* Convert a native/host siginfo object into/from the siginfo in the
4207 layout of the inferior's architecture. */
4208
4209 static void
4210 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4211 {
4212 int done = 0;
4213
4214 if (linux_nat_siginfo_fixup != NULL)
4215 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4216
4217 /* If there was no callback, or the callback didn't do anything,
4218 then just do a straight memcpy. */
4219 if (!done)
4220 {
4221 if (direction == 1)
4222 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4223 else
4224 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4225 }
4226 }
4227
4228 static LONGEST
4229 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4230 const char *annex, gdb_byte *readbuf,
4231 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4232 {
4233 int pid;
4234 struct siginfo siginfo;
4235 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4236
4237 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4238 gdb_assert (readbuf || writebuf);
4239
4240 pid = GET_LWP (inferior_ptid);
4241 if (pid == 0)
4242 pid = GET_PID (inferior_ptid);
4243
4244 if (offset > sizeof (siginfo))
4245 return -1;
4246
4247 errno = 0;
4248 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4249 if (errno != 0)
4250 return -1;
4251
4252 /* When GDB is built as a 64-bit application, ptrace writes into
4253 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4254 inferior with a 64-bit GDB should look the same as debugging it
4255 with a 32-bit GDB, we need to convert it. GDB core always sees
4256 the converted layout, so any read/write will have to be done
4257 post-conversion. */
4258 siginfo_fixup (&siginfo, inf_siginfo, 0);
4259
4260 if (offset + len > sizeof (siginfo))
4261 len = sizeof (siginfo) - offset;
4262
4263 if (readbuf != NULL)
4264 memcpy (readbuf, inf_siginfo + offset, len);
4265 else
4266 {
4267 memcpy (inf_siginfo + offset, writebuf, len);
4268
4269 /* Convert back to ptrace layout before flushing it out. */
4270 siginfo_fixup (&siginfo, inf_siginfo, 1);
4271
4272 errno = 0;
4273 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4274 if (errno != 0)
4275 return -1;
4276 }
4277
4278 return len;
4279 }
4280
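/* Editor's sketch, not part of GDB: a standalone PTRACE_GETSIGINFO
   read from a stopped tracee, the same request linux_xfer_siginfo
   issues above.  Modern headers spell the type siginfo_t where this
   file's era used struct siginfo.  Error handling is elided and the
   block is #if 0'd out.  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGUSR1);		/* Stop ourselves with a known signal.  */
      _exit (0);
    }
  else
    {
      siginfo_t si;
      int status;

      waitpid (child, &status, 0);	/* Tracee stopped on SIGUSR1.  */
      ptrace (PTRACE_GETSIGINFO, child, 0, &si);
      printf ("si_signo = %d (SIGUSR1 = %d)\n", si.si_signo, SIGUSR1);
      ptrace (PTRACE_KILL, child, 0, 0);
    }
  return 0;
}
#endif
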
4281 static LONGEST
4282 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4283 const char *annex, gdb_byte *readbuf,
4284 const gdb_byte *writebuf,
4285 ULONGEST offset, LONGEST len)
4286 {
4287 struct cleanup *old_chain;
4288 LONGEST xfer;
4289
4290 if (object == TARGET_OBJECT_SIGNAL_INFO)
4291 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4292 offset, len);
4293
4294 /* The target is connected but no live inferior is selected. Pass
4295 this request down to a lower stratum (e.g., the executable
4296 file). */
4297 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4298 return 0;
4299
4300 old_chain = save_inferior_ptid ();
4301
4302 if (is_lwp (inferior_ptid))
4303 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4304
4305 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4306 offset, len);
4307
4308 do_cleanups (old_chain);
4309 return xfer;
4310 }
4311
4312 static int
4313 linux_thread_alive (ptid_t ptid)
4314 {
4315 int err, tmp_errno;
4316
4317 gdb_assert (is_lwp (ptid));
4318
4319 /* Send signal 0 instead of using ptrace, because ptracing a
4320 running thread errors out claiming that the thread doesn't
4321 exist. */
4322 err = kill_lwp (GET_LWP (ptid), 0);
4323 tmp_errno = errno;
4324 if (debug_linux_nat)
4325 fprintf_unfiltered (gdb_stdlog,
4326 "LLTA: KILL(SIG0) %s (%s)\n",
4327 target_pid_to_str (ptid),
4328 err ? safe_strerror (tmp_errno) : "OK");
4329
4330 if (err != 0)
4331 return 0;
4332
4333 return 1;
4334 }
4335
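/* Editor's sketch, not part of GDB: the "signal 0" liveness probe
   used by linux_thread_alive above.  kill (pid, 0) performs the
   existence and permission checks but delivers no signal; EPERM
   still means the process exists.  Illustrative only; #if 0'd out.  */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  printf ("self alive: %d\n", kill (getpid (), 0) == 0);

  errno = 0;
  if (kill (1, 0) == 0 || errno == EPERM)
    puts ("pid 1 exists");	/* EPERM: alive but not signalable.  */
  else
    puts ("pid 1 gone");	/* ESRCH: no such process.  */
  return 0;
}
#endif
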
4336 static int
4337 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4338 {
4339 return linux_thread_alive (ptid);
4340 }
4341
4342 static char *
4343 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4344 {
4345 static char buf[64];
4346
4347 if (is_lwp (ptid)
4348 && (GET_PID (ptid) != GET_LWP (ptid)
4349 || num_lwps (GET_PID (ptid)) > 1))
4350 {
4351 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4352 return buf;
4353 }
4354
4355 return normal_pid_to_str (ptid);
4356 }
4357
4358 static char *
4359 linux_nat_thread_name (struct thread_info *thr)
4360 {
4361 int pid = ptid_get_pid (thr->ptid);
4362 long lwp = ptid_get_lwp (thr->ptid);
4363 #define FORMAT "/proc/%d/task/%ld/comm"
4364 char buf[sizeof (FORMAT) + 30];
4365 FILE *comm_file;
4366 char *result = NULL;
4367
4368 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4369 comm_file = fopen (buf, "r");
4370 if (comm_file)
4371 {
4372 /* Not exported by the kernel, so we define it here. */
4373 #define COMM_LEN 16
4374 static char line[COMM_LEN + 1];
4375
4376 if (fgets (line, sizeof (line), comm_file))
4377 {
4378 char *nl = strchr (line, '\n');
4379
4380 if (nl)
4381 *nl = '\0';
4382 if (*line != '\0')
4383 result = line;
4384 }
4385
4386 fclose (comm_file);
4387 }
4388
4389 #undef COMM_LEN
4390 #undef FORMAT
4391
4392 return result;
4393 }
4394
4395 /* Accepts an integer PID; returns a string representing a file that
4396 can be opened to get the symbols for the child process. */
4397
4398 static char *
4399 linux_child_pid_to_exec_file (int pid)
4400 {
4401 char *name1, *name2;
4402
4403 name1 = xmalloc (MAXPATHLEN);
4404 name2 = xmalloc (MAXPATHLEN);
4405 make_cleanup (xfree, name1);
4406 make_cleanup (xfree, name2);
4407 memset (name2, 0, MAXPATHLEN);
4408
4409 sprintf (name1, "/proc/%d/exe", pid);
4410 if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
4411 return name2;
4412 else
4413 return name1;
4414 }
4415
4416 /* Records the thread's register state for the corefile note
4417 section. */
4418
4419 static char *
4420 linux_nat_collect_thread_registers (const struct regcache *regcache,
4421 ptid_t ptid, bfd *obfd,
4422 char *note_data, int *note_size,
4423 enum target_signal stop_signal)
4424 {
4425 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4426 const struct regset *regset;
4427 int core_regset_p;
4428 gdb_gregset_t gregs;
4429 gdb_fpregset_t fpregs;
4430
4431 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4432
4433 if (core_regset_p
4434 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4435 sizeof (gregs)))
4436 != NULL && regset->collect_regset != NULL)
4437 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4438 else
4439 fill_gregset (regcache, &gregs, -1);
4440
4441 note_data = (char *) elfcore_write_prstatus
4442 (obfd, note_data, note_size, ptid_get_lwp (ptid),
4443 target_signal_to_host (stop_signal), &gregs);
4444
4445 if (core_regset_p
4446 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4447 sizeof (fpregs)))
4448 != NULL && regset->collect_regset != NULL)
4449 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4450 else
4451 fill_fpregset (regcache, &fpregs, -1);
4452
4453 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4454 &fpregs, sizeof (fpregs));
4455
4456 return note_data;
4457 }
4458
4459 /* Implements the "to_make_corefile_notes" target method. Builds the
4460 note section for a corefile, and returns it in a malloc buffer. */
4461
4462 static char *
4463 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4464 {
4465 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4466 converted to gdbarch_core_regset_sections, this function can go away. */
4467 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4468 linux_nat_collect_thread_registers);
4469 }
4470
4471 /* Implement the to_xfer_partial interface for memory reads using the /proc
4472 filesystem. Because we can use a single read() call for /proc, this
4473 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4474 but it doesn't support writes. */
4475
4476 static LONGEST
4477 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4478 const char *annex, gdb_byte *readbuf,
4479 const gdb_byte *writebuf,
4480 ULONGEST offset, LONGEST len)
4481 {
4482 LONGEST ret;
4483 int fd;
4484 char filename[64];
4485
4486 if (object != TARGET_OBJECT_MEMORY || !readbuf)
4487 return 0;
4488
4489 /* Don't bother with /proc for reads of less than a few words. */
4490 if (len < 3 * sizeof (long))
4491 return 0;
4492
4493 /* We could keep this file open and cache it - possibly one per
4494 thread. That requires some juggling, but is even faster. */
4495 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4496 fd = open (filename, O_RDONLY | O_LARGEFILE);
4497 if (fd == -1)
4498 return 0;
4499
4500 /* If pread64 is available, use it. It's faster if the kernel
4501 supports it (only one syscall), and it's 64-bit safe even on
4502 32-bit platforms (for instance, SPARC debugging a SPARC64
4503 application). */
4504 #ifdef HAVE_PREAD64
4505 if (pread64 (fd, readbuf, len, offset) != len)
4506 #else
4507 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4508 #endif
4509 ret = 0;
4510 else
4511 ret = len;
4512
4513 close (fd);
4514 return ret;
4515 }
4516
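/* Editor's sketch, not part of GDB: a single pread on /proc/<pid>/mem,
   the technique linux_proc_xfer_partial uses above.  Reading our own
   address space keeps the sketch self-contained; reading another
   process requires it to be ptrace-stopped.  On 32-bit hosts pread64
   (or O_LARGEFILE) matters, as the function above notes.  #if 0'd
   out; names are illustrative.  */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static const char secret[] = "hello from my own address space";

int
main (void)
{
  char buf[sizeof (secret)];
  int fd = open ("/proc/self/mem", O_RDONLY);

  if (fd == -1)
    return 1;
  /* The file offset is the virtual address to read from.  */
  if (pread (fd, buf, sizeof (buf), (off_t) (uintptr_t) secret)
      == (ssize_t) sizeof (buf))
    printf ("read back: %s\n", buf);
  close (fd);
  return 0;
}
#endif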
4517
4518 /* Enumerate spufs IDs for process PID. */
4519 static LONGEST
4520 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4521 {
4522 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4523 LONGEST pos = 0;
4524 LONGEST written = 0;
4525 char path[128];
4526 DIR *dir;
4527 struct dirent *entry;
4528
4529 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4530 dir = opendir (path);
4531 if (!dir)
4532 return -1;
4533
4534 rewinddir (dir);
4535 while ((entry = readdir (dir)) != NULL)
4536 {
4537 struct stat st;
4538 struct statfs stfs;
4539 int fd;
4540
4541 fd = atoi (entry->d_name);
4542 if (!fd)
4543 continue;
4544
4545 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4546 if (stat (path, &st) != 0)
4547 continue;
4548 if (!S_ISDIR (st.st_mode))
4549 continue;
4550
4551 if (statfs (path, &stfs) != 0)
4552 continue;
4553 if (stfs.f_type != SPUFS_MAGIC)
4554 continue;
4555
4556 if (pos >= offset && pos + 4 <= offset + len)
4557 {
4558 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4559 written += 4;
4560 }
4561 pos += 4;
4562 }
4563
4564 closedir (dir);
4565 return written;
4566 }
4567
4568 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4569 object type, using the /proc file system. */
4570 static LONGEST
4571 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4572 const char *annex, gdb_byte *readbuf,
4573 const gdb_byte *writebuf,
4574 ULONGEST offset, LONGEST len)
4575 {
4576 char buf[128];
4577 int fd = 0;
4578 int ret = -1;
4579 int pid = PIDGET (inferior_ptid);
4580
4581 if (!annex)
4582 {
4583 if (!readbuf)
4584 return -1;
4585 else
4586 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4587 }
4588
4589 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4590 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4591 if (fd <= 0)
4592 return -1;
4593
4594 if (offset != 0
4595 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4596 {
4597 close (fd);
4598 return 0;
4599 }
4600
4601 if (writebuf)
4602 ret = write (fd, writebuf, (size_t) len);
4603 else if (readbuf)
4604 ret = read (fd, readbuf, (size_t) len);
4605
4606 close (fd);
4607 return ret;
4608 }
4609
4610
4611 /* Parse LINE as a signal set and add its set bits to SIGS. */
4612
4613 static void
4614 add_line_to_sigset (const char *line, sigset_t *sigs)
4615 {
4616 int len = strlen (line) - 1;
4617 const char *p;
4618 int signum;
4619
4620 if (line[len] != '\n')
4621 error (_("Could not parse signal set: %s"), line);
4622
4623 p = line;
4624 signum = len * 4;
4625 while (len-- > 0)
4626 {
4627 int digit;
4628
4629 if (*p >= '0' && *p <= '9')
4630 digit = *p - '0';
4631 else if (*p >= 'a' && *p <= 'f')
4632 digit = *p - 'a' + 10;
4633 else
4634 error (_("Could not parse signal set: %s"), line);
4635
4636 signum -= 4;
4637
4638 if (digit & 1)
4639 sigaddset (sigs, signum + 1);
4640 if (digit & 2)
4641 sigaddset (sigs, signum + 2);
4642 if (digit & 4)
4643 sigaddset (sigs, signum + 3);
4644 if (digit & 8)
4645 sigaddset (sigs, signum + 4);
4646
4647 p++;
4648 }
4649 }
4650
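/* Editor's sketch, not part of GDB: the Sig* lines parsed above are
   hexadecimal masks whose rightmost digit holds the lowest-numbered
   signals; bit N-1 of the mask corresponds to signal N.  For example,
   blocking SIGINT (signal 2) makes SigBlk end in ...0002.  A minimal
   self-check, assuming a Linux /proc layout; #if 0'd out.  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char line[256];
  FILE *f;
  sigset_t blocked;

  sigemptyset (&blocked);
  sigaddset (&blocked, SIGINT);
  sigprocmask (SIG_BLOCK, &blocked, NULL);

  f = fopen ("/proc/self/status", "r");
  while (f != NULL && fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigBlk:\t", 8) == 0)
      printf ("%s", line);	/* Ends in ...0000000000000002.  */
  if (f != NULL)
    fclose (f);
  return 0;
}
#endif
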
4651 /* Find process PID's pending, blocked and ignored signals from
4652 /proc/pid/status, and set PENDING, BLOCKED and IGNORED to match. */
4653
4654 void
4655 linux_proc_pending_signals (int pid, sigset_t *pending,
4656 sigset_t *blocked, sigset_t *ignored)
4657 {
4658 FILE *procfile;
4659 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4660 struct cleanup *cleanup;
4661
4662 sigemptyset (pending);
4663 sigemptyset (blocked);
4664 sigemptyset (ignored);
4665 sprintf (fname, "/proc/%d/status", pid);
4666 procfile = fopen (fname, "r");
4667 if (procfile == NULL)
4668 error (_("Could not open %s"), fname);
4669 cleanup = make_cleanup_fclose (procfile);
4670
4671 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4672 {
4673 /* Normal queued signals are on the SigPnd line in the status
4674 file. However, 2.6 kernels also have a "shared" pending
4675 queue for delivering signals to a thread group, so check for
4676 a ShdPnd line also.
4677
4678 Unfortunately some Red Hat kernels include the shared pending
4679 queue but not the ShdPnd status field. */
4680
4681 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4682 add_line_to_sigset (buffer + 8, pending);
4683 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4684 add_line_to_sigset (buffer + 8, pending);
4685 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4686 add_line_to_sigset (buffer + 8, blocked);
4687 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4688 add_line_to_sigset (buffer + 8, ignored);
4689 }
4690
4691 do_cleanups (cleanup);
4692 }
4693
4694 static LONGEST
4695 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4696 const char *annex, gdb_byte *readbuf,
4697 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4698 {
4699 gdb_assert (object == TARGET_OBJECT_OSDATA);
4700
4701 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4702 }
4703
4704 static LONGEST
4705 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4706 const char *annex, gdb_byte *readbuf,
4707 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4708 {
4709 LONGEST xfer;
4710
4711 if (object == TARGET_OBJECT_AUXV)
4712 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4713 offset, len);
4714
4715 if (object == TARGET_OBJECT_OSDATA)
4716 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4717 offset, len);
4718
4719 if (object == TARGET_OBJECT_SPU)
4720 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4721 offset, len);
4722
4723 /* GDB may calculate addresses in a width larger than the target's
4724 address width; such addresses must be masked down before final
4725 use - either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4726
4727 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4728
4729 if (object == TARGET_OBJECT_MEMORY)
4730 {
4731 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4732
4733 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4734 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4735 }
4736
4737 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4738 offset, len);
4739 if (xfer != 0)
4740 return xfer;
4741
4742 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4743 offset, len);
4744 }
4745
4746 static void
4747 cleanup_target_stop (void *arg)
4748 {
4749 ptid_t *ptid = (ptid_t *) arg;
4750
4751 gdb_assert (arg != NULL);
4752
4753 /* Unpause all */
4754 target_resume (*ptid, 0, TARGET_SIGNAL_0);
4755 }
4756
4757 static VEC(static_tracepoint_marker_p) *
4758 linux_child_static_tracepoint_markers_by_strid (const char *strid)
4759 {
4760 char s[IPA_CMD_BUF_SIZE];
4761 struct cleanup *old_chain;
4762 int pid = ptid_get_pid (inferior_ptid);
4763 VEC(static_tracepoint_marker_p) *markers = NULL;
4764 struct static_tracepoint_marker *marker = NULL;
4765 char *p = s;
4766 ptid_t ptid = ptid_build (pid, 0, 0);
4767
4768 /* Pause all */
4769 target_stop (ptid);
4770
4771 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4772 s[sizeof ("qTfSTM")] = 0;
4773
4774 agent_run_command (pid, s);
4775
4776 old_chain = make_cleanup (free_current_marker, &marker);
4777 make_cleanup (cleanup_target_stop, &ptid);
4778
4779 while (*p++ == 'm')
4780 {
4781 if (marker == NULL)
4782 marker = XCNEW (struct static_tracepoint_marker);
4783
4784 do
4785 {
4786 parse_static_tracepoint_marker_definition (p, &p, marker);
4787
4788 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4789 {
4790 VEC_safe_push (static_tracepoint_marker_p,
4791 markers, marker);
4792 marker = NULL;
4793 }
4794 else
4795 {
4796 release_static_tracepoint_marker (marker);
4797 memset (marker, 0, sizeof (*marker));
4798 }
4799 }
4800 while (*p++ == ','); /* comma-separated list */
4801
4802 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4803 s[sizeof ("qTsSTM")] = 0;
4804 agent_run_command (pid, s);
4805 p = s;
4806 }
4807
4808 do_cleanups (old_chain);
4809
4810 return markers;
4811 }
4812
4813 /* Create a prototype generic GNU/Linux target. The client can override
4814 it with local methods. */
4815
4816 static void
4817 linux_target_install_ops (struct target_ops *t)
4818 {
4819 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4820 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4821 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4822 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4823 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4824 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4825 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4826 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4827 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4828 t->to_post_attach = linux_child_post_attach;
4829 t->to_follow_fork = linux_child_follow_fork;
4830 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4831
4832 super_xfer_partial = t->to_xfer_partial;
4833 t->to_xfer_partial = linux_xfer_partial;
4834
4835 t->to_static_tracepoint_markers_by_strid
4836 = linux_child_static_tracepoint_markers_by_strid;
4837 }
4838
4839 struct target_ops *
4840 linux_target (void)
4841 {
4842 struct target_ops *t;
4843
4844 t = inf_ptrace_target ();
4845 linux_target_install_ops (t);
4846
4847 return t;
4848 }
4849
4850 struct target_ops *
4851 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4852 {
4853 struct target_ops *t;
4854
4855 t = inf_ptrace_trad_target (register_u_offset);
4856 linux_target_install_ops (t);
4857
4858 return t;
4859 }
4860
4861 /* target_is_async_p implementation. */
4862
4863 static int
4864 linux_nat_is_async_p (void)
4865 {
4866 /* NOTE: palves 2008-03-21: We're only async when the user requests
4867 it explicitly with the "set target-async" command.
4868 Someday, linux will always be async. */
4869 return target_async_permitted;
4870 }
4871
4872 /* target_can_async_p implementation. */
4873
4874 static int
4875 linux_nat_can_async_p (void)
4876 {
4877 /* NOTE: palves 2008-03-21: We're only async when the user requests
4878 it explicitly with the "set target-async" command.
4879 Someday, linux will always be async. */
4880 return target_async_permitted;
4881 }
4882
4883 static int
4884 linux_nat_supports_non_stop (void)
4885 {
4886 return 1;
4887 }
4888
4889 /* True if we want to support multi-process. To be removed when GDB
4890 supports multi-exec. */
4891
4892 int linux_multi_process = 1;
4893
4894 static int
4895 linux_nat_supports_multi_process (void)
4896 {
4897 return linux_multi_process;
4898 }
4899
4900 static int
4901 linux_nat_supports_disable_randomization (void)
4902 {
4903 #ifdef HAVE_PERSONALITY
4904 return 1;
4905 #else
4906 return 0;
4907 #endif
4908 }
4909
4910 static int async_terminal_is_ours = 1;
4911
4912 /* target_terminal_inferior implementation. */
4913
4914 static void
4915 linux_nat_terminal_inferior (void)
4916 {
4917 if (!target_is_async_p ())
4918 {
4919 /* Async mode is disabled. */
4920 terminal_inferior ();
4921 return;
4922 }
4923
4924 terminal_inferior ();
4925
4926 /* Calls to target_terminal_*() are meant to be idempotent. */
4927 if (!async_terminal_is_ours)
4928 return;
4929
4930 delete_file_handler (input_fd);
4931 async_terminal_is_ours = 0;
4932 set_sigint_trap ();
4933 }
4934
4935 /* target_terminal_ours implementation. */
4936
4937 static void
4938 linux_nat_terminal_ours (void)
4939 {
4940 if (!target_is_async_p ())
4941 {
4942 /* Async mode is disabled. */
4943 terminal_ours ();
4944 return;
4945 }
4946
4947 /* GDB should never give the terminal to the inferior if the
4948 inferior is running in the background (run&, continue&, etc.),
4949 but claiming it sure should. */
4950 terminal_ours ();
4951
4952 if (async_terminal_is_ours)
4953 return;
4954
4955 clear_sigint_trap ();
4956 add_file_handler (input_fd, stdin_event_handler, 0);
4957 async_terminal_is_ours = 1;
4958 }
4959
4960 static void (*async_client_callback) (enum inferior_event_type event_type,
4961 void *context);
4962 static void *async_client_context;
4963
4964 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4965 it notifies the event loop when any child changes state; in sync
4966 mode, it allows us to use sigsuspend in linux_nat_wait_1 above to
4967 wait for the arrival of a SIGCHLD. */
4968
4969 static void
4970 sigchld_handler (int signo)
4971 {
4972 int old_errno = errno;
4973
4974 if (debug_linux_nat)
4975 ui_file_write_async_safe (gdb_stdlog,
4976 "sigchld\n", sizeof ("sigchld\n") - 1);
4977
4978 if (signo == SIGCHLD
4979 && linux_nat_event_pipe[0] != -1)
4980 async_file_mark (); /* Let the event loop know that there are
4981 events to handle. */
4982
4983 errno = old_errno;
4984 }
4985
4986 /* Callback registered with the target events file descriptor. */
4987
4988 static void
4989 handle_target_event (int error, gdb_client_data client_data)
4990 {
4991 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4992 }
4993
4994 /* Create/destroy the target events pipe. Returns previous state. */
4995
4996 static int
4997 linux_async_pipe (int enable)
4998 {
4999 int previous = (linux_nat_event_pipe[0] != -1);
5000
5001 if (previous != enable)
5002 {
5003 sigset_t prev_mask;
5004
5005 block_child_signals (&prev_mask);
5006
5007 if (enable)
5008 {
5009 if (pipe (linux_nat_event_pipe) == -1)
5010 internal_error (__FILE__, __LINE__,
5011 "creating event pipe failed.");
5012
5013 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5014 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5015 }
5016 else
5017 {
5018 close (linux_nat_event_pipe[0]);
5019 close (linux_nat_event_pipe[1]);
5020 linux_nat_event_pipe[0] = -1;
5021 linux_nat_event_pipe[1] = -1;
5022 }
5023
5024 restore_child_signals_mask (&prev_mask);
5025 }
5026
5027 return previous;
5028 }
5029
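/* Editor's sketch, not part of GDB: the "self-pipe trick" that
   linux_async_pipe and sigchld_handler implement above.  A signal
   handler may only call async-signal-safe functions, so it writes one
   byte to a non-blocking pipe and the event loop does the real work
   after select returns.  Names are illustrative; #if 0'd out.  */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/wait.h>
#include <unistd.h>

static int event_pipe[2];

static void
chld_handler (int signo)
{
  /* write is async-signal-safe; stdio would not be.  */
  write (event_pipe[1], "+", 1);
}

int
main (void)
{
  fd_set rfds;
  char ch;

  pipe (event_pipe);
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, chld_handler);

  if (fork () == 0)
    _exit (0);			/* Child exits; parent gets SIGCHLD.  */

  FD_ZERO (&rfds);
  FD_SET (event_pipe[0], &rfds);
  select (event_pipe[0] + 1, &rfds, NULL, NULL, NULL);

  while (read (event_pipe[0], &ch, 1) > 0)
    ;				/* Drain the pipe.  */
  waitpid (-1, NULL, 0);	/* Reap outside signal context.  */
  puts ("child event handled in the main loop");
  return 0;
}
#endif
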
5030 /* target_async implementation. */
5031
5032 static void
5033 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5034 void *context), void *context)
5035 {
5036 if (callback != NULL)
5037 {
5038 async_client_callback = callback;
5039 async_client_context = context;
5040 if (!linux_async_pipe (1))
5041 {
5042 add_file_handler (linux_nat_event_pipe[0],
5043 handle_target_event, NULL);
5044 /* There may be pending events to handle. Tell the event loop
5045 to poll them. */
5046 async_file_mark ();
5047 }
5048 }
5049 else
5050 {
5051 async_client_callback = callback;
5052 async_client_context = context;
5053 delete_file_handler (linux_nat_event_pipe[0]);
5054 linux_async_pipe (0);
5055 }
5056 return;
5057 }
5058
5059 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5060 event came out. */
5061
5062 static int
5063 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
5064 {
5065 if (!lwp->stopped)
5066 {
5067 ptid_t ptid = lwp->ptid;
5068
5069 if (debug_linux_nat)
5070 fprintf_unfiltered (gdb_stdlog,
5071 "LNSL: running -> suspending %s\n",
5072 target_pid_to_str (lwp->ptid));
5073
5074
5075 if (lwp->last_resume_kind == resume_stop)
5076 {
5077 if (debug_linux_nat)
5078 fprintf_unfiltered (gdb_stdlog,
5079 "linux-nat: already stopping LWP %ld at "
5080 "GDB's request\n",
5081 ptid_get_lwp (lwp->ptid));
5082 return 0;
5083 }
5084
5085 stop_callback (lwp, NULL);
5086 lwp->last_resume_kind = resume_stop;
5087 }
5088 else
5089 {
5090 /* Already known to be stopped; do nothing. */
5091
5092 if (debug_linux_nat)
5093 {
5094 if (find_thread_ptid (lwp->ptid)->stop_requested)
5095 fprintf_unfiltered (gdb_stdlog,
5096 "LNSL: already stopped/stop_requested %s\n",
5097 target_pid_to_str (lwp->ptid));
5098 else
5099 fprintf_unfiltered (gdb_stdlog,
5100 "LNSL: already stopped/no "
5101 "stop_requested yet %s\n",
5102 target_pid_to_str (lwp->ptid));
5103 }
5104 }
5105 return 0;
5106 }
5107
5108 static void
5109 linux_nat_stop (ptid_t ptid)
5110 {
5111 if (non_stop)
5112 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5113 else
5114 linux_ops->to_stop (ptid);
5115 }
5116
5117 static void
5118 linux_nat_close (int quitting)
5119 {
5120 /* Unregister from the event loop. */
5121 if (linux_nat_is_async_p ())
5122 linux_nat_async (NULL, 0);
5123
5124 if (linux_ops->to_close)
5125 linux_ops->to_close (quitting);
5126 }
5127
5128 /* When requests are passed down from the linux-nat layer to the
5129 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5130 used. The address space pointer is stored in the inferior object,
5131 but the common code that is passed such ptid can't tell whether
5132 lwpid is a "main" process id or not (it assumes so). We reverse
5133 look up the "main" process id from the lwp here. */
5134
5135 static struct address_space *
5136 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5137 {
5138 struct lwp_info *lwp;
5139 struct inferior *inf;
5140 int pid;
5141
5142 pid = GET_LWP (ptid);
5143 if (GET_LWP (ptid) == 0)
5144 {
5145 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5146 tgid. */
5147 lwp = find_lwp_pid (ptid);
5148 pid = GET_PID (lwp->ptid);
5149 }
5150 else
5151 {
5152 /* A (pid,lwpid,0) ptid. */
5153 pid = GET_PID (ptid);
5154 }
5155
5156 inf = find_inferior_pid (pid);
5157 gdb_assert (inf != NULL);
5158 return inf->aspace;
5159 }
5160
5161 /* Return the cached value of the processor core for thread PTID. */
5162
5163 static int
5164 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5165 {
5166 struct lwp_info *info = find_lwp_pid (ptid);
5167
5168 if (info)
5169 return info->core;
5170 return -1;
5171 }
5172
5173 void
5174 linux_nat_add_target (struct target_ops *t)
5175 {
5176 /* Save the provided single-threaded target. We save this in a separate
5177 variable because another target we've inherited from (e.g. inf-ptrace)
5178 may have saved a pointer to T; we want to use it for the final
5179 process stratum target. */
5180 linux_ops_saved = *t;
5181 linux_ops = &linux_ops_saved;
5182
5183 /* Override some methods for multithreading. */
5184 t->to_create_inferior = linux_nat_create_inferior;
5185 t->to_attach = linux_nat_attach;
5186 t->to_detach = linux_nat_detach;
5187 t->to_resume = linux_nat_resume;
5188 t->to_wait = linux_nat_wait;
5189 t->to_pass_signals = linux_nat_pass_signals;
5190 t->to_xfer_partial = linux_nat_xfer_partial;
5191 t->to_kill = linux_nat_kill;
5192 t->to_mourn_inferior = linux_nat_mourn_inferior;
5193 t->to_thread_alive = linux_nat_thread_alive;
5194 t->to_pid_to_str = linux_nat_pid_to_str;
5195 t->to_thread_name = linux_nat_thread_name;
5196 t->to_has_thread_control = tc_schedlock;
5197 t->to_thread_address_space = linux_nat_thread_address_space;
5198 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5199 t->to_stopped_data_address = linux_nat_stopped_data_address;
5200
5201 t->to_can_async_p = linux_nat_can_async_p;
5202 t->to_is_async_p = linux_nat_is_async_p;
5203 t->to_supports_non_stop = linux_nat_supports_non_stop;
5204 t->to_async = linux_nat_async;
5205 t->to_terminal_inferior = linux_nat_terminal_inferior;
5206 t->to_terminal_ours = linux_nat_terminal_ours;
5207 t->to_close = linux_nat_close;
5208
5209 /* Methods for non-stop support. */
5210 t->to_stop = linux_nat_stop;
5211
5212 t->to_supports_multi_process = linux_nat_supports_multi_process;
5213
5214 t->to_supports_disable_randomization
5215 = linux_nat_supports_disable_randomization;
5216
5217 t->to_core_of_thread = linux_nat_core_of_thread;
5218
5219 /* We don't change the stratum; this target will sit at
5220 process_stratum and thread_db will sit at thread_stratum. This
5221 is a little strange, since this is a multi-threaded-capable
5222 target, but we want to be on the stack below thread_db, and we
5223 also want to be used for single-threaded processes. */
5224
5225 add_target (t);
5226 }
5227
5228 /* Register a method to call whenever a new thread is attached. */
5229 void
5230 linux_nat_set_new_thread (struct target_ops *t,
5231 void (*new_thread) (struct lwp_info *))
5232 {
5233 /* Save the pointer. We only support a single registered instance
5234 of the GNU/Linux native target, so we do not need to map this to
5235 T. */
5236 linux_nat_new_thread = new_thread;
5237 }
5238
5239 /* Register a method that converts a siginfo object between the layout
5240 that ptrace returns, and the layout in the architecture of the
5241 inferior. */
5242 void
5243 linux_nat_set_siginfo_fixup (struct target_ops *t,
5244 int (*siginfo_fixup) (struct siginfo *,
5245 gdb_byte *,
5246 int))
5247 {
5248 /* Save the pointer. */
5249 linux_nat_siginfo_fixup = siginfo_fixup;
5250 }
5251
5252 /* Register a method to call prior to resuming a thread. */
5253
5254 void
5255 linux_nat_set_prepare_to_resume (struct target_ops *t,
5256 void (*prepare_to_resume) (struct lwp_info *))
5257 {
5258 /* Save the pointer. */
5259 linux_nat_prepare_to_resume = prepare_to_resume;
5260 }
5261
5262 /* Return the saved siginfo associated with PTID. */
5263 struct siginfo *
5264 linux_nat_get_siginfo (ptid_t ptid)
5265 {
5266 struct lwp_info *lp = find_lwp_pid (ptid);
5267
5268 gdb_assert (lp != NULL);
5269
5270 return &lp->siginfo;
5271 }
5272
5273 /* Provide a prototype to silence -Wmissing-prototypes. */
5274 extern initialize_file_ftype _initialize_linux_nat;
5275
5276 void
5277 _initialize_linux_nat (void)
5278 {
5279 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5280 &debug_linux_nat, _("\
5281 Set debugging of GNU/Linux lwp module."), _("\
5282 Show debugging of GNU/Linux lwp module."), _("\
5283 Enables printf debugging output."),
5284 NULL,
5285 show_debug_linux_nat,
5286 &setdebuglist, &showdebuglist);
5287
5288 /* Save this mask as the default. */
5289 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5290
5291 /* Install a SIGCHLD handler. */
5292 sigchld_action.sa_handler = sigchld_handler;
5293 sigemptyset (&sigchld_action.sa_mask);
5294 sigchld_action.sa_flags = SA_RESTART;
5295
5296 /* Make it the default. */
5297 sigaction (SIGCHLD, &sigchld_action, NULL);
5298
5299 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5300 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5301 sigdelset (&suspend_mask, SIGCHLD);
5302
5303 sigemptyset (&blocked_mask);
5304 }
5305 \f
5306
5307 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5308 the GNU/Linux Threads library and therefore doesn't really belong
5309 here. */
5310
5311 /* Read variable NAME in the target and return its value if found.
5312 Otherwise return zero. It is assumed that the type of the variable
5313 is `int'. */
5314
5315 static int
5316 get_signo (const char *name)
5317 {
5318 struct minimal_symbol *ms;
5319 int signo;
5320
5321 ms = lookup_minimal_symbol (name, NULL, NULL);
5322 if (ms == NULL)
5323 return 0;
5324
5325 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5326 sizeof (signo)) != 0)
5327 return 0;
5328
5329 return signo;
5330 }
5331
5332 /* Return the set of signals used by the threads library in *SET. */
5333
5334 void
5335 lin_thread_get_thread_signals (sigset_t *set)
5336 {
5337 struct sigaction action;
5338 int restart, cancel;
5339
5340 sigemptyset (&blocked_mask);
5341 sigemptyset (set);
5342
5343 restart = get_signo ("__pthread_sig_restart");
5344 cancel = get_signo ("__pthread_sig_cancel");
5345
5346 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5347 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5348 not provide any way for the debugger to query the signal numbers -
5349 fortunately they don't change! */
5350
5351 if (restart == 0)
5352 restart = __SIGRTMIN;
5353
5354 if (cancel == 0)
5355 cancel = __SIGRTMIN + 1;
5356
5357 sigaddset (set, restart);
5358 sigaddset (set, cancel);
5359
5360 /* The GNU/Linux Threads library makes terminating threads send a
5361 special "cancel" signal instead of SIGCHLD. Make sure we catch
5362 those (to prevent them from terminating GDB itself, which is
5363 likely to be their default action) and treat them the same way as
5364 SIGCHLD. */
5365
5366 action.sa_handler = sigchld_handler;
5367 sigemptyset (&action.sa_mask);
5368 action.sa_flags = SA_RESTART;
5369 sigaction (cancel, &action, NULL);
5370
5371 /* We block the "cancel" signal throughout this code ... */
5372 sigaddset (&blocked_mask, cancel);
5373 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5374
5375 /* ... except during a sigsuspend. */
5376 sigdelset (&suspend_mask, cancel);
5377 }